diff --git a/ocaml/.gitignore b/ocaml/.gitignore index cc1fc4428b3..cb0e4f7bec7 100644 --- a/ocaml/.gitignore +++ b/ocaml/.gitignore @@ -360,4 +360,4 @@ META /otherlibs/dynlink/natdynlinkops dune.runtime_selection - +/otherlibs/dune diff --git a/ocaml/Makefile.common b/ocaml/Makefile.common index 5bec430b796..2855ef4b1a5 100644 --- a/ocaml/Makefile.common +++ b/ocaml/Makefile.common @@ -140,7 +140,7 @@ endif # ifeq "$(wildcard $(ROOTDIR)/flexlink.opt$(EXE))" "" endif # ifeq "$(BOOTSTRAPPING_FLEXDLL)" "false" # List of other libraries -ALL_OTHERLIBS = dynlink str systhreads unix runtime_events +ALL_OTHERLIBS = dynlink str systhreads$(RUNTIME_SUFFIX) unix runtime_events # Flags to pass to the C preprocessor when preprocessing assembly files OC_ASPPFLAGS=$(OC_CPPFLAGS) $(OC_NATIVE_CPPFLAGS) diff --git a/ocaml/Makefile.common-jst b/ocaml/Makefile.common-jst index 1c65adca01e..7660136a16a 100644 --- a/ocaml/Makefile.common-jst +++ b/ocaml/Makefile.common-jst @@ -110,7 +110,15 @@ dune_config_targets = \ $(ocamldir)/duneconf/jst-extra.inc \ dune-project \ $(ocamldir)/stdlib/ocaml_compiler_internal_params \ - $(ocamldir)/dune.runtime_selection + $(ocamldir)/dune.runtime_selection \ + $(ocamldir)/otherlibs/dune + +$(ocamldir)/otherlibs/dune: + if [ "$(RUNTIME_DIR)" = "runtime4" ]; then \ + echo "(dirs (:standard \ systhreads))" > $@; \ + else \ + echo "(dirs (:standard \ systhreads4))" > $@; \ + fi $(ocamldir)/dune.runtime_selection: if [ "$(RUNTIME_DIR)" = "runtime4" ]; then \ @@ -242,7 +250,7 @@ install_for_test: _install ln -s . lex; ln -s . 
yacc; \ ln -s _install/lib/ocaml/compiler-libs compilerlibs; \ mkdir -p otherlibs/{unix,dynlink/native,str,bigarray}; \ - ln -s ../stdlib/threads otherlibs/systhreads; \ + ln -s ../stdlib/threads otherlibs/systhreads$(RUNTIME_SUFFIX); \ $(cpl) stdlib/unix/{lib,}unix* otherlibs/unix; \ $(cpl) stdlib/dynlink/dynlink* otherlibs/dynlink; \ $(cpl) stdlib/str/{lib,}str* otherlibs/str; \ diff --git a/ocaml/configure b/ocaml/configure index c732c15a344..ac8a7156c27 100755 --- a/ocaml/configure +++ b/ocaml/configure @@ -784,6 +784,7 @@ ocamltest_libunix ocamltest_CPP lib_unix lib_systhreads +lib_systhreads_path lib_str lib_runtime_events lib_dynlink @@ -3214,6 +3215,7 @@ flexdll_dir= lib_dynlink=false lib_str=false lib_systhreads=false +lib_systhreads_path="" lib_unix=false ocamltest_libunix=None ocamltest_unix_impl="dummy" @@ -19285,6 +19287,12 @@ esac ## Activate the systhread library +if [ "$enable_runtime5" = "yes" ]; then + runtime_suffix= +else + runtime_suffix=4 +fi + case $enable_systhreads,$enable_unix_lib in #( yes,no) : systhread_support=false @@ -19295,10 +19303,11 @@ case $enable_systhreads,$enable_unix_lib in #( printf "%s\n" "$as_me: the threads library is disabled" >&6;} ;; #( *) : systhread_support=true - ac_config_files="$ac_config_files otherlibs/systhreads/META" + ac_config_files="$ac_config_files otherlibs/systhreads${runtime_suffix}/META" - otherlibraries="$otherlibraries systhreads" + otherlibraries="$otherlibraries systhreads${runtime_suffix}" lib_systhreads=true + lib_systhreads_path="systhreads${runtime_suffix}" { printf "%s\n" "$as_me:${as_lineno-$LINENO}: the threads library is supported" >&5 printf "%s\n" "$as_me: the threads library is supported" >&6;} ;; esac @@ -21258,6 +21267,7 @@ do "otherlibs/unix/META") CONFIG_FILES="$CONFIG_FILES otherlibs/unix/META" ;; "otherlibs/str/META") CONFIG_FILES="$CONFIG_FILES otherlibs/str/META" ;; "otherlibs/systhreads/META") CONFIG_FILES="$CONFIG_FILES otherlibs/systhreads/META" ;; + 
"otherlibs/systhreads4/META") CONFIG_FILES="$CONFIG_FILES otherlibs/systhreads4/META" ;; "ocamldoc/META") CONFIG_FILES="$CONFIG_FILES ocamldoc/META" ;; *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; @@ -22394,5 +22404,3 @@ if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 printf "%s\n" "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} fi - - diff --git a/ocaml/configure.ac b/ocaml/configure.ac index d08b09731fc..223533e3cca 100644 --- a/ocaml/configure.ac +++ b/ocaml/configure.ac @@ -69,6 +69,7 @@ flexdll_dir= lib_dynlink=false lib_str=false lib_systhreads=false +lib_systhreads_path="" lib_unix=false ocamltest_libunix=None ocamltest_unix_impl="dummy" @@ -170,6 +171,7 @@ AC_SUBST([lib_dynlink]) AC_SUBST([lib_runtime_events]) AC_SUBST([lib_str]) AC_SUBST([lib_systhreads]) +AC_SUBST([lib_systhreads_path]) AC_SUBST([lib_unix]) AC_SUBST([ocamltest_CPP]) AC_SUBST([ocamltest_libunix]) @@ -2116,6 +2118,12 @@ AS_CASE([$host], ## Activate the systhread library +if [ "$enable_runtime5" = "yes" ]; then + runtime_suffix= +else + runtime_suffix=4 +fi + AS_CASE([$enable_systhreads,$enable_unix_lib], [yes,no], [systhread_support=false @@ -2124,9 +2132,10 @@ AS_CASE([$enable_systhreads,$enable_unix_lib], [systhread_support=false AC_MSG_NOTICE([the threads library is disabled])], [systhread_support=true - AC_CONFIG_FILES([otherlibs/systhreads/META]) - otherlibraries="$otherlibraries systhreads" + AC_CONFIG_FILES([otherlibs/systhreads${runtime_suffix}/META]) + otherlibraries="$otherlibraries systhreads${runtime_suffix}" lib_systhreads=true + lib_systhreads_path="systhreads${runtime_suffix}" AC_MSG_NOTICE([the threads library is supported])]) ## Does the assembler support debug prefix map and CFI directives diff --git a/ocaml/ocamltest/ocaml_modifiers.ml b/ocaml/ocamltest/ocaml_modifiers.ml index 
af4ebc0684c..b3f20b72b8d 100644 --- a/ocaml/ocamltest/ocaml_modifiers.ml +++ b/ocaml/ocamltest/ocaml_modifiers.ml @@ -96,7 +96,7 @@ let str = make_library_modifier let systhreads = unix @ (make_library_modifier - "threads" [compiler_subdir ["otherlibs"; "systhreads"]]) + "threads" [compiler_subdir ["otherlibs"; Ocamltest_config.systhreads_path]]) let runtime_events = make_library_modifier diff --git a/ocaml/ocamltest/ocamltest_config.ml.in b/ocaml/ocamltest/ocamltest_config.ml.in index 47b890edc54..d414d227ab1 100644 --- a/ocaml/ocamltest/ocamltest_config.ml.in +++ b/ocaml/ocamltest/ocamltest_config.ml.in @@ -38,6 +38,8 @@ let libunix = @ocamltest_libunix@ let systhreads = @lib_systhreads@ +let systhreads_path = {@QS@|@lib_systhreads_path@|@QS@} + let str = @lib_str@ let objext = {@QS@|@OBJEXT@|@QS@} diff --git a/ocaml/ocamltest/ocamltest_config.mli b/ocaml/ocamltest/ocamltest_config.mli index 1643a8163e4..54bf3d43c7f 100644 --- a/ocaml/ocamltest/ocamltest_config.mli +++ b/ocaml/ocamltest/ocamltest_config.mli @@ -49,6 +49,9 @@ val libunix : bool option val systhreads : bool (** Indicates whether systhreads is available. *) +val systhreads_path : string +(** Indicates where the systhreads library is installed under otherlibs/. *) + val str : bool (** Indicates whether str is available. *) diff --git a/ocaml/otherlibs/Makefile b/ocaml/otherlibs/Makefile index 5fe2d750cdf..8c8d34f8db5 100644 --- a/ocaml/otherlibs/Makefile +++ b/ocaml/otherlibs/Makefile @@ -21,7 +21,7 @@ include $(ROOTDIR)/Makefile.common # at the moment, the clean targets depend on this variable but # when they are invoked ../Makefile.config is not included, so that # OTHERLIBRARIES would be empty and the clean targets would thus not work. 
-OTHERLIBRARIES ?= dynlink str systhreads unix runtime_events +OTHERLIBRARIES ?= dynlink str systhreads$(RUNTIME_SUFFIX) unix runtime_events # $1: target name to dispatch to all otherlibs/*/Makefile define dispatch_ diff --git a/ocaml/otherlibs/systhreads/.depend b/ocaml/otherlibs/systhreads/.depend index 661d3575dec..11b76f90bb7 100644 --- a/ocaml/otherlibs/systhreads/.depend +++ b/ocaml/otherlibs/systhreads/.depend @@ -1,34 +1,8 @@ -condition.cmo : \ - mutex.cmi \ - condition.cmi -condition.cmx : \ - mutex.cmx \ - condition.cmi -condition.cmi : \ - mutex.cmi event.cmo : \ - mutex.cmi \ - condition.cmi \ event.cmi event.cmx : \ - mutex.cmx \ - condition.cmx \ event.cmi event.cmi : -mutex.cmo : \ - mutex.cmi -mutex.cmx : \ - mutex.cmi -mutex.cmi : -semaphore.cmo : \ - mutex.cmi \ - condition.cmi \ - semaphore.cmi -semaphore.cmx : \ - mutex.cmx \ - condition.cmx \ - semaphore.cmi -semaphore.cmi : thread.cmo : \ thread.cmi thread.cmx : \ diff --git a/ocaml/otherlibs/systhreads/Makefile b/ocaml/otherlibs/systhreads/Makefile index 1404646da39..a098f0bafdb 100644 --- a/ocaml/otherlibs/systhreads/Makefile +++ b/ocaml/otherlibs/systhreads/Makefile @@ -22,7 +22,7 @@ ifneq "$(CCOMPTYPE)" "msvc" OC_CFLAGS += -g endif -OC_CFLAGS += $(SHAREDLIB_CFLAGS) $(PTHREAD_CFLAGS) +OC_CFLAGS += $(SHAREDLIB_CFLAGS) LIBS = $(STDLIBFLAGS) -I $(ROOTDIR)/otherlibs/unix @@ -45,12 +45,12 @@ LIBNAME=threads BYTECODE_C_OBJS=st_stubs.b.$(O) NATIVECODE_C_OBJS=st_stubs.n.$(O) -THREADS_SOURCES = thread.ml mutex.ml condition.ml event.ml semaphore.ml +THREADS_SOURCES = thread.ml event.ml THREADS_BCOBJS = $(THREADS_SOURCES:.ml=.cmo) THREADS_NCOBJS = $(THREADS_SOURCES:.ml=.cmx) -MLIFILES=thread.mli mutex.mli condition.mli event.mli semaphore.mli +MLIFILES=thread.mli event.mli CMIFILES=$(MLIFILES:.mli=.cmi) @@ -61,7 +61,7 @@ allopt: lib$(LIBNAME)nat.$(A) $(LIBNAME).cmxa $(CMIFILES) lib$(LIBNAME).$(A): $(BYTECODE_C_OBJS) $(V_OCAMLMKLIB)$(MKLIB_CMD) -o $(LIBNAME) $(BYTECODE_C_OBJS) -lib$(LIBNAME)nat.$(A): 
OC_CFLAGS += $(OC_NATIVE_CFLAGS) -DNATIVE_CODE +lib$(LIBNAME)nat.$(A): OC_CFLAGS += $(OC_NATIVE_CFLAGS) lib$(LIBNAME)nat.$(A): $(NATIVECODE_C_OBJS) $(V_OCAMLMKLIB)$(MKLIB_CMD) -o $(LIBNAME)nat $^ diff --git a/ocaml/otherlibs/systhreads/dune b/ocaml/otherlibs/systhreads/dune index 5c9f889f564..68028fa05a6 100644 --- a/ocaml/otherlibs/systhreads/dune +++ b/ocaml/otherlibs/systhreads/dune @@ -12,47 +12,22 @@ ;* * ;************************************************************************** - ; For some reason the C header files aren't being found if this library ; is given a public name, so we do the installation manually. -(install - (files - (byte/threads.cma as threads/threads.cma) - (native/threadsnat.cmxa as threads/threads.cmxa) - (native/threadsnat.a as threads/threads.a) - - (byte/libthreads_stubs.a as libthreads_stubs.a) - (byte/dllthreads_stubs.so as stublibs/dllthreads_stubs.so) - (native/libthreadsnat_stubs.a as libthreadsnat_stubs.a) - (native/libthreadsnat_stubs.a as libthreadsnat_stubs_native.a) ; for special_dune compat - - - (thread.mli as threads/thread.mli) - (condition.mli as threads/condition.mli) - (event.mli as threads/event.mli) - (mutex.mli as threads/mutex.mli) - (semaphore.mli as threads/semaphore.mli) - - (threads.h as caml/threads.h) - - (native/.threadsnat.objs/native/condition.cmx as threads/condition.cmx) - (native/.threadsnat.objs/native/event.cmx as threads/event.cmx) - (native/.threadsnat.objs/native/mutex.cmx as threads/mutex.cmx) - (native/.threadsnat.objs/native/semaphore.cmx as threads/semaphore.cmx) - (native/.threadsnat.objs/native/thread.cmx as threads/thread.cmx) - - (byte/.threads.objs/byte/condition.cmi as threads/condition.cmi) - (byte/.threads.objs/byte/condition.cmti as threads/condition.cmti) - (byte/.threads.objs/byte/event.cmi as threads/event.cmi) - (byte/.threads.objs/byte/event.cmti as threads/event.cmti) - (byte/.threads.objs/byte/mutex.cmi as threads/mutex.cmi) - (byte/.threads.objs/byte/mutex.cmti as 
threads/mutex.cmti) - (byte/.threads.objs/byte/semaphore.cmi as threads/semaphore.cmi) - (byte/.threads.objs/byte/semaphore.cmti as threads/semaphore.cmti) - (byte/.threads.objs/byte/thread.cmi as threads/thread.cmi) - (byte/.threads.objs/byte/thread.cmti as threads/thread.cmti) - ) - (section lib) - (package ocaml)) - +(install + (files + (byte/threads.cma as threads/threads.cma) + (native/threadsnat.cmxa as threads/threads.cmxa) + (native/threadsnat.a as threads/threads.a) + (byte/libthreads_stubs.a as libthreads_stubs.a) + (byte/dllthreads_stubs.so as stublibs/dllthreads_stubs.so) + (native/libthreadsnat_stubs.a as libthreadsnat_stubs.a) + (native/libthreadsnat_stubs.a as libthreadsnat_stubs_native.a) ; for special_dune compat + (thread.mli as threads/thread.mli) + (threads.h as caml/threads.h) + (native/.threadsnat.objs/native/thread.cmx as threads/thread.cmx) + (byte/.threads.objs/byte/thread.cmi as threads/thread.cmi) + (byte/.threads.objs/byte/thread.cmti as threads/thread.cmti)) + (section lib) + (package ocaml)) diff --git a/ocaml/otherlibs/systhreads/event.ml b/ocaml/otherlibs/systhreads/event.ml index fc8a65b042f..f5fc9785fcb 100644 --- a/ocaml/otherlibs/systhreads/event.ml +++ b/ocaml/otherlibs/systhreads/event.ml @@ -13,8 +13,6 @@ (* *) (**************************************************************************) -[@@@ocaml.flambda_o3] - (* Events *) type 'a basic_event = { poll: unit -> bool; diff --git a/ocaml/otherlibs/systhreads/st_posix.h b/ocaml/otherlibs/systhreads/st_posix.h index a43c78fbc92..1ed25fdef35 100644 --- a/ocaml/otherlibs/systhreads/st_posix.h +++ b/ocaml/otherlibs/systhreads/st_posix.h @@ -15,487 +15,26 @@ /* POSIX thread implementation of the "st" interface */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#ifdef __linux__ -#include -#include -#include -#include -#include +#ifdef HAS_SYS_SELECT_H +#include #endif -typedef int st_retcode; - -#define SIGPREEMPTION SIGVTALRM - -/* 
OS-specific initialization */ - -static int st_initialize(void) -{ - caml_sigmask_hook = pthread_sigmask; - return 0; -} - -/* Thread creation. Created in detached mode if [res] is NULL. */ - -typedef pthread_t st_thread_id; - -static int st_thread_create(st_thread_id * res, - void * (*fn)(void *), void * arg) -{ - pthread_t thr; - pthread_attr_t attr; - int rc; - - pthread_attr_init(&attr); - if (res == NULL) pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); - rc = pthread_create(&thr, &attr, fn, arg); - if (res != NULL) *res = thr; - return rc; -} - -#define ST_THREAD_FUNCTION void * - -/* Cleanup at thread exit */ - -Caml_inline void st_thread_cleanup(void) -{ - return; -} - -/* Thread termination */ - -CAMLnoreturn_start -static void st_thread_exit(void) -CAMLnoreturn_end; - -static void st_thread_exit(void) -{ - pthread_exit(NULL); -} - -static void st_thread_join(st_thread_id thr) -{ - pthread_join(thr, NULL); - /* best effort: ignore errors */ -} - -/* Thread-specific state */ - -typedef pthread_key_t st_tlskey; - -static int st_tls_newkey(st_tlskey * res) -{ - return pthread_key_create(res, NULL); -} - -Caml_inline void * st_tls_get(st_tlskey k) -{ - return pthread_getspecific(k); -} - -Caml_inline void st_tls_set(st_tlskey k, void * v) -{ - pthread_setspecific(k, v); -} - -/* Windows-specific hook. */ -Caml_inline void st_thread_set_id(intnat id) -{ - return; -} - -/* If we're using glibc, use a custom condition variable implementation to - avoid this bug: https://sourceware.org/bugzilla/show_bug.cgi?id=25847 - - For now we only have this on linux because it directly uses the linux futex - syscalls. 
*/ -#if defined(__linux__) && defined(__GNU_LIBRARY__) && defined(__GLIBC__) && defined(__GLIBC_MINOR__) -typedef struct { - volatile unsigned counter; -} custom_condvar; - -static int custom_condvar_init(custom_condvar * cv) -{ - cv->counter = 0; - return 0; -} - -static int custom_condvar_destroy(custom_condvar * cv) -{ - return 0; -} - -static int custom_condvar_wait(custom_condvar * cv, pthread_mutex_t * mutex) -{ - unsigned old_count = cv->counter; - pthread_mutex_unlock(mutex); - syscall(SYS_futex, &cv->counter, FUTEX_WAIT_PRIVATE, old_count, NULL, NULL, 0); - pthread_mutex_lock(mutex); - return 0; -} - -static int custom_condvar_signal(custom_condvar * cv) -{ - __sync_add_and_fetch(&cv->counter, 1); - syscall(SYS_futex, &cv->counter, FUTEX_WAKE_PRIVATE, 1, NULL, NULL, 0); - return 0; -} - -static int custom_condvar_broadcast(custom_condvar * cv) -{ - __sync_add_and_fetch(&cv->counter, 1); - syscall(SYS_futex, &cv->counter, FUTEX_WAKE_PRIVATE, INT_MAX, NULL, NULL, 0); - return 0; -} -#else -typedef pthread_cond_t custom_condvar; - -static int custom_condvar_init(custom_condvar * cv) -{ - return pthread_cond_init(cv, NULL); -} - -static int custom_condvar_destroy(custom_condvar * cv) -{ - return pthread_cond_destroy(cv); -} - -static int custom_condvar_wait(custom_condvar * cv, pthread_mutex_t * mutex) -{ - return pthread_cond_wait(cv, mutex); -} - -static int custom_condvar_signal(custom_condvar * cv) -{ - return pthread_cond_signal(cv); -} - -static int custom_condvar_broadcast(custom_condvar * cv) -{ - return pthread_cond_broadcast(cv); -} -#endif - -/* The master lock. This is a mutex that is held most of the time, - so we implement it in a slightly convoluted way to avoid - all risks of busy-waiting. Also, we count the number of waiting - threads. 
*/ - -typedef struct { - pthread_mutex_t lock; /* to protect contents */ - int busy; /* 0 = free, 1 = taken */ - volatile int waiters; /* number of threads waiting on master lock */ - custom_condvar is_free; /* signaled when free */ -} st_masterlock; - -static void st_masterlock_init(st_masterlock * m) -{ - pthread_mutex_init(&m->lock, NULL); - custom_condvar_init(&m->is_free); - m->busy = 1; - m->waiters = 0; -} - -static void st_masterlock_acquire(st_masterlock * m) -{ - pthread_mutex_lock(&m->lock); - while (m->busy) { - m->waiters ++; - custom_condvar_wait(&m->is_free, &m->lock); - m->waiters --; - } - m->busy = 1; - pthread_mutex_unlock(&m->lock); -} - -static void st_masterlock_release(st_masterlock * m) +Caml_inline void st_msleep(int msec) { - pthread_mutex_lock(&m->lock); - m->busy = 0; - pthread_mutex_unlock(&m->lock); - custom_condvar_signal(&m->is_free); + struct timeval timeout = {0, msec * 1000}; + select(0, NULL, NULL, NULL, &timeout); } -CAMLno_tsan /* This can be called for reading [waiters] without locking. */ -Caml_inline int st_masterlock_waiters(st_masterlock * m) -{ - return m->waiters; -} - -/* Scheduling hints */ - -/* This is mostly equivalent to release(); acquire(), but better. In particular, - release(); acquire(); leaves both us and the waiter we signal() racing to - acquire the lock. Calling yield or sleep helps there but does not solve the - problem. Sleeping ourselves is much more reliable--and since we're handing - off the lock to a waiter we know exists, it's safe, as they'll certainly - re-wake us later. -*/ -Caml_inline void st_thread_yield(st_masterlock * m) -{ - pthread_mutex_lock(&m->lock); - /* We must hold the lock to call this. */ - assert(m->busy); - - /* We already checked this without the lock, but we might have raced--if - there's no waiter, there's nothing to do and no one to wake us if we did - wait, so just keep going. 
*/ - if (m->waiters == 0) { - pthread_mutex_unlock(&m->lock); - return; - } - - m->busy = 0; - custom_condvar_signal(&m->is_free); - m->waiters++; - do { - /* Note: the POSIX spec prevents the above signal from pairing with this - wait, which is good: we'll reliably continue waiting until the next - yield() or enter_blocking_section() call (or we see a spurious condvar - wakeup, which are rare at best.) */ - custom_condvar_wait(&m->is_free, &m->lock); - } while (m->busy); - m->busy = 1; - m->waiters--; - pthread_mutex_unlock(&m->lock); -} - -/* Mutexes */ - -typedef pthread_mutex_t * st_mutex; - -static int st_mutex_create(st_mutex * res) -{ - int rc; - pthread_mutexattr_t attr; - st_mutex m; - - rc = pthread_mutexattr_init(&attr); - if (rc != 0) goto error1; - rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); - if (rc != 0) goto error2; - m = caml_stat_alloc_noexc(sizeof(pthread_mutex_t)); - if (m == NULL) { rc = ENOMEM; goto error2; } - rc = pthread_mutex_init(m, &attr); - if (rc != 0) goto error3; - pthread_mutexattr_destroy(&attr); - *res = m; - return 0; -error3: - caml_stat_free(m); -error2: - pthread_mutexattr_destroy(&attr); -error1: - return rc; -} - -static int st_mutex_destroy(st_mutex m) -{ - int rc; - rc = pthread_mutex_destroy(m); - caml_stat_free(m); - return rc; -} - -#define MUTEX_DEADLOCK EDEADLK - -Caml_inline int st_mutex_lock(st_mutex m) -{ - return pthread_mutex_lock(m); -} - -#define MUTEX_PREVIOUSLY_UNLOCKED 0 -#define MUTEX_ALREADY_LOCKED EBUSY - -Caml_inline int st_mutex_trylock(st_mutex m) -{ - return pthread_mutex_trylock(m); -} - -#define MUTEX_NOT_OWNED EPERM - -Caml_inline int st_mutex_unlock(st_mutex m) -{ - return pthread_mutex_unlock(m); -} - -/* Condition variables */ - -typedef custom_condvar * st_condvar; - -static int st_condvar_create(st_condvar * res) -{ - int rc; - st_condvar c = caml_stat_alloc_noexc(sizeof(custom_condvar)); - if (c == NULL) return ENOMEM; - rc = custom_condvar_init(c); - if (rc != 0) { 
caml_stat_free(c); return rc; } - *res = c; - return 0; -} - -static int st_condvar_destroy(st_condvar c) -{ - int rc; - rc = custom_condvar_destroy(c); - caml_stat_free(c); - return rc; -} - -Caml_inline int st_condvar_signal(st_condvar c) -{ - return custom_condvar_signal(c); -} - -Caml_inline int st_condvar_broadcast(st_condvar c) -{ - return custom_condvar_broadcast(c); -} - -Caml_inline int st_condvar_wait(st_condvar c, st_mutex m) -{ - return custom_condvar_wait(c, m); -} - -/* Triggered events */ - -typedef struct st_event_struct { - pthread_mutex_t lock; /* to protect contents */ - int status; /* 0 = not triggered, 1 = triggered */ - custom_condvar triggered; /* signaled when triggered */ -} * st_event; - -static int st_event_create(st_event * res) -{ - int rc; - st_event e = caml_stat_alloc_noexc(sizeof(struct st_event_struct)); - if (e == NULL) return ENOMEM; - rc = pthread_mutex_init(&e->lock, NULL); - if (rc != 0) { caml_stat_free(e); return rc; } - rc = custom_condvar_init(&e->triggered); - if (rc != 0) - { pthread_mutex_destroy(&e->lock); caml_stat_free(e); return rc; } - e->status = 0; - *res = e; - return 0; -} - -static int st_event_destroy(st_event e) -{ - int rc1, rc2; - rc1 = pthread_mutex_destroy(&e->lock); - rc2 = custom_condvar_destroy(&e->triggered); - caml_stat_free(e); - return rc1 != 0 ? 
rc1 : rc2; -} - -static int st_event_trigger(st_event e) -{ - int rc; - rc = pthread_mutex_lock(&e->lock); - if (rc != 0) return rc; - e->status = 1; - rc = pthread_mutex_unlock(&e->lock); - if (rc != 0) return rc; - rc = custom_condvar_broadcast(&e->triggered); - return rc; -} - -static int st_event_wait(st_event e) -{ - int rc; - rc = pthread_mutex_lock(&e->lock); - if (rc != 0) return rc; - while(e->status == 0) { - rc = custom_condvar_wait(&e->triggered, &e->lock); - if (rc != 0) return rc; - } - rc = pthread_mutex_unlock(&e->lock); - return rc; -} - -/* Reporting errors */ - -static void st_check_error(int retcode, char * msg) -{ - char * err; - int errlen, msglen; - value str; - - if (retcode == 0) return; - if (retcode == ENOMEM) caml_raise_out_of_memory(); - err = strerror(retcode); - msglen = strlen(msg); - errlen = strlen(err); - str = caml_alloc_string(msglen + 2 + errlen); - memmove (&Byte(str, 0), msg, msglen); - memmove (&Byte(str, msglen), ": ", 2); - memmove (&Byte(str, msglen + 2), err, errlen); - caml_raise_sys_error(str); -} - -/* Variable used to stop the "tick" thread */ -static volatile int caml_tick_thread_stop = 0; - -/* The tick thread: posts a SIGPREEMPTION signal periodically */ - -static void * caml_thread_tick(void * arg) -{ - struct timeval timeout; - sigset_t mask; - - /* Block all signals so that we don't try to execute an OCaml signal handler*/ - sigfillset(&mask); - pthread_sigmask(SIG_BLOCK, &mask, NULL); - while(! caml_tick_thread_stop) { - /* select() seems to be the most efficient way to suspend the - thread for sub-second intervals */ - timeout.tv_sec = 0; - timeout.tv_usec = Thread_timeout * 1000; - select(0, NULL, NULL, NULL, &timeout); - /* The preemption signal should never cause a callback, so don't - go through caml_handle_signal(), just record signal delivery via - caml_record_signal(). 
*/ - caml_record_signal(SIGPREEMPTION); - } - return NULL; -} - -/* "At fork" processing */ - -#if defined(__ANDROID__) -/* Android's libc does not include declaration of pthread_atfork; - however, it implements it since API level 10 (Gingerbread). - The reason for the omission is that Android (GUI) applications - are not supposed to fork at all, however this workaround is still - included in case OCaml is used for an Android CLI utility. */ -int pthread_atfork(void (*prepare)(void), void (*parent)(void), - void (*child)(void)); -#endif - -static int st_atfork(void (*fn)(void)) -{ - return pthread_atfork(NULL, NULL, fn); -} +#include "st_pthreads.h" /* Signal handling */ static void st_decode_sigset(value vset, sigset_t * set) { sigemptyset(set); - while (vset != Val_int(0)) { + for (/*nothing*/; vset != Val_emptylist; vset = Field(vset, 1)) { int sig = caml_convert_signal_number(Int_val(Field(vset, 0))); sigaddset(set, sig); - vset = Field(vset, 1); } } @@ -505,24 +44,23 @@ static void st_decode_sigset(value vset, sigset_t * set) static value st_encode_sigset(sigset_t * set) { - value res = Val_int(0); + CAMLparam0(); + CAMLlocal1(res); int i; - Begin_root(res) - for (i = 1; i < NSIG; i++) - if (sigismember(set, i) > 0) { - value newcons = caml_alloc_small(2, 0); - Field(newcons, 0) = Val_int(caml_rev_convert_signal_number(i)); - Field(newcons, 1) = res; - res = newcons; - } - End_roots(); - return res; + res = Val_emptylist; + + for (i = 1; i < NSIG; i++) + if (sigismember(set, i) > 0) { + res = caml_alloc_2(Tag_cons, + Val_int(caml_rev_convert_signal_number(i)), res); + } + CAMLreturn(res); } static int sigmask_cmd[3] = { SIG_SETMASK, SIG_BLOCK, SIG_UNBLOCK }; -value caml_thread_sigmask(value cmd, value sigs) /* ML */ +value caml_thread_sigmask(value cmd, value sigs) { int how; sigset_t set, oldset; @@ -533,13 +71,13 @@ value caml_thread_sigmask(value cmd, value sigs) /* ML */ caml_enter_blocking_section(); retcode = pthread_sigmask(how, &set, &oldset); 
caml_leave_blocking_section(); - st_check_error(retcode, "Thread.sigmask"); + sync_check_error(retcode, "Thread.sigmask"); /* Run any handlers for just-unmasked pending signals */ caml_process_pending_actions(); return st_encode_sigset(&oldset); } -value caml_wait_signal(value sigs) /* ML */ +value caml_wait_signal(value sigs) { #ifdef HAS_SIGWAIT sigset_t set; @@ -549,7 +87,7 @@ value caml_wait_signal(value sigs) /* ML */ caml_enter_blocking_section(); retcode = sigwait(&set, &signo); caml_leave_blocking_section(); - st_check_error(retcode, "Thread.wait_signal"); + sync_check_error(retcode, "Thread.wait_signal"); return Val_int(caml_rev_convert_signal_number(signo)); #else caml_invalid_argument("Thread.wait_signal not implemented"); diff --git a/ocaml/otherlibs/systhreads/st_pthreads.h b/ocaml/otherlibs/systhreads/st_pthreads.h index 26e32caba5e..bd8839b6de2 100644 --- a/ocaml/otherlibs/systhreads/st_pthreads.h +++ b/ocaml/otherlibs/systhreads/st_pthreads.h @@ -13,12 +13,6 @@ /* */ /**************************************************************************/ -/* CR ocaml 5 runtime: When we update the OCaml 5 runtime, we'll need to - update this library as well. The base of - https://github.com/ocaml-flambda/ocaml-jst/pull/222 may be a good starting - point. - */ - /* POSIX thread implementation of the "st" interface */ #include @@ -29,13 +23,9 @@ #include #include #include -#include #ifdef HAS_UNISTD #include #endif -#include -#include -#include typedef int st_retcode; @@ -99,78 +89,6 @@ Caml_inline void st_tls_set(st_tlskey k, void * v) pthread_setspecific(k, v); } -/* If we're using glibc, use a custom condition variable implementation to - avoid this bug: https://sourceware.org/bugzilla/show_bug.cgi?id=25847 - - For now we only have this on linux because it directly uses the linux futex - syscalls. 
*/ -#if defined(__linux__) && defined(__GNU_LIBRARY__) && defined(__GLIBC__) && defined(__GLIBC_MINOR__) -typedef struct { - volatile unsigned counter; -} custom_condvar; - -static int custom_condvar_init(custom_condvar * cv) -{ - cv->counter = 0; - return 0; -} - -static int custom_condvar_destroy(custom_condvar * cv) -{ - return 0; -} - -static int custom_condvar_wait(custom_condvar * cv, pthread_mutex_t * mutex) -{ - unsigned old_count = cv->counter; - pthread_mutex_unlock(mutex); - syscall(SYS_futex, &cv->counter, FUTEX_WAIT_PRIVATE, old_count, NULL, NULL, 0); - pthread_mutex_lock(mutex); - return 0; -} - -static int custom_condvar_signal(custom_condvar * cv) -{ - __sync_add_and_fetch(&cv->counter, 1); - syscall(SYS_futex, &cv->counter, FUTEX_WAKE_PRIVATE, 1, NULL, NULL, 0); - return 0; -} - -static int custom_condvar_broadcast(custom_condvar * cv) -{ - __sync_add_and_fetch(&cv->counter, 1); - syscall(SYS_futex, &cv->counter, FUTEX_WAKE_PRIVATE, INT_MAX, NULL, NULL, 0); - return 0; -} -#else -typedef pthread_cond_t custom_condvar; - -static int custom_condvar_init(custom_condvar * cv) -{ - return pthread_cond_init(cv, NULL); -} - -static int custom_condvar_destroy(custom_condvar * cv) -{ - return pthread_cond_destroy(cv); -} - -static int custom_condvar_wait(custom_condvar * cv, pthread_mutex_t * mutex) -{ - return pthread_cond_wait(cv, mutex); -} - -static int custom_condvar_signal(custom_condvar * cv) -{ - return pthread_cond_signal(cv); -} - -static int custom_condvar_broadcast(custom_condvar * cv) -{ - return pthread_cond_broadcast(cv); -} -#endif - /* The master lock. This is a mutex that is held most of the time, so we implement it in a slightly convoluted way to avoid all risks of busy-waiting. 
Also, we count the number of waiting @@ -182,7 +100,7 @@ typedef struct { pthread_mutex_t lock; /* to protect contents */ uintnat busy; /* 0 = free, 1 = taken */ atomic_uintnat waiters; /* number of threads waiting on master lock */ - custom_condvar is_free; /* signaled when free */ + pthread_cond_t is_free; /* signaled when free */ } st_masterlock; static void st_masterlock_init(st_masterlock * m) @@ -190,7 +108,7 @@ static void st_masterlock_init(st_masterlock * m) if (!m->init) { // FIXME: check errors pthread_mutex_init(&m->lock, NULL); - custom_condvar_init(&m->is_free); + pthread_cond_init(&m->is_free, NULL); m->init = 1; } m->busy = 1; @@ -238,7 +156,7 @@ static void st_masterlock_acquire(st_masterlock *m) pthread_mutex_lock(&m->lock); while (m->busy) { atomic_fetch_add(&m->waiters, +1); - custom_condvar_wait(&m->is_free, &m->lock); + pthread_cond_wait(&m->is_free, &m->lock); atomic_fetch_add(&m->waiters, -1); } m->busy = 1; @@ -253,7 +171,7 @@ static void st_masterlock_release(st_masterlock * m) pthread_mutex_lock(&m->lock); m->busy = 0; st_bt_lock_release(m); - custom_condvar_signal(&m->is_free); + pthread_cond_signal(&m->is_free); pthread_mutex_unlock(&m->lock); return; @@ -285,7 +203,7 @@ Caml_inline void st_thread_yield(st_masterlock * m) m->busy = 0; atomic_fetch_add(&m->waiters, +1); - custom_condvar_signal(&m->is_free); + pthread_cond_signal(&m->is_free); /* releasing the domain lock but not triggering bt messaging messaging the bt should not be required because yield assumes that a thread will resume execution (be it the yielding thread @@ -297,7 +215,7 @@ Caml_inline void st_thread_yield(st_masterlock * m) wait, which is good: we'll reliably continue waiting until the next yield() or enter_blocking_section() call (or we see a spurious condvar wakeup, which are rare at best.) 
*/ - custom_condvar_wait(&m->is_free, &m->lock); + pthread_cond_wait(&m->is_free, &m->lock); } while (m->busy); m->busy = 1; @@ -315,7 +233,7 @@ Caml_inline void st_thread_yield(st_masterlock * m) typedef struct st_event_struct { pthread_mutex_t lock; /* to protect contents */ int status; /* 0 = not triggered, 1 = triggered */ - custom_condvar triggered; /* signaled when triggered */ + pthread_cond_t triggered; /* signaled when triggered */ } * st_event; @@ -326,7 +244,7 @@ static int st_event_create(st_event * res) if (e == NULL) return ENOMEM; rc = pthread_mutex_init(&e->lock, NULL); if (rc != 0) { caml_stat_free(e); return rc; } - rc = custom_condvar_init(&e->triggered); + rc = pthread_cond_init(&e->triggered, NULL); if (rc != 0) { pthread_mutex_destroy(&e->lock); caml_stat_free(e); return rc; } e->status = 0; @@ -338,7 +256,7 @@ static int st_event_destroy(st_event e) { int rc1, rc2; rc1 = pthread_mutex_destroy(&e->lock); - rc2 = custom_condvar_destroy(&e->triggered); + rc2 = pthread_cond_destroy(&e->triggered); caml_stat_free(e); return rc1 != 0 ? 
rc1 : rc2; } @@ -351,7 +269,7 @@ static int st_event_trigger(st_event e) e->status = 1; rc = pthread_mutex_unlock(&e->lock); if (rc != 0) return rc; - rc = custom_condvar_broadcast(&e->triggered); + rc = pthread_cond_broadcast(&e->triggered); return rc; } @@ -361,7 +279,7 @@ static int st_event_wait(st_event e) rc = pthread_mutex_lock(&e->lock); if (rc != 0) return rc; while(e->status == 0) { - rc = custom_condvar_wait(&e->triggered, &e->lock); + rc = pthread_cond_wait(&e->triggered, &e->lock); if (rc != 0) return rc; } rc = pthread_mutex_unlock(&e->lock); diff --git a/ocaml/otherlibs/systhreads/st_stubs.c b/ocaml/otherlibs/systhreads/st_stubs.c index ace63ea0cd7..fe1df205eca 100644 --- a/ocaml/otherlibs/systhreads/st_stubs.c +++ b/ocaml/otherlibs/systhreads/st_stubs.c @@ -13,23 +13,28 @@ /* */ /**************************************************************************/ -// CR ocaml 5 runtime: We will need to pull in changes from the same file in -// [tip-5] tag in ocaml-jst. We're considering this file to be part of the -// runtime. - #define CAML_INTERNALS -#define CAML_NAME_SPACE -#include "caml/compatibility.h" -#undef CAML_NAME_SPACE +#if defined(_WIN32) && !defined(NATIVE_CODE) +/* Ensure that pthread.h marks symbols __declspec(dllimport) so that they can be + picked up from the runtime (which will have linked winpthreads statically). + mingw-w64 11.0.0 introduced WINPTHREADS_USE_DLLIMPORT to do this explicitly; + prior versions co-opted this on the internal DLL_EXPORT, but this is ignored + in 11.0 and later unless IN_WINPTHREAD is also defined, so we can safely + define both to support both versions. 
*/ +#define WINPTHREADS_USE_DLLIMPORT +#define DLL_EXPORT +#endif #include "caml/alloc.h" #include "caml/backtrace.h" +#include "caml/backtrace_prim.h" #include "caml/callback.h" #include "caml/custom.h" #include "caml/debugger.h" #include "caml/domain.h" #include "caml/fail.h" +#include "caml/fiber.h" #include "caml/io.h" #include "caml/memory.h" #include "caml/misc.h" @@ -37,21 +42,14 @@ #include "caml/printexc.h" #include "caml/roots.h" #include "caml/signals.h" -#ifdef NATIVE_CODE -#include "caml/stack.h" -#else -#include "caml/stacks.h" -#endif +#include "caml/sync.h" #include "caml/sys.h" #include "caml/memprof.h" -#define CAMLextern_libthreads -#include "threads.h" +#include "../../runtime/sync_posix.h" -#ifndef NATIVE_CODE -/* Initial size of bytecode stack when a thread is created (4 Ko) */ -#define Thread_stack_size (Stack_size / 4) -#endif +/* threads.h is *not* included since it contains the _external_ declarations for + the caml_c_thread_register and caml_c_thread_unregister functions. */ /* Max computation time before rescheduling, in milliseconds */ #define Thread_timeout 50 @@ -63,367 +61,201 @@ #include "st_posix.h" #endif -/* Atomics */ -#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 8 - /* GCC 4.8 shipped with a working implementation of atomics, but no - stdatomic.h header, so we need to use GCC-specific intrinsics. 
*/ - - #define _Atomic /* GCC intrinsics work on normal variables */ - #define atomic_store(v, x) \ - __atomic_store_n((v), (x), __ATOMIC_SEQ_CST) - #define atomic_load(v) \ - __atomic_load_n((v), __ATOMIC_SEQ_CST) - #define atomic_exchange(v, x) \ - __atomic_exchange_n((v), (x), __ATOMIC_SEQ_CST) -#else - #include -#endif - - /* The ML value describing a thread (heap-allocated) */ -struct caml_thread_descr { - value ident; /* Unique integer ID */ - value start_closure; /* The closure to start this thread */ - value terminated; /* Triggered event for thread termination */ -}; - -#define Ident(v) (((struct caml_thread_descr *)(v))->ident) -#define Start_closure(v) (((struct caml_thread_descr *)(v))->start_closure) -#define Terminated(v) (((struct caml_thread_descr *)(v))->terminated) +#define Ident(v) Field(v, 0) +#define Start_closure(v) Field(v, 1) +#define Terminated(v) Field(v, 2) /* The infos on threads (allocated via caml_stat_alloc()) */ struct caml_thread_struct { + value descr; /* The heap-allocated descriptor (root) */ - struct caml_thread_struct * next; /* Double linking of running threads */ + struct caml_thread_struct * next; /* Doubly-linked list of running threads */ struct caml_thread_struct * prev; -#ifdef NATIVE_CODE - char * top_of_stack; /* Top of stack for this thread (approx.) 
*/ - char * bottom_of_stack; /* Saved value of Caml_state->_bottom_of_stack */ - uintnat last_retaddr; /* Saved value of Caml_state->_last_return_address */ - value * gc_regs; /* Saved value of Caml_state->_gc_regs */ - char * exn_handler; /* Saved value of Caml_state->_exn_handler */ - char * async_exception_pointer; - /* Saved value of Caml_state->_async_exception_pointer */ - struct caml__roots_block * local_roots; /* Saved value of local_roots */ - struct caml_local_arenas * local_arenas; - struct longjmp_buffer * exit_buf; /* For thread exit */ -#else - value * stack_low; /* The execution stack for this thread */ - value * stack_high; - value * stack_threshold; - value * sp; /* Saved value of Caml_state->_extern_sp for this thread */ - value * trapsp; /* Saved value of Caml_state->_trapsp for this thread */ - /* Saved value of Caml_state->_local_roots */ - struct caml__roots_block * local_roots; - struct longjmp_buffer * external_raise; /* Saved Caml_state->_external_raise */ - struct longjmp_buffer * external_raise_async; - /* Saved Caml_state->_external_raise_async */ + int domain_id; /* The id of the domain to which this thread belongs */ + struct stack_info* current_stack; /* saved Caml_state->current_stack */ + struct c_stack_link* c_stack; /* saved Caml_state->c_stack */ + /* Note: we do not save Caml_state->stack_cache, because it can + safely be shared between all threads on the same domain. 
*/ + struct caml__roots_block *local_roots; /* saved value of local_roots */ + int backtrace_pos; /* saved value of Caml_state->backtrace_pos */ + backtrace_slot * backtrace_buffer; + /* saved value of Caml_state->backtrace_buffer */ + value backtrace_last_exn; + /* saved value of Caml_state->backtrace_last_exn (root) */ + value * gc_regs; /* saved value of Caml_state->gc_regs */ + value * gc_regs_buckets; /* saved value of Caml_state->gc_regs_buckets */ + void * exn_handler; /* saved value of Caml_state->exn_handler */ + +#ifndef NATIVE_CODE + intnat trap_sp_off; /* saved value of Caml_state->trap_sp_off */ + intnat trap_barrier_off; /* saved value of Caml_state->trap_barrier_off */ + struct caml_exception_context* external_raise; + /* saved value of Caml_state->external_raise */ +#endif + +#ifdef POSIX_SIGNALS + sigset_t init_mask; #endif - int backtrace_pos; /* Saved Caml_state->_backtrace_pos */ - backtrace_slot * backtrace_buffer; /* Saved Caml_state->_backtrace_buffer */ - value backtrace_last_exn; /* Saved Caml_state->_backtrace_last_exn (root) */ - struct caml_memprof_th_ctx *memprof_ctx; }; -typedef struct caml_thread_struct * caml_thread_t; +typedef struct caml_thread_struct* caml_thread_t; + +/* Thread-local key for accessing the current thread's [caml_thread_t] */ +st_tlskey caml_thread_key; -/* The "head" of the circular list of thread descriptors */ -static caml_thread_t all_threads = NULL; +#define This_thread ((caml_thread_t) st_tls_get(caml_thread_key)) -/* The descriptor for the currently executing thread */ -static caml_thread_t curr_thread = NULL; +/* overall table for threads across domains */ +struct caml_thread_table { + caml_thread_t active_thread; + st_masterlock thread_lock; + int tick_thread_running; + st_thread_id tick_thread_id; +}; + +/* thread_table instance, up to Max_domains */ +static struct caml_thread_table thread_table[Max_domains]; + +#define Thread_lock(dom_id) &thread_table[dom_id].thread_lock -/* The master lock protecting 
the OCaml runtime system */ -static struct caml_locking_scheme* _Atomic caml_locking_scheme; +static void thread_lock_acquire(int dom_id) +{ + st_masterlock_acquire(Thread_lock(dom_id)); +} -/* Whether the "tick" thread is already running */ -static int caml_tick_thread_running = 0; +static void thread_lock_release(int dom_id) +{ + st_masterlock_release(Thread_lock(dom_id)); +} -/* Whether the "tick" thread is enabled */ -static int caml_tick_thread_enabled = 1; +/* The remaining fields are accessed while holding the domain lock */ -/* The thread identifier of the "tick" thread */ -static st_thread_id caml_tick_thread_id; +/* The descriptor for the currently executing thread for this domain; + also the head of a circular list of thread descriptors for this + domain. Invariant: at every safe point, either Active_thread is + NULL, or Caml_state is setup for Active_thread. */ +#define Active_thread thread_table[Caml_state->id].active_thread -/* The key used for storing the thread descriptor in the specific data - of the corresponding system thread. 
*/ -static st_tlskey thread_descriptor_key; +/* Whether the "tick" thread is already running for this domain */ +#define Tick_thread_running thread_table[Caml_state->id].tick_thread_running -/* The key used for unlocking I/O channels on exceptions */ -static st_tlskey last_channel_locked_key; +/* The thread identifier of the "tick" thread for this domain */ +#define Tick_thread_id thread_table[Caml_state->id].tick_thread_id /* Identifier for next thread creation */ -static intnat thread_next_ident = 0; +static atomic_uintnat thread_next_id = 0; /* Forward declarations */ static value caml_threadstatus_new (void); static void caml_threadstatus_terminate (value); static st_retcode caml_threadstatus_wait (value); -/* Imports from the native-code runtime system */ -#ifdef NATIVE_CODE -extern struct longjmp_buffer caml_termination_jmpbuf; -extern void (*caml_termination_hook)(void); -#endif - -/* The default locking scheme */ -static st_masterlock default_master_lock; - -static int default_can_skip_yield(void* m) -{ - return st_masterlock_waiters(m) == 0; -} - -struct caml_locking_scheme caml_default_locking_scheme = - { &default_master_lock, - (void (*)(void*))&st_masterlock_acquire, - (void (*)(void*))&st_masterlock_release, - NULL, - NULL, - (void (*)(void*))&st_masterlock_init, - default_can_skip_yield, - (void (*)(void*))&st_thread_yield }; - -static void acquire_runtime_lock() -{ - struct caml_locking_scheme* s; - - /* The locking scheme may be changed by the thread that currently - holds it. This means that it may change while we're waiting to - acquire it, so by the time we acquire it it may no longer be the - right scheme. */ - - retry: - s = atomic_load(&caml_locking_scheme); - s->lock(s->context); - if (atomic_load(&caml_locking_scheme) != s) { - /* This is no longer the right scheme. 
Unlock and try again */ - s->unlock(s->context); - goto retry; - } -} - -static void release_runtime_lock() -{ - /* There is no tricky case here like in acquire, as only the holder - of the lock can change it. (Here, that's us) */ - struct caml_locking_scheme* s; - s = atomic_load(&caml_locking_scheme); - s->unlock(s->context); -} - /* Hook for scanning the stacks of the other threads */ -static void (*prev_scan_roots_hook) (scanning_action); - -static void caml_thread_scan_roots(scanning_action action) -{ - caml_thread_t th = curr_thread; - do { - (*action)(th->descr, &th->descr); - (*action)(th->backtrace_last_exn, &th->backtrace_last_exn); - /* Don't rescan the stack of the current thread, it was done already */ - if (th != curr_thread) { -#ifdef NATIVE_CODE - if (th->bottom_of_stack != NULL) - caml_do_local_roots(action, action, th->bottom_of_stack, th->last_retaddr, - th->gc_regs, th->local_roots, th->local_arenas); -#else - caml_do_local_roots(action, th->sp, th->stack_high, th->local_roots); -#endif - } - th = th->next; - } while (th != curr_thread); - /* Hook */ - if (prev_scan_roots_hook != NULL) (*prev_scan_roots_hook)(action); -} - -/* Hook for iterating over Memprof's entries arrays */ +static scan_roots_hook prev_scan_roots_hook; -static void memprof_ctx_iter(th_ctx_action f, void* data) +static void caml_thread_scan_roots( + scanning_action action, scanning_action_flags fflags, void *fdata, + caml_domain_state *domain_state) { - caml_thread_t th = curr_thread; - do { - f(th->memprof_ctx, data); - th = th->next; - } while (th != curr_thread); -} - -/* Saving and restoring runtime state in curr_thread */ + caml_thread_t th; -CAMLexport void caml_thread_save_runtime_state(void) -{ - if (Caml_state->_in_minor_collection) - caml_fatal_error("Thread switch from inside minor GC"); -#ifdef NATIVE_CODE - curr_thread->top_of_stack = Caml_state->_top_of_stack; - curr_thread->bottom_of_stack = Caml_state->_bottom_of_stack; - curr_thread->last_retaddr = 
Caml_state->_last_return_address; - curr_thread->gc_regs = Caml_state->_gc_regs; - curr_thread->exn_handler = Caml_state->_exn_handler; - curr_thread->async_exception_pointer = Caml_state->_async_exception_pointer; - curr_thread->local_arenas = caml_get_local_arenas(); -#else - curr_thread->stack_low = Caml_state->_stack_low; - curr_thread->stack_high = Caml_state->_stack_high; - curr_thread->stack_threshold = Caml_state->_stack_threshold; - curr_thread->sp = Caml_state->_extern_sp; - curr_thread->trapsp = Caml_state->_trapsp; - curr_thread->external_raise = Caml_state->_external_raise; - curr_thread->external_raise_async = Caml_state->_external_raise_async; + th = Active_thread; + + /* GC could be triggered before [Active_thread] is initialized */ + if (th != NULL) { + do { + (*action)(fdata, th->descr, &th->descr); + (*action)(fdata, th->backtrace_last_exn, &th->backtrace_last_exn); + if (th != Active_thread) { + if (th->current_stack != NULL) + caml_do_local_roots(action, fflags, fdata, + th->local_roots, th->current_stack, th->gc_regs); + } + th = th->next; + } while (th != Active_thread); + + }; + + if (prev_scan_roots_hook != NULL) + (*prev_scan_roots_hook)(action, fflags, fdata, domain_state); + + return; +} + +static void save_runtime_state(void) +{ + CAMLassert(This_thread != NULL); + caml_thread_t this_thread = This_thread; + this_thread->current_stack = Caml_state->current_stack; + this_thread->c_stack = Caml_state->c_stack; + this_thread->gc_regs = Caml_state->gc_regs; + this_thread->gc_regs_buckets = Caml_state->gc_regs_buckets; + this_thread->exn_handler = Caml_state->exn_handler; + this_thread->local_roots = Caml_state->local_roots; + this_thread->backtrace_pos = Caml_state->backtrace_pos; + this_thread->backtrace_buffer = Caml_state->backtrace_buffer; + this_thread->backtrace_last_exn = Caml_state->backtrace_last_exn; +#ifndef NATIVE_CODE + this_thread->trap_sp_off = Caml_state->trap_sp_off; + this_thread->trap_barrier_off = 
Caml_state->trap_barrier_off; + this_thread->external_raise = Caml_state->external_raise; #endif - curr_thread->local_roots = Caml_state->_local_roots; - curr_thread->backtrace_pos = Caml_state->_backtrace_pos; - curr_thread->backtrace_buffer = Caml_state->_backtrace_buffer; - curr_thread->backtrace_last_exn = Caml_state->_backtrace_last_exn; - caml_memprof_leave_thread(); } -CAMLexport void caml_thread_restore_runtime_state(void) +static void restore_runtime_state(caml_thread_t th) { - /* Update curr_thread to point to the thread descriptor corresponding - to the thread currently executing */ - curr_thread = st_tls_get(thread_descriptor_key); - -#ifdef NATIVE_CODE - Caml_state->_top_of_stack = curr_thread->top_of_stack; - Caml_state->_bottom_of_stack= curr_thread->bottom_of_stack; - Caml_state->_last_return_address = curr_thread->last_retaddr; - Caml_state->_gc_regs = curr_thread->gc_regs; - Caml_state->_exn_handler = curr_thread->exn_handler; - Caml_state->_async_exception_pointer = curr_thread->async_exception_pointer; - caml_set_local_arenas(curr_thread->local_arenas); -#else - Caml_state->_stack_low = curr_thread->stack_low; - Caml_state->_stack_high = curr_thread->stack_high; - Caml_state->_stack_threshold = curr_thread->stack_threshold; - Caml_state->_extern_sp = curr_thread->sp; - Caml_state->_trapsp = curr_thread->trapsp; - Caml_state->_external_raise = curr_thread->external_raise; - Caml_state->_external_raise_async = curr_thread->external_raise_async; + CAMLassert(th != NULL); + Active_thread = th; + Caml_state->current_stack = th->current_stack; + Caml_state->c_stack = th->c_stack; + Caml_state->gc_regs = th->gc_regs; + Caml_state->gc_regs_buckets = th->gc_regs_buckets; + Caml_state->exn_handler = th->exn_handler; + Caml_state->local_roots = th->local_roots; + Caml_state->backtrace_pos = th->backtrace_pos; + Caml_state->backtrace_buffer = th->backtrace_buffer; + Caml_state->backtrace_last_exn = th->backtrace_last_exn; +#ifndef NATIVE_CODE + 
Caml_state->trap_sp_off = th->trap_sp_off; + Caml_state->trap_barrier_off = th->trap_barrier_off; + Caml_state->external_raise = th->external_raise; #endif - Caml_state->_local_roots = curr_thread->local_roots; - Caml_state->_backtrace_pos = curr_thread->backtrace_pos; - Caml_state->_backtrace_buffer = curr_thread->backtrace_buffer; - Caml_state->_backtrace_last_exn = curr_thread->backtrace_last_exn; - caml_memprof_enter_thread(curr_thread->memprof_ctx); } -CAMLexport void caml_switch_runtime_locking_scheme(struct caml_locking_scheme* new) +CAMLprim value caml_thread_cleanup(value unit); + +static void reset_active(void) { - struct caml_locking_scheme* old; - - caml_thread_save_runtime_state(); - old = atomic_exchange(&caml_locking_scheme, new); - /* We hold 'old', but it is no longer the runtime lock */ - old->unlock(old->context); - acquire_runtime_lock(); - caml_thread_restore_runtime_state(); + Active_thread = NULL; + /* If no other OCaml thread remains, ask the tick thread to stop + so that it does not prevent the whole process from exiting (#9971) */ + caml_thread_cleanup(Val_unit); } - /* Hooks for caml_enter_blocking_section and caml_leave_blocking_section */ - static void caml_thread_enter_blocking_section(void) { /* Save the current runtime state in the thread descriptor of the current thread */ - caml_thread_save_runtime_state(); + save_runtime_state(); /* Tell other threads that the runtime is free */ - release_runtime_lock(); + thread_lock_release(Caml_state->id); } static void caml_thread_leave_blocking_section(void) { -#ifdef _WIN32 - /* TlsGetValue calls SetLastError which will mask any error which occurred - prior to the caml_thread_leave_blocking_section call. EnterCriticalSection - does not do this. 
*/ - DWORD error = GetLastError(); -#endif + caml_thread_t th = This_thread; /* Wait until the runtime is free */ - acquire_runtime_lock(); - caml_thread_restore_runtime_state(); -#ifdef _WIN32 - SetLastError(error); -#endif -} - -/* Hooks for I/O locking */ - -static void caml_io_mutex_free(struct channel *chan) -{ - st_mutex mutex = chan->mutex; - if (mutex != NULL) { - st_mutex_destroy(mutex); - chan->mutex = NULL; - } -} - -static void caml_io_mutex_lock(struct channel *chan) -{ - st_mutex mutex = chan->mutex; - - if (mutex == NULL) { - st_check_error(st_mutex_create(&mutex), "channel locking"); /*PR#7038*/ - chan->mutex = mutex; - } - /* PR#4351: first try to acquire mutex without releasing the master lock */ - if (st_mutex_trylock(mutex) == MUTEX_PREVIOUSLY_UNLOCKED) { - st_tls_set(last_channel_locked_key, (void *) chan); - return; - } - /* If unsuccessful, block on mutex */ - caml_enter_blocking_section(); - st_mutex_lock(mutex); - /* Problem: if a signal occurs at this point, - and the signal handler raises an exception, we will not - unlock the mutex. The alternative (doing the setspecific - before locking the mutex is also incorrect, since we could - then unlock a mutex that is unlocked or locked by someone else. 
*/ - st_tls_set(last_channel_locked_key, (void *) chan); - caml_leave_blocking_section(); -} - -static void caml_io_mutex_unlock(struct channel *chan) -{ - st_mutex_unlock(chan->mutex); - st_tls_set(last_channel_locked_key, NULL); -} - -static void caml_io_mutex_unlock_exn(void) -{ - struct channel * chan = st_tls_get(last_channel_locked_key); - if (chan != NULL) caml_io_mutex_unlock(chan); -} - -/* Hook for estimating stack usage */ - -static uintnat (*prev_stack_usage_hook)(void); - -static uintnat caml_thread_stack_usage(void) -{ - uintnat sz; - caml_thread_t th; - - /* Don't add stack for current thread, this is done elsewhere */ - for (sz = 0, th = curr_thread->next; - th != curr_thread; - th = th->next) { -#ifdef NATIVE_CODE - if(th->top_of_stack != NULL && th->bottom_of_stack != NULL && - th->top_of_stack > th->bottom_of_stack) - sz += (value *) th->top_of_stack - (value *) th->bottom_of_stack; -#else - sz += th->stack_high - th->sp; -#endif - } - if (prev_stack_usage_hook != NULL) - sz += prev_stack_usage_hook(); - return sz; + thread_lock_acquire(th->domain_id); + /* Update Active_thread to point to the thread descriptor + corresponding to the thread currently executing and restore the + runtime state */ + restore_runtime_state(th); } /* Create and setup a new thread info block. 
@@ -433,277 +265,329 @@ static uintnat caml_thread_stack_usage(void) static caml_thread_t caml_thread_new_info(void) { caml_thread_t th; - th = (caml_thread_t) caml_stat_alloc_noexc(sizeof(struct caml_thread_struct)); + caml_domain_state *domain_state; + uintnat stack_wsize = caml_get_init_stack_wsize(); + + domain_state = Caml_state; + th = NULL; + th = (caml_thread_t)caml_stat_alloc_noexc(sizeof(struct caml_thread_struct)); if (th == NULL) return NULL; - th->descr = Val_unit; /* filled later */ -#ifdef NATIVE_CODE - th->bottom_of_stack = NULL; - th->top_of_stack = NULL; - th->last_retaddr = 1; - th->exn_handler = NULL; - th->async_exception_pointer = NULL; - th->local_roots = NULL; - th->local_arenas = NULL; - th->exit_buf = NULL; -#else - /* Allocate the stacks */ - th->stack_low = (value *) caml_stat_alloc(Thread_stack_size); - th->stack_high = th->stack_low + Thread_stack_size / sizeof(value); - th->stack_threshold = th->stack_low + Stack_threshold / sizeof(value); - th->sp = th->stack_high; - th->trapsp = th->stack_high; + th->descr = Val_unit; + th->next = NULL; + th->prev = NULL; + th->domain_id = domain_state->id; + th->current_stack = caml_alloc_main_stack(stack_wsize); + if (th->current_stack == NULL) { + caml_stat_free(th); + return NULL; + } + th->c_stack = NULL; th->local_roots = NULL; - th->external_raise = NULL; - th->external_raise_async = NULL; -#endif th->backtrace_pos = 0; th->backtrace_buffer = NULL; th->backtrace_last_exn = Val_unit; - th->memprof_ctx = caml_memprof_new_th_ctx(); + th->gc_regs = NULL; + th->gc_regs_buckets = NULL; + th->exn_handler = NULL; + +#ifndef NATIVE_CODE + th->trap_sp_off = 1; + th->trap_barrier_off = 2; + th->external_raise = NULL; +#endif + return th; } +/* Free the resources held by a thread. 
*/ +void caml_thread_free_info(caml_thread_t th) +{ + /* the following fields do not need any specific cleanup: + descr: heap-allocated + c_stack: stack-allocated + local_roots: stack-allocated + backtrace_last_exn: heap-allocated + gc_regs: + must be empty for a terminated thread + (we assume that the C call stack must be empty at + thread-termination point, so there are no gc_regs buckets in + use in this variable nor on the stack) + exn_handler: stack-allocated + external_raise: stack-allocated + init_mask: stack-allocated + */ + caml_free_stack(th->current_stack); + caml_free_backtrace_buffer(th->backtrace_buffer); + + /* Remark: we could share gc_regs_buckets between threads on a same + domain, but this might break the invariant that it is always + non-empty at the point where we switch from OCaml to C, so we + would need to do something more complex when activating a thread + to restore this invariant. */ + caml_free_gc_regs_buckets(th->gc_regs_buckets); + + caml_stat_free(th); +} + /* Allocate a thread descriptor block. */ static value caml_thread_new_descriptor(value clos) { - value mu = Val_unit; + CAMLparam1(clos); + CAMLlocal1(mu); value descr; - Begin_roots2 (clos, mu) - /* Create and initialize the termination semaphore */ - mu = caml_threadstatus_new(); - /* Create a descriptor for the new thread */ - descr = caml_alloc_small(3, 0); - Ident(descr) = Val_long(thread_next_ident); - Start_closure(descr) = clos; - Terminated(descr) = mu; - thread_next_ident++; - End_roots(); - return descr; + /* Create and initialize the termination semaphore */ + mu = caml_threadstatus_new(); + /* Create a descriptor for the new thread */ + descr = caml_alloc_3(0, Val_long(atomic_fetch_add(&thread_next_id, +1)), + clos, mu); + CAMLreturn(descr); } -/* Remove a thread info block from the list of threads. - Free it and its stack resources. 
*/ - -static void caml_thread_remove_info(caml_thread_t th) +/* Remove a thread info block from the list of threads + and free its resources. */ +static void caml_thread_remove_and_free(caml_thread_t th) { if (th->next == th) - all_threads = NULL; /* last OCaml thread exiting */ - else if (all_threads == th) - all_threads = th->next; /* PR#5295 */ + reset_active(); /* last OCaml thread exiting */ + else if (Active_thread == th) + restore_runtime_state(th->next); /* PR#5295 */ th->next->prev = th->prev; th->prev->next = th->next; -#ifndef NATIVE_CODE - caml_stat_free(th->stack_low); -#endif - if (th->backtrace_buffer != NULL) caml_stat_free(th->backtrace_buffer); - caml_stat_free(th); + + caml_thread_free_info(th); + return; } /* Reinitialize the thread machinery after a fork() (PR#4577) */ +/* TODO(engil): more work on the multicore fork machinery. */ static void caml_thread_reinitialize(void) { - struct channel * chan; - struct caml_locking_scheme* s; - - /* Remove all other threads (now nonexistent) - from the doubly-linked list of threads */ - while (curr_thread->next != curr_thread) { - caml_memprof_delete_th_ctx(curr_thread->next->memprof_ctx); - caml_thread_remove_info(curr_thread->next); - } + caml_thread_t th, next; - /* Reinitialize the master lock machinery, - just in case the fork happened while other threads were doing - caml_leave_blocking_section */ - s = atomic_load(&caml_locking_scheme); - s->reinitialize_after_fork(s->context); - /* Tick thread is not currently running in child process, will be - re-created at next Thread.create */ - caml_tick_thread_running = 0; - /* Destroy all IO mutexes; will be reinitialized on demand */ - for (chan = caml_all_opened_channels; - chan != NULL; - chan = chan->next) { - if (chan->mutex != NULL) { - st_mutex_destroy(chan->mutex); - chan->mutex = NULL; - } + th = Active_thread->next; + while (th != Active_thread) { + next = th->next; + caml_thread_free_info(th); + th = next; } -} + Active_thread->next = 
Active_thread; + Active_thread->prev = Active_thread; + + /* Within the child, the domain_lock needs to be reset and acquired. */ + caml_reset_domain_lock(); + caml_acquire_domain_lock(); + /* The master lock needs to be initialized again. This process will also be + the effective owner of the lock. So there is no need to run + st_masterlock_acquire (busy = 1) */ + st_masterlock *m = Thread_lock(Caml_state->id); + m->init = 0; /* force initialization */ + st_masterlock_init(m); +} + +CAMLprim value caml_thread_join(value th); + +/* This hook is run when a domain shuts down (see domains.c). + + When a domain shuts down, the state must be cleared to allow proper reuse of + the domain slot the next time a domain is started on this slot. If a program + is single-domain, we mimic OCaml 4's behavior and do not care about ongoing + thread: the program will exit. */ +static void caml_thread_domain_stop_hook(void) { + /* If the program runs multiple domains, we should not let systhreads to hang + around when a domain exit. If the domain is not the last one (and the last + one will always be domain 0) we force the domain to join on every thread + on its chain before wrapping up. */ + if (!caml_domain_alone()) { + + while (Active_thread->next != Active_thread) { + caml_thread_join(Active_thread->next->descr); + } -/* Initialize the thread machinery */ + /* another domain thread may be joining on this domain's descriptor */ + caml_threadstatus_terminate(Terminated(Active_thread->descr)); + /* Shut down the tick thread */ + reset_active(); + /* We free the thread info but not its resources: they are owned + by Caml_state at this point, and will be cleaned-up later. 
*/ + caml_stat_free(This_thread); + }; +} -CAMLprim value caml_thread_initialize(value unit) /* ML */ +static void caml_thread_domain_initialize_hook(void) { - /* Protect against repeated initialization (PR#3532) */ - if (curr_thread != NULL) return Val_unit; + + caml_thread_t new_thread; + /* OS-specific initialization */ st_initialize(); - /* Initialize and acquire the master lock */ - st_masterlock_init(&default_master_lock); - caml_locking_scheme = &caml_default_locking_scheme; - /* Initialize the keys */ - st_tls_newkey(&thread_descriptor_key); - st_tls_newkey(&last_channel_locked_key); - /* Set up a thread info block for the current thread */ - curr_thread = + + st_masterlock_init(Thread_lock(Caml_state->id)); + + new_thread = (caml_thread_t) caml_stat_alloc(sizeof(struct caml_thread_struct)); - curr_thread->descr = caml_thread_new_descriptor(Val_unit); - curr_thread->next = curr_thread; - curr_thread->prev = curr_thread; - all_threads = curr_thread; - curr_thread->backtrace_last_exn = Val_unit; -#ifdef NATIVE_CODE - curr_thread->exit_buf = &caml_termination_jmpbuf; -#endif - curr_thread->memprof_ctx = &caml_memprof_main_ctx; - /* The stack-related fields will be filled in at the next - caml_enter_blocking_section */ - /* Associate the thread descriptor with the thread */ - st_tls_set(thread_descriptor_key, (void *) curr_thread); - st_thread_set_id(Ident(curr_thread->descr)); - /* Set up the hooks */ - prev_scan_roots_hook = caml_scan_roots_hook; - caml_scan_roots_hook = caml_thread_scan_roots; - caml_enter_blocking_section_hook = caml_thread_enter_blocking_section; - caml_leave_blocking_section_hook = caml_thread_leave_blocking_section; -#ifdef NATIVE_CODE - caml_termination_hook = st_thread_exit; -#endif - caml_channel_mutex_free = caml_io_mutex_free; - caml_channel_mutex_lock = caml_io_mutex_lock; - caml_channel_mutex_unlock = caml_io_mutex_unlock; - caml_channel_mutex_unlock_exn = caml_io_mutex_unlock_exn; - prev_stack_usage_hook = caml_stack_usage_hook; 
- caml_stack_usage_hook = caml_thread_stack_usage; - caml_memprof_th_ctx_iter_hook = memprof_ctx_iter; - /* Set up fork() to reinitialize the thread machinery in the child - (PR#4577) */ - st_atfork(caml_thread_reinitialize); - return Val_unit; -} -/* Start tick thread, if not already running */ -static st_retcode start_tick_thread() -{ - st_retcode err; - if (caml_tick_thread_running) return 0; - err = st_thread_create(&caml_tick_thread_id, caml_thread_tick, NULL); - if (err == 0) caml_tick_thread_running = 1; - return err; + new_thread->domain_id = Caml_state->id; + new_thread->descr = caml_thread_new_descriptor(Val_unit); + new_thread->next = new_thread; + new_thread->prev = new_thread; + new_thread->backtrace_last_exn = Val_unit; + + st_tls_set(caml_thread_key, new_thread); + + Active_thread = new_thread; + } -/* Stop tick thread, if currently running */ -static void stop_tick_thread() +CAMLprim value caml_thread_yield(value unit); + +void caml_thread_interrupt_hook(void) { - if (!caml_tick_thread_running) return; - caml_tick_thread_stop = 1; - st_thread_join(caml_tick_thread_id); - caml_tick_thread_stop = 0; - caml_tick_thread_running = 0; + /* Do not attempt to yield from the backup thread */ + if (caml_bt_is_self()) return; + + uintnat is_on = 1; + atomic_uintnat* req_external_interrupt = + &Caml_state->requested_external_interrupt; + + if (atomic_compare_exchange_strong(req_external_interrupt, &is_on, 0)) { + caml_thread_yield(Val_unit); + } + + return; } -CAMLprim value caml_enable_tick_thread(value v_enable) +/* [caml_thread_initialize] initialises the systhreads infrastructure. This + function first sets up the chain for systhreads on this domain, then setup + the global variables and hooks for systhreads to cooperate with the runtime + system. */ +CAMLprim value caml_thread_initialize(value unit) { - int enable = Long_val(v_enable) ? 
1 : 0; + /* Protect against repeated initialization (PR#3532) */ + if (Active_thread != NULL) return Val_unit; - if (enable) { - st_retcode err = start_tick_thread(); - st_check_error(err, "caml_enable_tick_thread"); - } else { - stop_tick_thread(); - } + if (!caml_domain_alone()) + caml_failwith("caml_thread_initialize: cannot initialize Thread " + "while several domains are running."); + + /* Initialize the key to the [caml_thread_t] structure */ + st_tls_newkey(&caml_thread_key); + + /* First initialise the systhread chain on this domain */ + caml_thread_domain_initialize_hook(); + + prev_scan_roots_hook = atomic_exchange(&caml_scan_roots_hook, + caml_thread_scan_roots); + caml_enter_blocking_section_hook = caml_thread_enter_blocking_section; + caml_leave_blocking_section_hook = caml_thread_leave_blocking_section; + caml_domain_external_interrupt_hook = caml_thread_interrupt_hook; + caml_domain_initialize_hook = caml_thread_domain_initialize_hook; + caml_domain_stop_hook = caml_thread_domain_stop_hook; + + caml_atfork_hook = caml_thread_reinitialize; - caml_tick_thread_enabled = enable; return Val_unit; } -/* Cleanup the thread machinery when the runtime is shut down. Joining the tick - thread take 25ms on average / 50ms in the worst case, so we don't do it on - program exit. */ - -CAMLprim value caml_thread_cleanup(value unit) /* ML */ +CAMLprim value caml_thread_cleanup(value unit) { - stop_tick_thread(); + if (Tick_thread_running){ + atomic_store_release(&Tick_thread_stop, 1); + st_thread_join(Tick_thread_id); + atomic_store_release(&Tick_thread_stop, 0); + Tick_thread_running = 0; + } + return Val_unit; } -/* Thread cleanup at termination */ - static void caml_thread_stop(void) { /* PR#5188, PR#7220: some of the global runtime state may have changed as the thread was running, so we save it in the - curr_thread data to make sure that the cleanup logic + This_thread data to make sure that the cleanup logic below uses accurate information. 
*/ - caml_thread_save_runtime_state(); - /* Tell memprof that this thread is terminating. */ - caml_memprof_delete_th_ctx(curr_thread->memprof_ctx); - /* Signal that the thread has terminated */ - caml_threadstatus_terminate(Terminated(curr_thread->descr)); - /* Remove th from the doubly-linked list of threads and free its info block */ - caml_thread_remove_info(curr_thread); - /* If no other OCaml thread remains, ask the tick thread to stop - so that it does not prevent the whole process from exiting (#9971) */ - if (all_threads == NULL) caml_thread_cleanup(Val_unit); - /* OS-specific cleanups */ - st_thread_cleanup(); - /* Release the runtime system */ - release_runtime_lock(); + save_runtime_state(); + + /* The main domain thread does not go through [caml_thread_stop]. There is + always one more thread in the chain at this point in time. */ + CAMLassert(Active_thread->next != Active_thread); + + caml_threadstatus_terminate(Terminated(Active_thread->descr)); + + /* The following also sets Active_thread to a sane value in case the + backup thread does a GC before the domain lock is acquired + again. 
*/ + caml_thread_remove_and_free(Active_thread); + thread_lock_release(Caml_state->id); } /* Create a thread */ -static ST_THREAD_FUNCTION caml_thread_start(void * arg) +/* the thread lock is not held when entering */ +static void * caml_thread_start(void * v) { - caml_thread_t th = (caml_thread_t) arg; + caml_thread_t th = (caml_thread_t) v; + int dom_id = th->domain_id; value clos; void * signal_stack; - struct caml_locking_scheme* sch; -#ifdef NATIVE_CODE - struct longjmp_buffer termination_buf; - char tos; - /* Record top of stack (approximative) */ - th->top_of_stack = &tos; + + caml_init_domain_self(dom_id); + + st_tls_set(caml_thread_key, th); + + thread_lock_acquire(dom_id); + restore_runtime_state(th); + signal_stack = caml_init_signal_stack(); + +#ifdef POSIX_SIGNALS + /* restore the signal mask from the spawning thread, now it is safe for the + signal handler to run (as Caml_state is initialised) */ + pthread_sigmask(SIG_SETMASK, &th->init_mask, NULL); #endif - /* Associate the thread descriptor with the thread */ - st_tls_set(thread_descriptor_key, (void *) th); - st_thread_set_id(Ident(th->descr)); - sch = atomic_load(&caml_locking_scheme); - if (sch->thread_start != NULL) - sch->thread_start(sch->context, Thread_type_caml); - /* Acquire the global mutex */ - caml_leave_blocking_section(); - st_thread_set_id(Ident(th->descr)); - signal_stack = caml_setup_stack_overflow_detection(); -#ifdef NATIVE_CODE - /* Setup termination handler (for caml_thread_exit) */ - if (sigsetjmp(termination_buf.buf, 0) == 0) { - th->exit_buf = &termination_buf; + clos = Start_closure(Active_thread->descr); + caml_modify(&(Start_closure(Active_thread->descr)), Val_unit); + caml_callback_exn(clos, Val_unit); + caml_thread_stop(); + caml_free_signal_stack(signal_stack); + return 0; +} + +static int create_tick_thread(void) +{ + int err; +#ifdef POSIX_SIGNALS + sigset_t mask, old_mask; + + /* Block all signals so that we don't try to execute an OCaml signal + handler in the new 
tick thread */ + sigfillset(&mask); + pthread_sigmask(SIG_BLOCK, &mask, &old_mask); #endif - /* Callback the closure */ - clos = Start_closure(th->descr); - caml_modify(&(Start_closure(th->descr)), Val_unit); - caml_callback_exn(clos, Val_unit); - caml_thread_stop(); - sch = atomic_load(&caml_locking_scheme); - if (sch->thread_stop != NULL) - sch->thread_stop(sch->context, Thread_type_caml); -#ifdef NATIVE_CODE - } + + err = st_thread_create(&Tick_thread_id, caml_thread_tick, + (void *) &Caml_state->id); + +#ifdef POSIX_SIGNALS + pthread_sigmask(SIG_SETMASK, &old_mask, NULL); #endif - caml_stop_stack_overflow_detection(signal_stack); - /* The thread now stops running */ - return 0; + + return err; } -CAMLprim value caml_thread_new(value clos) /* ML */ +CAMLprim value caml_thread_new(value clos) { + CAMLparam1(clos); caml_thread_t th; st_retcode err; +#ifdef POSIX_SIGNALS + sigset_t mask, old_mask; + + sigfillset(&mask); + pthread_sigmask(SIG_BLOCK, &mask, &old_mask); +#endif #ifndef NATIVE_CODE if (caml_debugger_in_use) @@ -711,175 +595,146 @@ CAMLprim value caml_thread_new(value clos) /* ML */ #endif /* Create a thread info block */ th = caml_thread_new_info(); - if (th == NULL) caml_raise_out_of_memory(); - /* Equip it with a thread descriptor */ + + if (th == NULL) + caml_raise_out_of_memory(); + th->descr = caml_thread_new_descriptor(clos); - /* Add thread info block to the list of threads */ - th->next = curr_thread->next; - th->prev = curr_thread; - curr_thread->next->prev = th; - curr_thread->next = th; - /* Create the new thread */ + +#ifdef POSIX_SIGNALS + th->init_mask = old_mask; +#endif + + th->next = Active_thread->next; + th->prev = Active_thread; + + Active_thread->next->prev = th; + Active_thread->next = th; + err = st_thread_create(NULL, caml_thread_start, (void *) th); + +#ifdef POSIX_SIGNALS + /* regardless of error, return our sigmask to the original state */ + pthread_sigmask(SIG_SETMASK, &old_mask, NULL); +#endif + if (err != 0) { /* Creation 
failed, remove thread info block from list of threads */ - caml_thread_remove_info(th); - st_check_error(err, "Thread.create"); + caml_thread_remove_and_free(th); + sync_check_error(err, "Thread.create"); } - /* Create the tick thread if not already done. - Because of PR#4666, we start the tick thread late, only when we create - the first additional thread in the current process*/ - if (caml_tick_thread_enabled) { - err = start_tick_thread(); - st_check_error(err, "Thread.create"); + + if (! Tick_thread_running) { + err = create_tick_thread(); + sync_check_error(err, "Thread.create"); + Tick_thread_running = 1; } - return th->descr; + CAMLreturn(th->descr); } /* Register a thread already created from C */ +#define Dom_c_threads 0 + +/* the thread lock is not held when entering */ CAMLexport int caml_c_thread_register(void) { - caml_thread_t th; - struct caml_locking_scheme* sch; -#ifdef NATIVE_CODE - st_retcode err; -#endif + /* Already registered? */ + if (This_thread != NULL) return 0; - sch = atomic_load(&caml_locking_scheme); - if (sch->thread_start != NULL) - sch->thread_start(sch->context, Thread_type_c_registered); + CAMLassert(Caml_state_opt == NULL); + caml_init_domain_self(Dom_c_threads); - /* Already registered? */ - if (st_tls_get(thread_descriptor_key) != NULL) return 0; + /* Take master lock to protect access to the runtime */ + thread_lock_acquire(Dom_c_threads); /* Create a thread info block */ - th = caml_thread_new_info(); - if (th == NULL) return 0; -#ifdef NATIVE_CODE - th->top_of_stack = (char *) &err; -#endif - /* Take master lock to protect access to the chaining of threads */ - acquire_runtime_lock(); + caml_thread_t th = caml_thread_new_info(); + /* If it fails, we release the lock and return an error. 
*/ + if (th == NULL) { + thread_lock_release(Dom_c_threads); + return 0; + } /* Add thread info block to the list of threads */ - if (all_threads == NULL) { + if (Active_thread == NULL) { th->next = th; th->prev = th; - all_threads = th; + Active_thread = th; } else { - th->next = all_threads->next; - th->prev = all_threads; - all_threads->next->prev = th; - all_threads->next = th; + th->next = Active_thread->next; + th->prev = Active_thread; + Active_thread->next->prev = th; + Active_thread->next = th; } /* Associate the thread descriptor with the thread */ - st_tls_set(thread_descriptor_key, (void *) th); - /* Release the master lock */ - release_runtime_lock(); - /* Now we can re-enter the run-time system and heap-allocate the descriptor */ - caml_leave_blocking_section(); + st_tls_set(caml_thread_key, (void *) th); + /* Allocate the thread descriptor on the heap */ th->descr = caml_thread_new_descriptor(Val_unit); /* no closure */ - st_thread_set_id(Ident(th->descr)); - /* Create the tick thread if not already done. */ - if (caml_tick_thread_enabled) start_tick_thread(); - /* Exit the run-time system */ - caml_enter_blocking_section(); + + if (! Tick_thread_running) { + st_retcode err = create_tick_thread(); + sync_check_error(err, "caml_register_c_thread"); + Tick_thread_running = 1; + } + + /* Release the master lock */ + thread_lock_release(Dom_c_threads); return 1; } /* Unregister a thread that was created from C and registered with the function above */ +/* the thread lock is not held when entering */ CAMLexport int caml_c_thread_unregister(void) { - struct caml_locking_scheme* sch; - caml_thread_t th = st_tls_get(thread_descriptor_key); - /* Not registered? 
*/ + caml_thread_t th = This_thread; + + /* If this thread is not set, then it was not registered */ if (th == NULL) return 0; /* Wait until the runtime is available */ - acquire_runtime_lock(); - /* Forget the thread descriptor */ - st_tls_set(thread_descriptor_key, NULL); + thread_lock_acquire(Dom_c_threads); + /* Forget the thread descriptor */ + st_tls_set(caml_thread_key, NULL); /* Remove thread info block from list of threads, and free it */ - caml_thread_remove_info(th); - /* If no other OCaml thread remains, ask the tick thread to stop - so that it does not prevent the whole process from exiting (#9971) */ - if (all_threads == NULL) caml_thread_cleanup(Val_unit); + caml_thread_remove_and_free(th); /* Release the runtime */ - release_runtime_lock(); - sch = atomic_load(&caml_locking_scheme); - if (sch->thread_stop != NULL) - sch->thread_stop(sch->context, Thread_type_c_registered); + thread_lock_release(Dom_c_threads); return 1; } /* Return the current thread */ -CAMLprim value caml_thread_self(value unit) /* ML */ +CAMLprim value caml_thread_self(value unit) { - if (curr_thread == NULL) - caml_invalid_argument("Thread.self: not initialized"); - return curr_thread->descr; + return Active_thread->descr; } /* Return the identifier of a thread */ -CAMLprim value caml_thread_id(value th) /* ML */ +CAMLprim value caml_thread_id(value th) { return Ident(th); } /* Print uncaught exception and backtrace */ -CAMLprim value caml_thread_uncaught_exception(value exn) /* ML */ +CAMLprim value caml_thread_uncaught_exception(value exn) { char * msg = caml_format_exception(exn); fprintf(stderr, "Thread %d killed on uncaught exception %s\n", - Int_val(Ident(curr_thread->descr)), msg); + Int_val(Ident(Active_thread->descr)), msg); caml_stat_free(msg); - if (Caml_state->_backtrace_active) caml_print_exception_backtrace(); + if (Caml_state->backtrace_active) caml_print_exception_backtrace(); fflush(stderr); return Val_unit; } -/* Terminate current thread */ - -CAMLprim value 
caml_thread_exit(value unit) /* ML */ -{ - struct longjmp_buffer * exit_buf = NULL; - - if (curr_thread == NULL) - caml_invalid_argument("Thread.exit: not initialized"); - - /* In native code, we cannot call pthread_exit here because on some - systems this raises a C++ exception, and ocamlopt-generated stack - frames cannot be unwound. Instead, we longjmp to the thread - creation point (in caml_thread_start) or to the point in - caml_main where caml_termination_hook will be called. - Note that threads created in C then registered do not have - a creation point (exit_buf == NULL). - */ -#ifdef NATIVE_CODE - exit_buf = curr_thread->exit_buf; -#endif - caml_thread_stop(); - if (exit_buf != NULL) { - /* Native-code and (main thread or thread created by OCaml) */ - siglongjmp(exit_buf->buf, 1); - } else { - /* Bytecode, or thread created from C */ - st_thread_exit(); - } - return Val_unit; /* not reached */ -} - /* Allow re-scheduling */ -CAMLprim value caml_thread_yield(value unit) /* ML */ +CAMLprim value caml_thread_yield(value unit) { - struct caml_locking_scheme* s; - - s = atomic_load(&caml_locking_scheme); - if (s->can_skip_yield != NULL && s->can_skip_yield(s->context)) + st_masterlock *m = Thread_lock(Caml_state->id); + if (st_masterlock_waiters(m) == 0) return Val_unit; /* Do all the parts of a blocking section enter/leave except lock @@ -887,182 +742,22 @@ CAMLprim value caml_thread_yield(value unit) /* ML */ our blocking section doesn't contain anything interesting, don't bother with saving errno.) 
*/ - caml_raise_async_if_exception(caml_process_pending_signals_exn(), - "signal handler"); - caml_thread_save_runtime_state(); - /* caml_locking_scheme may have changed in caml_process_pending_signals_exn */ - s = atomic_load(&caml_locking_scheme); - s->yield(s->context); - if (atomic_load(&caml_locking_scheme) != s) { - /* The lock we have is no longer the runtime lock */ - s->unlock(s->context); - acquire_runtime_lock(); - } - caml_thread_restore_runtime_state(); - caml_raise_async_if_exception(caml_process_pending_signals_exn(), - "signal handler"); + + caml_raise_if_exception(caml_process_pending_signals_exn()); + save_runtime_state(); + st_thread_yield(m); + restore_runtime_state(This_thread); + caml_raise_if_exception(caml_process_pending_signals_exn()); return Val_unit; } /* Suspend the current thread until another thread terminates */ -CAMLprim value caml_thread_join(value th) /* ML */ +CAMLprim value caml_thread_join(value th) { st_retcode rc = caml_threadstatus_wait(Terminated(th)); - st_check_error(rc, "Thread.join"); - return Val_unit; -} - -/* Mutex operations */ - -#define Mutex_val(v) (* ((st_mutex *) Data_custom_val(v))) - -static void caml_mutex_finalize(value wrapper) -{ - st_mutex_destroy(Mutex_val(wrapper)); -} - -static int caml_mutex_compare(value wrapper1, value wrapper2) -{ - st_mutex mut1 = Mutex_val(wrapper1); - st_mutex mut2 = Mutex_val(wrapper2); - return mut1 == mut2 ? 0 : mut1 < mut2 ? 
-1 : 1; -} - -static intnat caml_mutex_hash(value wrapper) -{ - return (intnat) (Mutex_val(wrapper)); -} - -static struct custom_operations caml_mutex_ops = { - "_mutex", - caml_mutex_finalize, - caml_mutex_compare, - caml_mutex_hash, - custom_serialize_default, - custom_deserialize_default, - custom_compare_ext_default, - custom_fixed_length_default -}; - -CAMLprim value caml_mutex_new(value unit) /* ML */ -{ - st_mutex mut = NULL; /* suppress warning */ - value wrapper; - st_check_error(st_mutex_create(&mut), "Mutex.create"); - wrapper = caml_alloc_custom(&caml_mutex_ops, sizeof(st_mutex *), - 0, 1); - Mutex_val(wrapper) = mut; - return wrapper; -} - -CAMLprim value caml_mutex_lock(value wrapper) /* ML */ -{ - st_mutex mut = Mutex_val(wrapper); - st_retcode retcode; - - /* PR#4351: first try to acquire mutex without releasing the master lock */ - if (st_mutex_trylock(mut) == MUTEX_PREVIOUSLY_UNLOCKED) return Val_unit; - /* If unsuccessful, block on mutex */ - Begin_root(wrapper) /* prevent the deallocation of mutex */ - caml_enter_blocking_section(); - retcode = st_mutex_lock(mut); - caml_leave_blocking_section(); - End_roots(); - st_check_error(retcode, "Mutex.lock"); - return Val_unit; -} - -CAMLprim value caml_mutex_unlock(value wrapper) /* ML */ -{ - st_mutex mut = Mutex_val(wrapper); - st_retcode retcode; - /* PR#4351: no need to release and reacquire master lock */ - retcode = st_mutex_unlock(mut); - st_check_error(retcode, "Mutex.unlock"); - return Val_unit; -} - -CAMLprim value caml_mutex_try_lock(value wrapper) /* ML */ -{ - st_mutex mut = Mutex_val(wrapper); - st_retcode retcode; - retcode = st_mutex_trylock(mut); - if (retcode == MUTEX_ALREADY_LOCKED) return Val_false; - st_check_error(retcode, "Mutex.try_lock"); - return Val_true; -} - -/* Conditions operations */ - -#define Condition_val(v) (* (st_condvar *) Data_custom_val(v)) - -static void caml_condition_finalize(value wrapper) -{ - st_condvar_destroy(Condition_val(wrapper)); -} - -static int 
caml_condition_compare(value wrapper1, value wrapper2) -{ - st_condvar cond1 = Condition_val(wrapper1); - st_condvar cond2 = Condition_val(wrapper2); - return cond1 == cond2 ? 0 : cond1 < cond2 ? -1 : 1; -} - -static intnat caml_condition_hash(value wrapper) -{ - return (intnat) (Condition_val(wrapper)); -} - -static struct custom_operations caml_condition_ops = { - "_condition", - caml_condition_finalize, - caml_condition_compare, - caml_condition_hash, - custom_serialize_default, - custom_deserialize_default, - custom_compare_ext_default, - custom_fixed_length_default -}; - -CAMLprim value caml_condition_new(value unit) /* ML */ -{ - st_condvar cond = NULL; /* suppress warning */ - value wrapper; - st_check_error(st_condvar_create(&cond), "Condition.create"); - wrapper = caml_alloc_custom(&caml_condition_ops, sizeof(st_condvar *), - 0, 1); - Condition_val(wrapper) = cond; - return wrapper; -} - -CAMLprim value caml_condition_wait(value wcond, value wmut) /* ML */ -{ - st_condvar cond = Condition_val(wcond); - st_mutex mut = Mutex_val(wmut); - st_retcode retcode; - - Begin_roots2(wcond, wmut) /* prevent deallocation of cond and mutex */ - caml_enter_blocking_section(); - retcode = st_condvar_wait(cond, mut); - caml_leave_blocking_section(); - End_roots(); - st_check_error(retcode, "Condition.wait"); - return Val_unit; -} - -CAMLprim value caml_condition_signal(value wrapper) /* ML */ -{ - st_check_error(st_condvar_signal(Condition_val(wrapper)), - "Condition.signal"); - return Val_unit; -} - -CAMLprim value caml_condition_broadcast(value wrapper) /* ML */ -{ - st_check_error(st_condvar_broadcast(Condition_val(wrapper)), - "Condition.broadcast"); + sync_check_error(rc, "Thread.join"); return Val_unit; } @@ -1097,8 +792,9 @@ static value caml_threadstatus_new (void) { st_event ts = NULL; /* suppress warning */ value wrapper; - st_check_error(st_event_create(&ts), "Thread.create"); - wrapper = caml_alloc_custom(&caml_threadstatus_ops, sizeof(st_event *), + 
sync_check_error(st_event_create(&ts), "Thread.create"); + wrapper = caml_alloc_custom(&caml_threadstatus_ops, + sizeof(st_event *), 0, 1); Threadstatus_val(wrapper) = ts; return wrapper; @@ -1111,13 +807,13 @@ static void caml_threadstatus_terminate (value wrapper) static st_retcode caml_threadstatus_wait (value wrapper) { + CAMLparam1(wrapper); /* prevent deallocation of ts */ st_event ts = Threadstatus_val(wrapper); st_retcode retcode; - Begin_roots1(wrapper) /* prevent deallocation of ts */ - caml_enter_blocking_section(); - retcode = st_event_wait(ts); - caml_leave_blocking_section(); - End_roots(); - return retcode; + caml_enter_blocking_section(); + retcode = st_event_wait(ts); + caml_leave_blocking_section(); + + CAMLreturnT(st_retcode, retcode); } diff --git a/ocaml/otherlibs/systhreads/st_win32.h b/ocaml/otherlibs/systhreads/st_win32.h index 3f598a715de..5092134db09 100644 --- a/ocaml/otherlibs/systhreads/st_win32.h +++ b/ocaml/otherlibs/systhreads/st_win32.h @@ -18,520 +18,23 @@ #undef _WIN32_WINNT #define _WIN32_WINNT 0x0400 #include -#include -#include -#include -#include - -#if 1 -#define TRACE(x) -#define TRACE1(x,y) -#else -#include -#define TRACE(x) printf("%d: %s\n", GetCurrentThreadId(), x); fflush(stdout) -#define TRACE1(x,y) printf("%d: %s %p\n", GetCurrentThreadId(), x, (void *)y); \ - fflush(stdout) -#endif - -typedef DWORD st_retcode; - -#define SIGPREEMPTION SIGTERM - -/* Unique thread identifiers and atomic operations over them */ -#ifdef ARCH_SIXTYFOUR -typedef LONG64 st_tid; -#define Tid_Atomic_Exchange InterlockedExchange64 -#define Tid_Atomic_Compare_Exchange InterlockedCompareExchange64 -#else -typedef LONG st_tid; -#define Tid_Atomic_Exchange InterlockedExchange -#define Tid_Atomic_Compare_Exchange InterlockedCompareExchange -#endif - -/* Thread-local storage associating a Win32 event to every thread. */ -static DWORD st_thread_sem_key; - -/* Thread-local storage for the OCaml thread ID. 
*/ -static DWORD st_thread_id_key; - -/* OS-specific initialization */ - -static DWORD st_initialize(void) -{ - DWORD result = 0; - st_thread_sem_key = TlsAlloc(); - if (st_thread_sem_key == TLS_OUT_OF_INDEXES) - return GetLastError(); - st_thread_id_key = TlsAlloc(); - if (st_thread_id_key == TLS_OUT_OF_INDEXES) { - result = GetLastError(); - TlsFree(st_thread_sem_key); - } - return result; -} - -/* Thread creation. Created in detached mode if [res] is NULL. */ - -typedef HANDLE st_thread_id; - -static DWORD st_thread_create(st_thread_id * res, - LPTHREAD_START_ROUTINE fn, void * arg) -{ - HANDLE h = CreateThread(NULL, 0, fn, arg, 0, NULL); - TRACE1("st_thread_create", h); - if (h == NULL) return GetLastError(); - if (res == NULL) - CloseHandle(h); - else - *res = h; - return 0; -} - -#define ST_THREAD_FUNCTION DWORD WINAPI - -/* Cleanup at thread exit */ - -static void st_thread_cleanup(void) -{ - HANDLE ev = (HANDLE) TlsGetValue(st_thread_sem_key); - if (ev != NULL) CloseHandle(ev); -} - -/* Thread termination */ - -CAMLnoreturn_start -static void st_thread_exit(void) -CAMLnoreturn_end; - -static void st_thread_exit(void) -{ - TRACE("st_thread_exit"); - ExitThread(0); -} - -static void st_thread_join(st_thread_id thr) -{ - TRACE1("st_thread_join", h); - WaitForSingleObject(thr, INFINITE); -} - -/* Thread-specific state */ - -typedef DWORD st_tlskey; - -static DWORD st_tls_newkey(st_tlskey * res) -{ - *res = TlsAlloc(); - if (*res == TLS_OUT_OF_INDEXES) - return GetLastError(); - else - return 0; -} - -Caml_inline void * st_tls_get(st_tlskey k) -{ - return TlsGetValue(k); -} - -Caml_inline void st_tls_set(st_tlskey k, void * v) -{ - TlsSetValue(k, v); -} - -/* OS-specific handling of the OCaml thread ID (must be called with the runtime - lock). */ -Caml_inline void st_thread_set_id(intnat id) -{ - CAMLassert(id != 0); - st_tls_set(st_thread_id_key, (void *)id); -} - -/* Return the identifier for the current thread. The 0 value is reserved. 
*/ -Caml_inline intnat st_current_thread_id(void) -{ - intnat id = (intnat)st_tls_get(st_thread_id_key); - CAMLassert(id != 0); - return id; -} - -/* The master lock. */ - -typedef CRITICAL_SECTION st_masterlock; - -static void st_masterlock_init(st_masterlock * m) -{ - TRACE("st_masterlock_init"); - InitializeCriticalSection(m); - EnterCriticalSection(m); -} - -Caml_inline void st_masterlock_acquire(st_masterlock * m) -{ - TRACE("st_masterlock_acquire"); - EnterCriticalSection(m); - TRACE("st_masterlock_acquire (done)"); -} - -Caml_inline void st_masterlock_release(st_masterlock * m) -{ - LeaveCriticalSection(m); - TRACE("st_masterlock_released"); -} - -Caml_inline int st_masterlock_waiters(st_masterlock * m) -{ - return 1; /* info not maintained */ -} - -/* Scheduling hints */ - -Caml_inline void st_thread_yield(st_masterlock * m) -{ - LeaveCriticalSection(m); - Sleep(0); - EnterCriticalSection(m); -} - -/* Mutexes */ - -struct st_mutex_ { - CRITICAL_SECTION crit; - volatile st_tid owner; /* 0 if unlocked */ - /* The "owner" field is not always protected by "crit"; it is also - accessed without holding "crit", using the Interlocked API for - atomic accesses */ -}; - -typedef struct st_mutex_ * st_mutex; - -static DWORD st_mutex_create(st_mutex * res) -{ - st_mutex m = caml_stat_alloc_noexc(sizeof(struct st_mutex_)); - if (m == NULL) return ERROR_NOT_ENOUGH_MEMORY; - InitializeCriticalSection(&m->crit); - m->owner = 0; - *res = m; - return 0; -} - -static DWORD st_mutex_destroy(st_mutex m) +Caml_inline void st_msleep(int msec) { - DeleteCriticalSection(&m->crit); - caml_stat_free(m); - return 0; + Sleep(msec); } -/* Error codes with the 29th bit set are reserved for the application */ - -#define MUTEX_DEADLOCK (1<<29 | 1) -#define MUTEX_PREVIOUSLY_UNLOCKED 0 -#define MUTEX_ALREADY_LOCKED (1 << 29) -#define MUTEX_NOT_OWNED (1<<29 | 2) - -Caml_inline DWORD st_mutex_lock(st_mutex m) -{ - st_tid self, prev; - TRACE1("st_mutex_lock", m); - self = 
st_current_thread_id(); - /* Critical sections are recursive locks, so this will succeed - if we already own the lock */ - EnterCriticalSection(&m->crit); - /* Record that we are the owner of the lock */ - prev = Tid_Atomic_Exchange(&m->owner, self); - if (prev != 0) { - /* The mutex was already locked by ourselves. - Cancel the EnterCriticalSection above and return an error. */ - TRACE1("st_mutex_lock (deadlock)", m); - LeaveCriticalSection(&m->crit); - return MUTEX_DEADLOCK; - } - TRACE1("st_mutex_lock (done)", m); - return 0; -} - -Caml_inline DWORD st_mutex_trylock(st_mutex m) -{ - st_tid self, prev; - TRACE1("st_mutex_trylock", m); - self = st_current_thread_id(); - if (! TryEnterCriticalSection(&m->crit)) { - TRACE1("st_mutex_trylock (failure)", m); - return MUTEX_ALREADY_LOCKED; - } - /* Record that we are the owner of the lock */ - prev = Tid_Atomic_Exchange(&m->owner, self); - if (prev != 0) { - /* The mutex was already locked by ourselves. - Cancel the EnterCriticalSection above and return "already locked". */ - TRACE1("st_mutex_trylock (already locked by self)", m); - LeaveCriticalSection(&m->crit); - return MUTEX_ALREADY_LOCKED; - } - TRACE1("st_mutex_trylock (done)", m); - return MUTEX_PREVIOUSLY_UNLOCKED; -} - -Caml_inline DWORD st_mutex_unlock(st_mutex m) -{ - st_tid self, prev; - /* If the calling thread holds the lock, m->owner is stable and equal - to st_current_thread_id(). - Otherwise, the value of m->owner can be 0 (if the mutex is unlocked) - or some other thread ID (if the mutex is held by another thread), - but is never equal to st_current_thread_id(). 
*/ - self = st_current_thread_id(); - prev = Tid_Atomic_Compare_Exchange(&m->owner, 0, self); - if (prev != self) { - /* The value of m->owner is unchanged */ - TRACE1("st_mutex_unlock (error)", m); - return MUTEX_NOT_OWNED; - } - TRACE1("st_mutex_unlock", m); - LeaveCriticalSection(&m->crit); - return 0; -} - -/* Condition variables */ - -/* A condition variable is just a list of threads currently - waiting on this c.v. Each thread is represented by its - associated event. */ - -struct st_wait_list { - HANDLE event; /* event of the first waiting thread */ - struct st_wait_list * next; -}; - -typedef struct st_condvar_struct { - CRITICAL_SECTION lock; /* protect the data structure */ - struct st_wait_list * waiters; /* list of threads waiting */ -} * st_condvar; - -static DWORD st_condvar_create(st_condvar * res) -{ - st_condvar c = caml_stat_alloc_noexc(sizeof(struct st_condvar_struct)); - if (c == NULL) return ERROR_NOT_ENOUGH_MEMORY; - InitializeCriticalSection(&c->lock); - c->waiters = NULL; - *res = c; - return 0; -} - -static DWORD st_condvar_destroy(st_condvar c) -{ - TRACE1("st_condvar_destroy", c); - DeleteCriticalSection(&c->lock); - caml_stat_free(c); - return 0; -} - -static DWORD st_condvar_signal(st_condvar c) -{ - DWORD rc = 0; - struct st_wait_list * curr, * next; - - TRACE1("st_condvar_signal", c); - EnterCriticalSection(&c->lock); - curr = c->waiters; - if (curr != NULL) { - next = curr->next; - /* Wake up the first waiting thread */ - TRACE1("st_condvar_signal: waking up", curr->event); - if (! 
SetEvent(curr->event)) rc = GetLastError(); - /* Remove it from the waiting list */ - c->waiters = next; - } - LeaveCriticalSection(&c->lock); - return rc; -} - -static DWORD st_condvar_broadcast(st_condvar c) -{ - DWORD rc = 0; - struct st_wait_list * curr, * next; - - TRACE1("st_condvar_broadcast", c); - EnterCriticalSection(&c->lock); - /* Wake up all waiting threads */ - curr = c->waiters; - while (curr != NULL) { - next = curr->next; - TRACE1("st_condvar_signal: waking up", curr->event); - if (! SetEvent(curr->event)) rc = GetLastError(); - curr = next; - } - /* Remove them all from the waiting list */ - c->waiters = NULL; - LeaveCriticalSection(&c->lock); - return rc; -} - -static DWORD st_condvar_wait(st_condvar c, st_mutex m) -{ - HANDLE ev; - struct st_wait_list wait; - DWORD rc; - st_tid self, prev; - - TRACE1("st_condvar_wait", c); - /* Recover (or create) the event associated with the calling thread */ - ev = (HANDLE) TlsGetValue(st_thread_sem_key); - if (ev == 0) { - ev = CreateEvent(NULL, - FALSE /*auto reset*/, - FALSE /*initially unset*/, - NULL); - if (ev == NULL) return GetLastError(); - TlsSetValue(st_thread_sem_key, (void *) ev); - } - /* Get ready to release the mutex */ - self = st_current_thread_id(); - prev = Tid_Atomic_Compare_Exchange(&m->owner, 0, self); - if (prev != self) { - /* The value of m->owner is unchanged */ - TRACE1("st_condvar_wait: error: mutex not held", m); - return MUTEX_NOT_OWNED; - } - /* Insert the current thread in the waiting list (atomically) */ - EnterCriticalSection(&c->lock); - wait.event = ev; - wait.next = c->waiters; - c->waiters = &wait; - LeaveCriticalSection(&c->lock); - /* Finish releasing the mutex m (like st_mutex_unlock does, minus - the error checking, which we've already done above). */ - LeaveCriticalSection(&m->crit); - /* Wait for our event to be signaled. 
There is no risk of lost - wakeup, since we inserted ourselves on the waiting list of c - before releasing m */ - TRACE1("st_condvar_wait: blocking on event", ev); - if (WaitForSingleObject(ev, INFINITE) == WAIT_FAILED) - return GetLastError(); - /* Reacquire the mutex m */ - TRACE1("st_condvar_wait: restarted, acquiring mutex", c); - rc = st_mutex_lock(m); - if (rc != 0) return rc; - TRACE1("st_condvar_wait: acquired mutex", c); - return 0; -} - -/* Triggered events */ - -typedef HANDLE st_event; - -static DWORD st_event_create(st_event * res) -{ - st_event m = - CreateEvent(NULL, TRUE/*manual reset*/, FALSE/*initially unset*/, NULL); - TRACE1("st_event_create", m); - if (m == NULL) return GetLastError(); - *res = m; - return 0; -} - -static DWORD st_event_destroy(st_event e) -{ - TRACE1("st_event_destroy", e); - if (CloseHandle(e)) - return 0; - else - return GetLastError(); -} - -static DWORD st_event_trigger(st_event e) -{ - TRACE1("st_event_trigger", e); - if (SetEvent(e)) - return 0; - else - return GetLastError(); -} - -static DWORD st_event_wait(st_event e) -{ - TRACE1("st_event_wait", e); - if (WaitForSingleObject(e, INFINITE) == WAIT_FAILED) - return GetLastError(); - else - return 0; -} - -/* Reporting errors */ - -static void st_check_error(DWORD retcode, char * msg) -{ - wchar_t err[1024]; - int errlen, msglen, ret; - value str; - - if (retcode == 0) return; - if (retcode == ERROR_NOT_ENOUGH_MEMORY) caml_raise_out_of_memory(); - switch (retcode) { - case MUTEX_DEADLOCK: - ret = swprintf(err, sizeof(err)/sizeof(wchar_t), - L"Mutex is already locked by calling thread"); - break; - case MUTEX_NOT_OWNED: - ret = swprintf(err, sizeof(err)/sizeof(wchar_t), - L"Mutex is not locked by calling thread"); - break; - default: - ret = FormatMessage( - FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS, - NULL, - retcode, - 0, - err, - sizeof(err)/sizeof(wchar_t), - NULL); - if (! 
ret) { - ret = - swprintf(err, sizeof(err)/sizeof(wchar_t), L"error code %lx", retcode); - } - } - msglen = strlen(msg); - errlen = win_wide_char_to_multi_byte(err, ret, NULL, 0); - str = caml_alloc_string(msglen + 2 + errlen); - memmove (&Byte(str, 0), msg, msglen); - memmove (&Byte(str, msglen), ": ", 2); - win_wide_char_to_multi_byte(err, ret, &Byte(str, msglen + 2), errlen); - caml_raise_sys_error(str); -} - -/* Variable used to stop the "tick" thread */ -static volatile int caml_tick_thread_stop = 0; - -/* The tick thread: posts a SIGPREEMPTION signal periodically */ - -static DWORD WINAPI caml_thread_tick(void * arg) -{ - while(! caml_tick_thread_stop) { - Sleep(Thread_timeout); - /* The preemption signal should never cause a callback, so don't - go through caml_handle_signal(), just record signal delivery via - caml_record_signal(). */ - caml_record_signal(SIGPREEMPTION); - } - return 0; -} - -/* "At fork" processing -- none under Win32 */ - -static DWORD st_atfork(void (*fn)(void)) -{ - return 0; -} +#include "st_pthreads.h" /* Signal handling -- none under Win32 */ -value caml_thread_sigmask(value cmd, value sigs) /* ML */ +value caml_thread_sigmask(value cmd, value sigs) { caml_invalid_argument("Thread.sigmask not implemented"); return Val_int(0); /* not reached */ } -value caml_wait_signal(value sigs) /* ML */ +value caml_wait_signal(value sigs) { caml_invalid_argument("Thread.wait_signal not implemented"); return Val_int(0); /* not reached */ diff --git a/ocaml/otherlibs/systhreads/thread.ml b/ocaml/otherlibs/systhreads/thread.ml index a021d3223a4..83cbb80ca83 100644 --- a/ocaml/otherlibs/systhreads/thread.ml +++ b/ocaml/otherlibs/systhreads/thread.ml @@ -1,4 +1,3 @@ -# 1 "thread.ml" (**************************************************************************) (* *) (* OCaml *) @@ -16,8 +15,6 @@ (* User-level threads *) -[@@@ocaml.flambda_o3] - type t external thread_initialize : unit -> unit = "caml_thread_initialize" @@ -30,7 +27,6 @@ external yield : 
unit -> unit = "caml_thread_yield" external self : unit -> t = "caml_thread_self" [@@noalloc] external id : t -> int = "caml_thread_id" [@@noalloc] external join : t -> unit = "caml_thread_join" -external exit_stub : unit -> unit = "caml_thread_exit" (* For new, make sure the function passed to thread_new never raises an exception. *) @@ -73,17 +69,7 @@ let create fn arg = flush stderr) let exit () = - ignore (Sys.opaque_identity (check_memprof_cb ())); - exit_stub () - -let exit () = - ignore (Sys.opaque_identity (check_memprof_cb ())); - exit_stub () - -(* Thread.kill is currently not implemented due to problems with - cleanup handlers on several platforms *) - -let kill th = invalid_arg "Thread.kill: not implemented" + raise Exit (* Preemption *) @@ -97,8 +83,9 @@ let preempt_signal = | _ -> Sys.sigvtalrm let () = - Sys.set_signal preempt_signal (Sys.Signal_handle preempt); thread_initialize (); + Sys.set_signal preempt_signal (Sys.Signal_handle preempt); + (* Callback in [caml_shutdown], when the last domain exits. *) Callback.register "Thread.at_shutdown" (fun () -> thread_cleanup(); (* In case of DLL-embedded OCaml the preempt_signal handler @@ -112,9 +99,6 @@ let () = let delay = Unix.sleepf -let wait_read fd = () -let wait_write fd = () - let wait_timed_read fd d = match Unix.select [fd] [] [] d with ([], _, _) -> false | (_, _, _) -> true let wait_timed_write fd d = diff --git a/ocaml/otherlibs/systhreads/thread.mli b/ocaml/otherlibs/systhreads/thread.mli index 8f9013dd796..b2cf78a5cdc 100644 --- a/ocaml/otherlibs/systhreads/thread.mli +++ b/ocaml/otherlibs/systhreads/thread.mli @@ -23,7 +23,7 @@ type t val create : ('a -> 'b) -> 'a -> t (** [Thread.create funct arg] creates a new thread of control, in which the function application [funct arg] - is executed concurrently with the other threads of the program. + is executed concurrently with the other threads of the domain. The application of [Thread.create] returns the handle of the newly created thread. 
The new thread terminates when the application [funct arg] @@ -32,7 +32,10 @@ val create : ('a -> 'b) -> 'a -> t In the last case, the uncaught exception is printed on standard error, but not propagated back to the parent thread. Similarly, the result of the application [funct arg] is discarded and not - directly accessible to the parent thread. *) + directly accessible to the parent thread. + + See also {!Domain.spawn} if you want parallel execution instead. + *) val self : unit -> t (** Return the handle for the thread currently executing. *) @@ -43,25 +46,30 @@ val id : t -> int It can be used to build data structures indexed by threads. *) exception Exit -(** Exception that can be raised by user code to initiate termination - of the current thread. - Compared to calling the {!Thread.exit} function, raising the - {!Thread.Exit} exception will trigger {!Fun.finally} finalizers - and catch-all exception handlers. - It is the recommended way to terminate threads prematurely. - - @since 4.14.0 -*) +(** Exception raised by user code to initiate termination of the + current thread. + In a thread created by {!Thread.create} [funct] [arg], if the + {!Thread.Exit} exception reaches the top of the application + [funct arg], it has the effect of terminating the current thread + silently. In other contexts, there is no implicit handling of the + {!Thread.Exit} exception. *) val exit : unit -> unit -(** Terminate prematurely the currently executing thread. *) - -val kill : t -> unit - [@@ocaml.deprecated "Not implemented, do not use"] -(** This function was supposed to terminate prematurely the thread - whose handle is given. It is not currently implemented due to - problems with cleanup handlers on many POSIX 1003.1c implementations. - It always raises the [Invalid_argument] exception. *) +[@@ocaml.deprecated "Use 'raise Thread.Exit' instead."] +(** Raise the {!Thread.Exit} exception. 
+ In a thread created by {!Thread.create}, this will cause the thread + to terminate prematurely, unless the thread function handles the + exception itself. {!Fun.protect} finalizers and catch-all + exception handlers will be executed. + + To make it clear that an exception is raised and will trigger + finalizers and catch-all exception handlers, it is recommended + to write [raise Thread.Exit] instead of [Thread.exit ()]. + + @before 5.0 A different implementation was used, not based on raising + an exception, and not running finalizers and catch-all handlers. + The previous implementation had a different behavior when called + outside of a thread created by {!Thread.create}. *) (** {1 Suspending threads} *) @@ -87,20 +95,12 @@ val yield : unit -> unit a more general and more standard-conformant manner. It is recommended to use {!Unix} functions directly. *) -val wait_read : Unix.file_descr -> unit - [@@ocaml.deprecated "This function no longer does anything"] -(** This function does nothing in the current implementation of the threading - library and can be removed from all user programs. *) - -val wait_write : Unix.file_descr -> unit - [@@ocaml.deprecated "This function no longer does anything"] -(** This function does nothing in the current implementation of the threading - library and can be removed from all user programs. 
*) - val wait_timed_read : Unix.file_descr -> float -> bool +[@@ocaml.deprecated "Use Unix.select instead."] (** See {!Thread.wait_timed_write}.*) val wait_timed_write : Unix.file_descr -> float -> bool +[@@ocaml.deprecated "Use Unix.select instead."] (** Suspend the execution of the calling thread until at least one character or EOF is available for reading ([wait_timed_read]) or one character can be written without blocking ([wait_timed_write]) @@ -115,6 +115,7 @@ val select : Unix.file_descr list -> Unix.file_descr list -> Unix.file_descr list -> float -> Unix.file_descr list * Unix.file_descr list * Unix.file_descr list +[@@ocaml.deprecated "Use Unix.select instead."] (** Same function as {!Unix.select}. Suspend the execution of the calling thread until input/output becomes possible on the given Unix file descriptors. @@ -122,6 +123,7 @@ val select : {!Unix.select}. *) val wait_pid : int -> int * Unix.process_status +[@@ocaml.deprecated "Use Unix.waitpid instead."] (** Same function as {!Unix.waitpid}. [wait_pid p] suspends the execution of the calling thread until the process specified by the process identifier [p] diff --git a/ocaml/otherlibs/systhreads/threads.h b/ocaml/otherlibs/systhreads/threads.h index df6f7aa0e83..11da0471707 100644 --- a/ocaml/otherlibs/systhreads/threads.h +++ b/ocaml/otherlibs/systhreads/threads.h @@ -25,19 +25,19 @@ CAMLextern void caml_leave_blocking_section (void); #define caml_acquire_runtime_system caml_leave_blocking_section #define caml_release_runtime_system caml_enter_blocking_section -/* Manage the master lock around the OCaml run-time system. +/* Manage the domain lock around the OCaml run-time system. Only one thread at a time can execute OCaml compiled code or - OCaml run-time system functions. + OCaml run-time system functions within a domain. - When OCaml calls a C function, the current thread holds the master - lock. 
The C function can release it by calling + When OCaml calls a C function, the current thread holds the domain lock. + The C function can release it by calling [caml_release_runtime_system]. Then, another thread can execute OCaml code. However, the calling thread must not access any OCaml data, nor call any runtime system function, nor call back into OCaml. Before returning to its OCaml caller, or accessing OCaml data, or call runtime system functions, the current thread must - re-acquire the master lock by calling [caml_acquire_runtime_system]. + re-acquire the domain lock by calling [caml_acquire_runtime_system]. Symmetrically, if a C function (not called from OCaml) wishes to call back into OCaml code, it should invoke [caml_acquire_runtime_system] @@ -50,64 +50,20 @@ CAMLextern void caml_leave_blocking_section (void); use the runtime system (typically, a blocking I/O operation). */ -/* These functions are defined in the threads library, not the runtime */ -#ifndef CAMLextern_libthreads -#define CAMLextern_libthreads CAMLextern -#endif -CAMLextern_libthreads int caml_c_thread_register(void); -CAMLextern_libthreads int caml_c_thread_unregister(void); +CAMLextern int caml_c_thread_register(void); +CAMLextern int caml_c_thread_unregister(void); /* If a thread is created by C code (instead of by OCaml itself), it must be registered with the OCaml runtime system before being able to call back into OCaml code or use other runtime system - functions. Just call [caml_c_thread_register] once. - Before the thread finishes, it must call [caml_c_thread_unregister]. + functions. Just call [caml_c_thread_register] once. The domain lock + is not held when [caml_c_thread_register] returns. + Before the thread finishes, it must call [caml_c_thread_unregister] + (without holding the domain lock). Both functions return 1 on success, 0 on error. + Note that threads registered by C code belong to the domain 0. 
*/ -enum caml_thread_type { Thread_type_caml, Thread_type_c_registered }; -struct caml_locking_scheme { - void* context; - void (*lock)(void*); - void (*unlock)(void*); - - /* If non-NULL, these functions are called when threads start and stop. - For threads created by OCaml, that's at creation and termination. - For threads created by C, that's at caml_c_thread_register/unregister. - The lock is not held when these functions are called. */ - void (*thread_start)(void*, enum caml_thread_type); - void (*thread_stop)(void*, enum caml_thread_type); - - /* Called after fork(). - The lock should be held after this function returns. */ - void (*reinitialize_after_fork)(void*); - - /* can_skip_yield and yield are both called with the lock held, - and expect it held on return */ - int (*can_skip_yield)(void*); - void (*yield)(void*); -}; - -extern struct caml_locking_scheme caml_default_locking_scheme; - -/* Switch to a new runtime locking scheme. - - The old runtime lock must be held (i.e. not in a blocking section), - and the new runtime lock must not be held. After this function - returns, the old lock is released and the new one is held. - - There is a period during this function when neither lock is held, - so context-switches may occur. 
*/ -CAMLextern_libthreads -void caml_switch_runtime_locking_scheme(struct caml_locking_scheme*); - -CAMLextern_libthreads -void caml_thread_save_runtime_state(void); - -CAMLextern_libthreads -void caml_thread_restore_runtime_state(void); - - #ifdef __cplusplus } #endif diff --git a/ocaml/otherlibs/systhreads4/.depend b/ocaml/otherlibs/systhreads4/.depend new file mode 100644 index 00000000000..661d3575dec --- /dev/null +++ b/ocaml/otherlibs/systhreads4/.depend @@ -0,0 +1,36 @@ +condition.cmo : \ + mutex.cmi \ + condition.cmi +condition.cmx : \ + mutex.cmx \ + condition.cmi +condition.cmi : \ + mutex.cmi +event.cmo : \ + mutex.cmi \ + condition.cmi \ + event.cmi +event.cmx : \ + mutex.cmx \ + condition.cmx \ + event.cmi +event.cmi : +mutex.cmo : \ + mutex.cmi +mutex.cmx : \ + mutex.cmi +mutex.cmi : +semaphore.cmo : \ + mutex.cmi \ + condition.cmi \ + semaphore.cmi +semaphore.cmx : \ + mutex.cmx \ + condition.cmx \ + semaphore.cmi +semaphore.cmi : +thread.cmo : \ + thread.cmi +thread.cmx : \ + thread.cmi +thread.cmi : diff --git a/ocaml/otherlibs/systhreads4/META.in b/ocaml/otherlibs/systhreads4/META.in new file mode 100644 index 00000000000..07d301f116a --- /dev/null +++ b/ocaml/otherlibs/systhreads4/META.in @@ -0,0 +1,13 @@ +# @configure_input@ + +version = "@VERSION@" +description = "Multi-threading" +requires = "unix" +archive(byte) = "threads.cma" +archive(native) = "threads.cmxa" +type_of_threads = "posix" + +package "posix" ( + requires = "threads" + version = "[internal]" +) diff --git a/ocaml/otherlibs/systhreads4/Makefile b/ocaml/otherlibs/systhreads4/Makefile new file mode 100644 index 00000000000..1404646da39 --- /dev/null +++ b/ocaml/otherlibs/systhreads4/Makefile @@ -0,0 +1,170 @@ +#************************************************************************** +#* * +#* OCaml * +#* * +#* Xavier Leroy, projet Cristal, INRIA Rocquencourt * +#* * +#* Copyright 1999 Institut National de Recherche en Informatique et * +#* en Automatique. 
* +#* * +#* All rights reserved. This file is distributed under the terms of * +#* the GNU Lesser General Public License version 2.1, with the * +#* special exception on linking described in the file LICENSE. * +#* * +#************************************************************************** + +ROOTDIR=../.. + +include $(ROOTDIR)/Makefile.common +include $(ROOTDIR)/Makefile.best_binaries + +ifneq "$(CCOMPTYPE)" "msvc" +OC_CFLAGS += -g +endif + +OC_CFLAGS += $(SHAREDLIB_CFLAGS) $(PTHREAD_CFLAGS) + +LIBS = $(STDLIBFLAGS) -I $(ROOTDIR)/otherlibs/unix + +CAMLC=$(BEST_OCAMLC) $(LIBS) +CAMLOPT=$(BEST_OCAMLOPT) $(LIBS) + +MKLIB=$(OCAMLRUN) $(ROOTDIR)/tools/ocamlmklib$(EXE) +COMPFLAGS=-w +33..39 -warn-error +A -g -bin-annot +ifeq "$(FLAMBDA)" "true" +OPTCOMPFLAGS += -O3 +endif + +LIBNAME=threads + +# Note: the header on which object files produced from st_stubs.c +# should actually depend is known for sure only at compile-time. +# That's why this dependency is handled in the Makefile directly +# and removed from the output of the C compiler during make depend + +BYTECODE_C_OBJS=st_stubs.b.$(O) +NATIVECODE_C_OBJS=st_stubs.n.$(O) + +THREADS_SOURCES = thread.ml mutex.ml condition.ml event.ml semaphore.ml + +THREADS_BCOBJS = $(THREADS_SOURCES:.ml=.cmo) +THREADS_NCOBJS = $(THREADS_SOURCES:.ml=.cmx) + +MLIFILES=thread.mli mutex.mli condition.mli event.mli semaphore.mli + +CMIFILES=$(MLIFILES:.mli=.cmi) + +all: lib$(LIBNAME).$(A) $(LIBNAME).cma $(CMIFILES) + +allopt: lib$(LIBNAME)nat.$(A) $(LIBNAME).cmxa $(CMIFILES) + +lib$(LIBNAME).$(A): $(BYTECODE_C_OBJS) + $(V_OCAMLMKLIB)$(MKLIB_CMD) -o $(LIBNAME) $(BYTECODE_C_OBJS) + +lib$(LIBNAME)nat.$(A): OC_CFLAGS += $(OC_NATIVE_CFLAGS) -DNATIVE_CODE + +lib$(LIBNAME)nat.$(A): $(NATIVECODE_C_OBJS) + $(V_OCAMLMKLIB)$(MKLIB_CMD) -o $(LIBNAME)nat $^ + +$(LIBNAME).cma: $(THREADS_BCOBJS) +ifeq "$(UNIX_OR_WIN32)" "unix" + $(V_OCAMLMKLIB)$(MKLIB) -o $(LIBNAME) -ocamlc '$(CAMLC)' -cclib -lunixbyt -linkall $^ +# TODO: Figure out why -cclib -lunix 
is used here. +# It may be because of the threadsUnix module which is deprecated. +# It may hence be good to figure out whether this module shouldn't be +# removed, and then -cclib -lunix arguments. +else # Windows + $(V_OCAMLMKLIB)$(MKLIB) -o $(LIBNAME) -ocamlc "$(CAMLC)" -linkall $^ +endif + +# See remark above: force static linking of libthreadsnat.a +$(LIBNAME).cmxa: $(THREADS_NCOBJS) + $(V_LINKOPT)$(CAMLOPT) -linkall -a -cclib -lthreadsnat -o $@ $^ + +# Note: I removed "-cclib -lunix" from the line above. +# Indeed, if we link threads.cmxa, then we must also link unix.cmxa, +# which itself will pass -lunix to the C linker. It seems more +# modular to me this way. -- Alain + +# The following lines produce two object files st_stubs.b.$(O) and +# st_stubs.n.$(O) from the same source file st_stubs.c (it is compiled +# twice, each time with different options). + +ifeq "$(COMPUTE_DEPS)" "true" +st_stubs.%.$(O): st_stubs.c +else +st_stubs.%.$(O): st_stubs.c $(RUNTIME_HEADERS) $(wildcard *.h) +endif + $(V_CC)$(CC) -c $(OC_CFLAGS) $(CFLAGS) $(OC_CPPFLAGS) $(CPPFLAGS) \ + $(OUTPUTOBJ)$@ $< + +.PHONY: partialclean +partialclean: + rm -f *.cm* + +.PHONY: clean +clean: partialclean + rm -f dllthreads*.so dllthreads*.dll *.a *.lib *.o *.obj + rm -rf $(DEPDIR) + +.PHONY: distclean +distclean: clean + rm -f META + +INSTALL_THREADSLIBDIR=$(INSTALL_LIBDIR)/$(LIBNAME) + +install: + if test -f dllthreads$(EXT_DLL); then \ + $(INSTALL_PROG) dllthreads$(EXT_DLL) "$(INSTALL_STUBLIBDIR)"; \ + fi + $(INSTALL_DATA) libthreads.$(A) "$(INSTALL_LIBDIR)" + $(MKDIR) "$(INSTALL_THREADSLIBDIR)" + $(INSTALL_DATA) \ + $(CMIFILES) threads.cma META \ + "$(INSTALL_THREADSLIBDIR)" +ifeq "$(INSTALL_SOURCE_ARTIFACTS)" "true" + $(INSTALL_DATA) \ + $(CMIFILES:.cmi=.cmti) \ + "$(INSTALL_THREADSLIBDIR)" + $(INSTALL_DATA) $(MLIFILES) "$(INSTALL_THREADSLIBDIR)" +endif + $(INSTALL_DATA) threads.h "$(INSTALL_INCDIR)" + +installopt: + $(INSTALL_DATA) libthreadsnat.$(A) "$(INSTALL_LIBDIR)" + $(INSTALL_DATA) \ 
+ $(THREADS_NCOBJS) threads.cmxa threads.$(A) \ + "$(INSTALL_THREADSLIBDIR)" + +%.cmi: %.mli + $(V_OCAMLC)$(CAMLC) -c $(COMPFLAGS) $< + +%.cmo: %.ml + $(V_OCAMLC)$(CAMLC) -c $(COMPFLAGS) $< + +%.cmx: %.ml + $(V_OCAMLOPT)$(CAMLOPT) -c $(COMPFLAGS) $(OPTCOMPFLAGS) $< + +DEP_FILES := st_stubs.b.$(D) +ifeq "$(NATIVE_COMPILER)" "true" +DEP_FILES += st_stubs.n.$(D) +endif + +ifeq "$(COMPUTE_DEPS)" "true" +include $(addprefix $(DEPDIR)/, $(DEP_FILES)) +endif + +%.n.$(D): OC_CPPFLAGS += $(OC_NATIVE_CPPFLAGS) + +define GEN_RULE +$(DEPDIR)/%.$(1).$(D): %.c | $(DEPDIR) + $$(V_CCDEPS)$$(DEP_CC) $$(OC_CPPFLAGS) $$(CPPFLAGS) $$< -MT '$$*.$(1).$(O)' -MF $$@ +endef + +$(foreach object_type, b n, $(eval $(call GEN_RULE,$(object_type)))) + +.PHONY: depend +depend: + $(V_GEN)$(OCAMLRUN) $(ROOTDIR)/boot/ocamlc -depend -slash *.mli *.ml > .depend + +include .depend diff --git a/ocaml/otherlibs/systhreads/byte/dune b/ocaml/otherlibs/systhreads4/byte/dune similarity index 100% rename from ocaml/otherlibs/systhreads/byte/dune rename to ocaml/otherlibs/systhreads4/byte/dune diff --git a/ocaml/otherlibs/systhreads/condition.ml b/ocaml/otherlibs/systhreads4/condition.ml similarity index 100% rename from ocaml/otherlibs/systhreads/condition.ml rename to ocaml/otherlibs/systhreads4/condition.ml diff --git a/ocaml/otherlibs/systhreads/condition.mli b/ocaml/otherlibs/systhreads4/condition.mli similarity index 100% rename from ocaml/otherlibs/systhreads/condition.mli rename to ocaml/otherlibs/systhreads4/condition.mli diff --git a/ocaml/otherlibs/systhreads4/dune b/ocaml/otherlibs/systhreads4/dune new file mode 100644 index 00000000000..5c9f889f564 --- /dev/null +++ b/ocaml/otherlibs/systhreads4/dune @@ -0,0 +1,58 @@ +;************************************************************************** +;* * +;* OCaml * +;* * +;* Mark Shinwell, Jane Street Europe * +;* * +;* Copyright 2020 Jane Street Group LLC * +;* * +;* All rights reserved. 
This file is distributed under the terms of * +;* the GNU Lesser General Public License version 2.1, with the * +;* special exception on linking described in the file LICENSE. * +;* * +;************************************************************************** + + +; For some reason the C header files aren't being found if this library +; is given a public name, so we do the installation manually. +(install + (files + (byte/threads.cma as threads/threads.cma) + (native/threadsnat.cmxa as threads/threads.cmxa) + (native/threadsnat.a as threads/threads.a) + + (byte/libthreads_stubs.a as libthreads_stubs.a) + (byte/dllthreads_stubs.so as stublibs/dllthreads_stubs.so) + (native/libthreadsnat_stubs.a as libthreadsnat_stubs.a) + (native/libthreadsnat_stubs.a as libthreadsnat_stubs_native.a) ; for special_dune compat + + + (thread.mli as threads/thread.mli) + (condition.mli as threads/condition.mli) + (event.mli as threads/event.mli) + (mutex.mli as threads/mutex.mli) + (semaphore.mli as threads/semaphore.mli) + + (threads.h as caml/threads.h) + + (native/.threadsnat.objs/native/condition.cmx as threads/condition.cmx) + (native/.threadsnat.objs/native/event.cmx as threads/event.cmx) + (native/.threadsnat.objs/native/mutex.cmx as threads/mutex.cmx) + (native/.threadsnat.objs/native/semaphore.cmx as threads/semaphore.cmx) + (native/.threadsnat.objs/native/thread.cmx as threads/thread.cmx) + + (byte/.threads.objs/byte/condition.cmi as threads/condition.cmi) + (byte/.threads.objs/byte/condition.cmti as threads/condition.cmti) + (byte/.threads.objs/byte/event.cmi as threads/event.cmi) + (byte/.threads.objs/byte/event.cmti as threads/event.cmti) + (byte/.threads.objs/byte/mutex.cmi as threads/mutex.cmi) + (byte/.threads.objs/byte/mutex.cmti as threads/mutex.cmti) + (byte/.threads.objs/byte/semaphore.cmi as threads/semaphore.cmi) + (byte/.threads.objs/byte/semaphore.cmti as threads/semaphore.cmti) + (byte/.threads.objs/byte/thread.cmi as threads/thread.cmi) + 
(byte/.threads.objs/byte/thread.cmti as threads/thread.cmti) + ) + (section lib) + (package ocaml)) + + diff --git a/ocaml/otherlibs/systhreads4/event.ml b/ocaml/otherlibs/systhreads4/event.ml new file mode 100644 index 00000000000..fc8a65b042f --- /dev/null +++ b/ocaml/otherlibs/systhreads4/event.ml @@ -0,0 +1,278 @@ +(**************************************************************************) +(* *) +(* OCaml *) +(* *) +(* David Nowak and Xavier Leroy, projet Cristal, INRIA Rocquencourt *) +(* *) +(* Copyright 1996 Institut National de Recherche en Informatique et *) +(* en Automatique. *) +(* *) +(* All rights reserved. This file is distributed under the terms of *) +(* the GNU Lesser General Public License version 2.1, with the *) +(* special exception on linking described in the file LICENSE. *) +(* *) +(**************************************************************************) + +[@@@ocaml.flambda_o3] + +(* Events *) +type 'a basic_event = + { poll: unit -> bool; + (* If communication can take place immediately, return true. *) + suspend: unit -> unit; + (* Offer the communication on the channel and get ready + to suspend current process. *) + result: unit -> 'a } + (* Return the result of the communication *) + +type 'a behavior = int ref -> Condition.t -> int -> 'a basic_event + +type 'a event = + Communication of 'a behavior + | Choose of 'a event list + | WrapAbort of 'a event * (unit -> unit) + | Guard of (unit -> 'a event) + +(* Communication channels *) +type 'a channel = + { mutable writes_pending: 'a communication Queue.t; + (* All offers to write on it *) + mutable reads_pending: 'a communication Queue.t } + (* All offers to read from it *) + +(* Communication offered *) +and 'a communication = + { performed: int ref; (* -1 if not performed yet, set to the number *) + (* of the matching communication after rendez-vous. *) + condition: Condition.t; (* To restart the blocked thread. *) + mutable data: 'a option; (* The data sent or received. 
*) + event_number: int } (* Event number in select *) + +(* Create a channel *) + +let new_channel () = + { writes_pending = Queue.create(); + reads_pending = Queue.create() } + +(* Basic synchronization function *) + +let masterlock = Mutex.create() + +let do_aborts abort_env genev performed = + if abort_env <> [] then begin + if performed >= 0 then begin + let ids_done = snd genev.(performed) in + List.iter + (fun (id,f) -> if not (List.mem id ids_done) then f ()) + abort_env + end else begin + List.iter (fun (_,f) -> f ()) abort_env + end + end + +let basic_sync abort_env genev = + let performed = ref (-1) in + let condition = Condition.create() in + let bev = Array.make (Array.length genev) + (fst (genev.(0)) performed condition 0) in + for i = 1 to Array.length genev - 1 do + bev.(i) <- (fst genev.(i)) performed condition i + done; + (* See if any of the events is already activable *) + let rec poll_events i = + if i >= Array.length bev + then false + else bev.(i).poll() || poll_events (i+1) in + Mutex.lock masterlock; + if not (poll_events 0) then begin + (* Suspend on all events *) + for i = 0 to Array.length bev - 1 do bev.(i).suspend() done; + (* Wait until the condition is signalled *) + Condition.wait condition masterlock; + (* PR#7013: protect against spurious wake-up *) + while !performed < 0 do Condition.wait condition masterlock done + end; + Mutex.unlock masterlock; + (* Extract the result *) + if abort_env = [] then + (* Preserve tail recursion *) + bev.(!performed).result() + else begin + let num = !performed in + let result = bev.(num).result() in + (* Handle the aborts and return the result *) + do_aborts abort_env genev num; + result + end + +(* Apply a random permutation on an array *) + +let scramble_array a = + let len = Array.length a in + if len = 0 then invalid_arg "Event.choose"; + for i = len - 1 downto 1 do + let j = Random.int (i + 1) in + let temp = a.(i) in a.(i) <- a.(j); a.(j) <- temp + done; + a + +(* Main synchronization 
function *) + +let gensym = let count = ref 0 in fun () -> incr count; !count + +let rec flatten_event + (abort_list : int list) + (accu : ('a behavior * int list) list) + (accu_abort : (int * (unit -> unit)) list) + ev = + match ev with + Communication bev -> ((bev,abort_list) :: accu) , accu_abort + | WrapAbort (ev,fn) -> + let id = gensym () in + flatten_event (id :: abort_list) accu ((id,fn)::accu_abort) ev + | Choose evl -> + let rec flatten_list accu' accu_abort'= function + ev :: l -> + let (accu'',accu_abort'') = + flatten_event abort_list accu' accu_abort' ev in + flatten_list accu'' accu_abort'' l + | [] -> (accu',accu_abort') in + flatten_list accu accu_abort evl + | Guard fn -> flatten_event abort_list accu accu_abort (fn ()) + +let sync ev = + let (evl,abort_env) = flatten_event [] [] [] ev in + basic_sync abort_env (scramble_array(Array.of_list evl)) + +(* Event polling -- like sync, but non-blocking *) + +let basic_poll abort_env genev = + let performed = ref (-1) in + let condition = Condition.create() in + let bev = Array.make(Array.length genev) + (fst genev.(0) performed condition 0) in + for i = 1 to Array.length genev - 1 do + bev.(i) <- fst genev.(i) performed condition i + done; + (* See if any of the events is already activable *) + let rec poll_events i = + if i >= Array.length bev + then false + else bev.(i).poll() || poll_events (i+1) in + Mutex.lock masterlock; + let ready = poll_events 0 in + if ready then begin + (* Extract the result *) + Mutex.unlock masterlock; + let result = Some(bev.(!performed).result()) in + do_aborts abort_env genev !performed; result + end else begin + (* Cancel the communication offers *) + performed := 0; + Mutex.unlock masterlock; + do_aborts abort_env genev (-1); + None + end + +let poll ev = + let (evl,abort_env) = flatten_event [] [] [] ev in + basic_poll abort_env (scramble_array(Array.of_list evl)) + +(* Remove all communication opportunities already synchronized *) + +let cleanup_queue q = + let q' = 
Queue.create() in + Queue.iter (fun c -> if !(c.performed) = -1 then Queue.add c q') q; + q' + +(* Event construction *) + +let always data = + Communication(fun performed condition evnum -> + { poll = (fun () -> performed := evnum; true); + suspend = (fun () -> ()); + result = (fun () -> data) }) + +let send channel data = + Communication(fun performed condition evnum -> + let wcomm = + { performed = performed; + condition = condition; + data = Some data; + event_number = evnum } in + { poll = (fun () -> + let rec poll () = + let rcomm = Queue.take channel.reads_pending in + if !(rcomm.performed) >= 0 then + poll () + else begin + rcomm.data <- wcomm.data; + performed := evnum; + rcomm.performed := rcomm.event_number; + Condition.signal rcomm.condition + end in + try + poll(); + true + with Queue.Empty -> + false); + suspend = (fun () -> + channel.writes_pending <- cleanup_queue channel.writes_pending; + Queue.add wcomm channel.writes_pending); + result = (fun () -> ()) }) + +let receive channel = + Communication(fun performed condition evnum -> + let rcomm = + { performed = performed; + condition = condition; + data = None; + event_number = evnum } in + { poll = (fun () -> + let rec poll () = + let wcomm = Queue.take channel.writes_pending in + if !(wcomm.performed) >= 0 then + poll () + else begin + rcomm.data <- wcomm.data; + performed := evnum; + wcomm.performed := wcomm.event_number; + Condition.signal wcomm.condition + end in + try + poll(); + true + with Queue.Empty -> + false); + suspend = (fun () -> + channel.reads_pending <- cleanup_queue channel.reads_pending; + Queue.add rcomm channel.reads_pending); + result = (fun () -> + match rcomm.data with + None -> invalid_arg "Event.receive" + | Some res -> res) }) + +let choose evl = Choose evl + +let wrap_abort ev fn = WrapAbort(ev,fn) + +let guard fn = Guard fn + +let rec wrap ev fn = + match ev with + Communication genev -> + Communication(fun performed condition evnum -> + let bev = genev performed 
condition evnum in + { poll = bev.poll; + suspend = bev.suspend; + result = (fun () -> fn(bev.result())) }) + | Choose evl -> + Choose(List.map (fun ev -> wrap ev fn) evl) + | WrapAbort (ev, f') -> + WrapAbort (wrap ev fn, f') + | Guard gu -> + Guard(fun () -> wrap (gu()) fn) + +(* Convenience functions *) + +let select evl = sync(Choose evl) diff --git a/ocaml/otherlibs/systhreads4/event.mli b/ocaml/otherlibs/systhreads4/event.mli new file mode 100644 index 00000000000..fd452652c69 --- /dev/null +++ b/ocaml/otherlibs/systhreads4/event.mli @@ -0,0 +1,81 @@ +(**************************************************************************) +(* *) +(* OCaml *) +(* *) +(* David Nowak and Xavier Leroy, projet Cristal, INRIA Rocquencourt *) +(* *) +(* Copyright 1996 Institut National de Recherche en Informatique et *) +(* en Automatique. *) +(* *) +(* All rights reserved. This file is distributed under the terms of *) +(* the GNU Lesser General Public License version 2.1, with the *) +(* special exception on linking described in the file LICENSE. *) +(* *) +(**************************************************************************) + +(** First-class synchronous communication. + + This module implements synchronous inter-thread communications over + channels. As in John Reppy's Concurrent ML system, the communication + events are first-class values: they can be built and combined + independently before being offered for communication. +*) + +type 'a channel +(** The type of communication channels carrying values of type ['a]. *) + +val new_channel : unit -> 'a channel +(** Return a new channel. *) + +type +'a event +(** The type of communication events returning a result of type ['a]. *) + +(** [send ch v] returns the event consisting in sending the value [v] + over the channel [ch]. The result value of this event is [()]. *) +val send : 'a channel -> 'a -> unit event + +(** [receive ch] returns the event consisting in receiving a value + from the channel [ch]. 
The result value of this event is the + value received. *) +val receive : 'a channel -> 'a event + +val always : 'a -> 'a event +(** [always v] returns an event that is always ready for + synchronization. The result value of this event is [v]. *) + +val choose : 'a event list -> 'a event +(** [choose evl] returns the event that is the alternative of + all the events in the list [evl]. *) + +val wrap : 'a event -> ('a -> 'b) -> 'b event +(** [wrap ev fn] returns the event that performs the same communications + as [ev], then applies the post-processing function [fn] + on the return value. *) + +val wrap_abort : 'a event -> (unit -> unit) -> 'a event +(** [wrap_abort ev fn] returns the event that performs + the same communications as [ev], but if it is not selected + the function [fn] is called after the synchronization. *) + +val guard : (unit -> 'a event) -> 'a event +(** [guard fn] returns the event that, when synchronized, computes + [fn()] and behaves as the resulting event. This enables + computing events with side-effects at the time of the synchronization + operation. *) + +val sync : 'a event -> 'a +(** 'Synchronize' on an event: offer all the communication + possibilities specified in the event to the outside world, + and block until one of the communications succeed. The result + value of that communication is returned. *) + +val select : 'a event list -> 'a +(** 'Synchronize' on an alternative of events. + [select evl] is shorthand for [sync(choose evl)]. *) + +val poll : 'a event -> 'a option +(** Non-blocking version of {!Event.sync}: offer all the communication + possibilities specified in the event to the outside world, + and if one can take place immediately, perform it and return + [Some r] where [r] is the result value of that communication. + Otherwise, return [None] without blocking. 
*) diff --git a/ocaml/otherlibs/systhreads/mutex.ml b/ocaml/otherlibs/systhreads4/mutex.ml similarity index 100% rename from ocaml/otherlibs/systhreads/mutex.ml rename to ocaml/otherlibs/systhreads4/mutex.ml diff --git a/ocaml/otherlibs/systhreads/mutex.mli b/ocaml/otherlibs/systhreads4/mutex.mli similarity index 100% rename from ocaml/otherlibs/systhreads/mutex.mli rename to ocaml/otherlibs/systhreads4/mutex.mli diff --git a/ocaml/otherlibs/systhreads/native/dune b/ocaml/otherlibs/systhreads4/native/dune similarity index 100% rename from ocaml/otherlibs/systhreads/native/dune rename to ocaml/otherlibs/systhreads4/native/dune diff --git a/ocaml/otherlibs/systhreads/semaphore.ml b/ocaml/otherlibs/systhreads4/semaphore.ml similarity index 100% rename from ocaml/otherlibs/systhreads/semaphore.ml rename to ocaml/otherlibs/systhreads4/semaphore.ml diff --git a/ocaml/otherlibs/systhreads/semaphore.mli b/ocaml/otherlibs/systhreads4/semaphore.mli similarity index 100% rename from ocaml/otherlibs/systhreads/semaphore.mli rename to ocaml/otherlibs/systhreads4/semaphore.mli diff --git a/ocaml/otherlibs/systhreads4/st_posix.h b/ocaml/otherlibs/systhreads4/st_posix.h new file mode 100644 index 00000000000..a43c78fbc92 --- /dev/null +++ b/ocaml/otherlibs/systhreads4/st_posix.h @@ -0,0 +1,558 @@ +/**************************************************************************/ +/* */ +/* OCaml */ +/* */ +/* Xavier Leroy and Damien Doligez, INRIA Rocquencourt */ +/* */ +/* Copyright 2009 Institut National de Recherche en Informatique et */ +/* en Automatique. */ +/* */ +/* All rights reserved. This file is distributed under the terms of */ +/* the GNU Lesser General Public License version 2.1, with the */ +/* special exception on linking described in the file LICENSE. 
*/ +/* */ +/**************************************************************************/ + +/* POSIX thread implementation of the "st" interface */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef __linux__ +#include +#include +#include +#include +#include +#endif + +typedef int st_retcode; + +#define SIGPREEMPTION SIGVTALRM + +/* OS-specific initialization */ + +static int st_initialize(void) +{ + caml_sigmask_hook = pthread_sigmask; + return 0; +} + +/* Thread creation. Created in detached mode if [res] is NULL. */ + +typedef pthread_t st_thread_id; + +static int st_thread_create(st_thread_id * res, + void * (*fn)(void *), void * arg) +{ + pthread_t thr; + pthread_attr_t attr; + int rc; + + pthread_attr_init(&attr); + if (res == NULL) pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); + rc = pthread_create(&thr, &attr, fn, arg); + if (res != NULL) *res = thr; + return rc; +} + +#define ST_THREAD_FUNCTION void * + +/* Cleanup at thread exit */ + +Caml_inline void st_thread_cleanup(void) +{ + return; +} + +/* Thread termination */ + +CAMLnoreturn_start +static void st_thread_exit(void) +CAMLnoreturn_end; + +static void st_thread_exit(void) +{ + pthread_exit(NULL); +} + +static void st_thread_join(st_thread_id thr) +{ + pthread_join(thr, NULL); + /* best effort: ignore errors */ +} + +/* Thread-specific state */ + +typedef pthread_key_t st_tlskey; + +static int st_tls_newkey(st_tlskey * res) +{ + return pthread_key_create(res, NULL); +} + +Caml_inline void * st_tls_get(st_tlskey k) +{ + return pthread_getspecific(k); +} + +Caml_inline void st_tls_set(st_tlskey k, void * v) +{ + pthread_setspecific(k, v); +} + +/* Windows-specific hook. 
*/ +Caml_inline void st_thread_set_id(intnat id) +{ + return; +} + +/* If we're using glibc, use a custom condition variable implementation to + avoid this bug: https://sourceware.org/bugzilla/show_bug.cgi?id=25847 + + For now we only have this on linux because it directly uses the linux futex + syscalls. */ +#if defined(__linux__) && defined(__GNU_LIBRARY__) && defined(__GLIBC__) && defined(__GLIBC_MINOR__) +typedef struct { + volatile unsigned counter; +} custom_condvar; + +static int custom_condvar_init(custom_condvar * cv) +{ + cv->counter = 0; + return 0; +} + +static int custom_condvar_destroy(custom_condvar * cv) +{ + return 0; +} + +static int custom_condvar_wait(custom_condvar * cv, pthread_mutex_t * mutex) +{ + unsigned old_count = cv->counter; + pthread_mutex_unlock(mutex); + syscall(SYS_futex, &cv->counter, FUTEX_WAIT_PRIVATE, old_count, NULL, NULL, 0); + pthread_mutex_lock(mutex); + return 0; +} + +static int custom_condvar_signal(custom_condvar * cv) +{ + __sync_add_and_fetch(&cv->counter, 1); + syscall(SYS_futex, &cv->counter, FUTEX_WAKE_PRIVATE, 1, NULL, NULL, 0); + return 0; +} + +static int custom_condvar_broadcast(custom_condvar * cv) +{ + __sync_add_and_fetch(&cv->counter, 1); + syscall(SYS_futex, &cv->counter, FUTEX_WAKE_PRIVATE, INT_MAX, NULL, NULL, 0); + return 0; +} +#else +typedef pthread_cond_t custom_condvar; + +static int custom_condvar_init(custom_condvar * cv) +{ + return pthread_cond_init(cv, NULL); +} + +static int custom_condvar_destroy(custom_condvar * cv) +{ + return pthread_cond_destroy(cv); +} + +static int custom_condvar_wait(custom_condvar * cv, pthread_mutex_t * mutex) +{ + return pthread_cond_wait(cv, mutex); +} + +static int custom_condvar_signal(custom_condvar * cv) +{ + return pthread_cond_signal(cv); +} + +static int custom_condvar_broadcast(custom_condvar * cv) +{ + return pthread_cond_broadcast(cv); +} +#endif + +/* The master lock. 
This is a mutex that is held most of the time, + so we implement it in a slightly convoluted way to avoid + all risks of busy-waiting. Also, we count the number of waiting + threads. */ + +typedef struct { + pthread_mutex_t lock; /* to protect contents */ + int busy; /* 0 = free, 1 = taken */ + volatile int waiters; /* number of threads waiting on master lock */ + custom_condvar is_free; /* signaled when free */ +} st_masterlock; + +static void st_masterlock_init(st_masterlock * m) +{ + pthread_mutex_init(&m->lock, NULL); + custom_condvar_init(&m->is_free); + m->busy = 1; + m->waiters = 0; +} + +static void st_masterlock_acquire(st_masterlock * m) +{ + pthread_mutex_lock(&m->lock); + while (m->busy) { + m->waiters ++; + custom_condvar_wait(&m->is_free, &m->lock); + m->waiters --; + } + m->busy = 1; + pthread_mutex_unlock(&m->lock); +} + +static void st_masterlock_release(st_masterlock * m) +{ + pthread_mutex_lock(&m->lock); + m->busy = 0; + pthread_mutex_unlock(&m->lock); + custom_condvar_signal(&m->is_free); +} + +CAMLno_tsan /* This can be called for reading [waiters] without locking. */ +Caml_inline int st_masterlock_waiters(st_masterlock * m) +{ + return m->waiters; +} + +/* Scheduling hints */ + +/* This is mostly equivalent to release(); acquire(), but better. In particular, + release(); acquire(); leaves both us and the waiter we signal() racing to + acquire the lock. Calling yield or sleep helps there but does not solve the + problem. Sleeping ourselves is much more reliable--and since we're handing + off the lock to a waiter we know exists, it's safe, as they'll certainly + re-wake us later. +*/ +Caml_inline void st_thread_yield(st_masterlock * m) +{ + pthread_mutex_lock(&m->lock); + /* We must hold the lock to call this. */ + assert(m->busy); + + /* We already checked this without the lock, but we might have raced--if + there's no waiter, there's nothing to do and no one to wake us if we did + wait, so just keep going. 
*/ + if (m->waiters == 0) { + pthread_mutex_unlock(&m->lock); + return; + } + + m->busy = 0; + custom_condvar_signal(&m->is_free); + m->waiters++; + do { + /* Note: the POSIX spec prevents the above signal from pairing with this + wait, which is good: we'll reliably continue waiting until the next + yield() or enter_blocking_section() call (or we see a spurious condvar + wakeup, which are rare at best.) */ + custom_condvar_wait(&m->is_free, &m->lock); + } while (m->busy); + m->busy = 1; + m->waiters--; + pthread_mutex_unlock(&m->lock); +} + +/* Mutexes */ + +typedef pthread_mutex_t * st_mutex; + +static int st_mutex_create(st_mutex * res) +{ + int rc; + pthread_mutexattr_t attr; + st_mutex m; + + rc = pthread_mutexattr_init(&attr); + if (rc != 0) goto error1; + rc = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK); + if (rc != 0) goto error2; + m = caml_stat_alloc_noexc(sizeof(pthread_mutex_t)); + if (m == NULL) { rc = ENOMEM; goto error2; } + rc = pthread_mutex_init(m, &attr); + if (rc != 0) goto error3; + pthread_mutexattr_destroy(&attr); + *res = m; + return 0; +error3: + caml_stat_free(m); +error2: + pthread_mutexattr_destroy(&attr); +error1: + return rc; +} + +static int st_mutex_destroy(st_mutex m) +{ + int rc; + rc = pthread_mutex_destroy(m); + caml_stat_free(m); + return rc; +} + +#define MUTEX_DEADLOCK EDEADLK + +Caml_inline int st_mutex_lock(st_mutex m) +{ + return pthread_mutex_lock(m); +} + +#define MUTEX_PREVIOUSLY_UNLOCKED 0 +#define MUTEX_ALREADY_LOCKED EBUSY + +Caml_inline int st_mutex_trylock(st_mutex m) +{ + return pthread_mutex_trylock(m); +} + +#define MUTEX_NOT_OWNED EPERM + +Caml_inline int st_mutex_unlock(st_mutex m) +{ + return pthread_mutex_unlock(m); +} + +/* Condition variables */ + +typedef custom_condvar * st_condvar; + +static int st_condvar_create(st_condvar * res) +{ + int rc; + st_condvar c = caml_stat_alloc_noexc(sizeof(custom_condvar)); + if (c == NULL) return ENOMEM; + rc = custom_condvar_init(c); + if (rc != 0) { 
caml_stat_free(c); return rc; } + *res = c; + return 0; +} + +static int st_condvar_destroy(st_condvar c) +{ + int rc; + rc = custom_condvar_destroy(c); + caml_stat_free(c); + return rc; +} + +Caml_inline int st_condvar_signal(st_condvar c) +{ + return custom_condvar_signal(c); +} + +Caml_inline int st_condvar_broadcast(st_condvar c) +{ + return custom_condvar_broadcast(c); +} + +Caml_inline int st_condvar_wait(st_condvar c, st_mutex m) +{ + return custom_condvar_wait(c, m); +} + +/* Triggered events */ + +typedef struct st_event_struct { + pthread_mutex_t lock; /* to protect contents */ + int status; /* 0 = not triggered, 1 = triggered */ + custom_condvar triggered; /* signaled when triggered */ +} * st_event; + +static int st_event_create(st_event * res) +{ + int rc; + st_event e = caml_stat_alloc_noexc(sizeof(struct st_event_struct)); + if (e == NULL) return ENOMEM; + rc = pthread_mutex_init(&e->lock, NULL); + if (rc != 0) { caml_stat_free(e); return rc; } + rc = custom_condvar_init(&e->triggered); + if (rc != 0) + { pthread_mutex_destroy(&e->lock); caml_stat_free(e); return rc; } + e->status = 0; + *res = e; + return 0; +} + +static int st_event_destroy(st_event e) +{ + int rc1, rc2; + rc1 = pthread_mutex_destroy(&e->lock); + rc2 = custom_condvar_destroy(&e->triggered); + caml_stat_free(e); + return rc1 != 0 ? 
rc1 : rc2; +} + +static int st_event_trigger(st_event e) +{ + int rc; + rc = pthread_mutex_lock(&e->lock); + if (rc != 0) return rc; + e->status = 1; + rc = pthread_mutex_unlock(&e->lock); + if (rc != 0) return rc; + rc = custom_condvar_broadcast(&e->triggered); + return rc; +} + +static int st_event_wait(st_event e) +{ + int rc; + rc = pthread_mutex_lock(&e->lock); + if (rc != 0) return rc; + while(e->status == 0) { + rc = custom_condvar_wait(&e->triggered, &e->lock); + if (rc != 0) return rc; + } + rc = pthread_mutex_unlock(&e->lock); + return rc; +} + +/* Reporting errors */ + +static void st_check_error(int retcode, char * msg) +{ + char * err; + int errlen, msglen; + value str; + + if (retcode == 0) return; + if (retcode == ENOMEM) caml_raise_out_of_memory(); + err = strerror(retcode); + msglen = strlen(msg); + errlen = strlen(err); + str = caml_alloc_string(msglen + 2 + errlen); + memmove (&Byte(str, 0), msg, msglen); + memmove (&Byte(str, msglen), ": ", 2); + memmove (&Byte(str, msglen + 2), err, errlen); + caml_raise_sys_error(str); +} + +/* Variable used to stop the "tick" thread */ +static volatile int caml_tick_thread_stop = 0; + +/* The tick thread: posts a SIGPREEMPTION signal periodically */ + +static void * caml_thread_tick(void * arg) +{ + struct timeval timeout; + sigset_t mask; + + /* Block all signals so that we don't try to execute an OCaml signal handler*/ + sigfillset(&mask); + pthread_sigmask(SIG_BLOCK, &mask, NULL); + while(! caml_tick_thread_stop) { + /* select() seems to be the most efficient way to suspend the + thread for sub-second intervals */ + timeout.tv_sec = 0; + timeout.tv_usec = Thread_timeout * 1000; + select(0, NULL, NULL, NULL, &timeout); + /* The preemption signal should never cause a callback, so don't + go through caml_handle_signal(), just record signal delivery via + caml_record_signal(). 
*/ + caml_record_signal(SIGPREEMPTION); + } + return NULL; +} + +/* "At fork" processing */ + +#if defined(__ANDROID__) +/* Android's libc does not include declaration of pthread_atfork; + however, it implements it since API level 10 (Gingerbread). + The reason for the omission is that Android (GUI) applications + are not supposed to fork at all, however this workaround is still + included in case OCaml is used for an Android CLI utility. */ +int pthread_atfork(void (*prepare)(void), void (*parent)(void), + void (*child)(void)); +#endif + +static int st_atfork(void (*fn)(void)) +{ + return pthread_atfork(NULL, NULL, fn); +} + +/* Signal handling */ + +static void st_decode_sigset(value vset, sigset_t * set) +{ + sigemptyset(set); + while (vset != Val_int(0)) { + int sig = caml_convert_signal_number(Int_val(Field(vset, 0))); + sigaddset(set, sig); + vset = Field(vset, 1); + } +} + +#ifndef NSIG +#define NSIG 64 +#endif + +static value st_encode_sigset(sigset_t * set) +{ + value res = Val_int(0); + int i; + + Begin_root(res) + for (i = 1; i < NSIG; i++) + if (sigismember(set, i) > 0) { + value newcons = caml_alloc_small(2, 0); + Field(newcons, 0) = Val_int(caml_rev_convert_signal_number(i)); + Field(newcons, 1) = res; + res = newcons; + } + End_roots(); + return res; +} + +static int sigmask_cmd[3] = { SIG_SETMASK, SIG_BLOCK, SIG_UNBLOCK }; + +value caml_thread_sigmask(value cmd, value sigs) /* ML */ +{ + int how; + sigset_t set, oldset; + int retcode; + + how = sigmask_cmd[Int_val(cmd)]; + st_decode_sigset(sigs, &set); + caml_enter_blocking_section(); + retcode = pthread_sigmask(how, &set, &oldset); + caml_leave_blocking_section(); + st_check_error(retcode, "Thread.sigmask"); + /* Run any handlers for just-unmasked pending signals */ + caml_process_pending_actions(); + return st_encode_sigset(&oldset); +} + +value caml_wait_signal(value sigs) /* ML */ +{ +#ifdef HAS_SIGWAIT + sigset_t set; + int retcode, signo; + + st_decode_sigset(sigs, &set); + 
caml_enter_blocking_section(); + retcode = sigwait(&set, &signo); + caml_leave_blocking_section(); + st_check_error(retcode, "Thread.wait_signal"); + return Val_int(caml_rev_convert_signal_number(signo)); +#else + caml_invalid_argument("Thread.wait_signal not implemented"); + return Val_int(0); /* not reached */ +#endif +} diff --git a/ocaml/otherlibs/systhreads4/st_pthreads.h b/ocaml/otherlibs/systhreads4/st_pthreads.h new file mode 100644 index 00000000000..26e32caba5e --- /dev/null +++ b/ocaml/otherlibs/systhreads4/st_pthreads.h @@ -0,0 +1,387 @@ +/**************************************************************************/ +/* */ +/* OCaml */ +/* */ +/* Xavier Leroy and Damien Doligez, INRIA Rocquencourt */ +/* */ +/* Copyright 2009 Institut National de Recherche en Informatique et */ +/* en Automatique. */ +/* */ +/* All rights reserved. This file is distributed under the terms of */ +/* the GNU Lesser General Public License version 2.1, with the */ +/* special exception on linking described in the file LICENSE. */ +/* */ +/**************************************************************************/ + +/* CR ocaml 5 runtime: When we update the OCaml 5 runtime, we'll need to + update this library as well. The base of + https://github.com/ocaml-flambda/ocaml-jst/pull/222 may be a good starting + point. + */ + +/* POSIX thread implementation of the "st" interface */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef HAS_UNISTD +#include +#endif +#include +#include +#include + +typedef int st_retcode; + +/* Variables used to stop "tick" threads */ +static atomic_uintnat tick_thread_stop[Max_domains]; +#define Tick_thread_stop tick_thread_stop[Caml_state->id] + +/* OS-specific initialization */ + +static int st_initialize(void) +{ + atomic_store_release(&Tick_thread_stop, 0); + return 0; +} + +/* Thread creation. Created in detached mode if [res] is NULL. 
*/ + +typedef pthread_t st_thread_id; + + +static int st_thread_create(st_thread_id * res, + void * (*fn)(void *), void * arg) +{ + pthread_t thr; + pthread_attr_t attr; + int rc; + + pthread_attr_init(&attr); + if (res == NULL) pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); + rc = pthread_create(&thr, &attr, fn, arg); + if (res != NULL) *res = thr; + return rc; +} + +#define ST_THREAD_FUNCTION void * + +/* Thread termination */ + +static void st_thread_join(st_thread_id thr) +{ + pthread_join(thr, NULL); + /* best effort: ignore errors */ +} + +/* Thread-specific state */ + +typedef pthread_key_t st_tlskey; + +static int st_tls_newkey(st_tlskey * res) +{ + return pthread_key_create(res, NULL); +} + +Caml_inline void * st_tls_get(st_tlskey k) +{ + return pthread_getspecific(k); +} + +Caml_inline void st_tls_set(st_tlskey k, void * v) +{ + pthread_setspecific(k, v); +} + +/* If we're using glibc, use a custom condition variable implementation to + avoid this bug: https://sourceware.org/bugzilla/show_bug.cgi?id=25847 + + For now we only have this on linux because it directly uses the linux futex + syscalls. 
*/ +#if defined(__linux__) && defined(__GNU_LIBRARY__) && defined(__GLIBC__) && defined(__GLIBC_MINOR__) +typedef struct { + volatile unsigned counter; +} custom_condvar; + +static int custom_condvar_init(custom_condvar * cv) +{ + cv->counter = 0; + return 0; +} + +static int custom_condvar_destroy(custom_condvar * cv) +{ + return 0; +} + +static int custom_condvar_wait(custom_condvar * cv, pthread_mutex_t * mutex) +{ + unsigned old_count = cv->counter; + pthread_mutex_unlock(mutex); + syscall(SYS_futex, &cv->counter, FUTEX_WAIT_PRIVATE, old_count, NULL, NULL, 0); + pthread_mutex_lock(mutex); + return 0; +} + +static int custom_condvar_signal(custom_condvar * cv) +{ + __sync_add_and_fetch(&cv->counter, 1); + syscall(SYS_futex, &cv->counter, FUTEX_WAKE_PRIVATE, 1, NULL, NULL, 0); + return 0; +} + +static int custom_condvar_broadcast(custom_condvar * cv) +{ + __sync_add_and_fetch(&cv->counter, 1); + syscall(SYS_futex, &cv->counter, FUTEX_WAKE_PRIVATE, INT_MAX, NULL, NULL, 0); + return 0; +} +#else +typedef pthread_cond_t custom_condvar; + +static int custom_condvar_init(custom_condvar * cv) +{ + return pthread_cond_init(cv, NULL); +} + +static int custom_condvar_destroy(custom_condvar * cv) +{ + return pthread_cond_destroy(cv); +} + +static int custom_condvar_wait(custom_condvar * cv, pthread_mutex_t * mutex) +{ + return pthread_cond_wait(cv, mutex); +} + +static int custom_condvar_signal(custom_condvar * cv) +{ + return pthread_cond_signal(cv); +} + +static int custom_condvar_broadcast(custom_condvar * cv) +{ + return pthread_cond_broadcast(cv); +} +#endif + +/* The master lock. This is a mutex that is held most of the time, + so we implement it in a slightly convoluted way to avoid + all risks of busy-waiting. Also, we count the number of waiting + threads. */ + +typedef struct { + int init; /* have the mutex and the cond been + initialized already? 
*/ + pthread_mutex_t lock; /* to protect contents */ + uintnat busy; /* 0 = free, 1 = taken */ + atomic_uintnat waiters; /* number of threads waiting on master lock */ + custom_condvar is_free; /* signaled when free */ +} st_masterlock; + +static void st_masterlock_init(st_masterlock * m) +{ + if (!m->init) { + // FIXME: check errors + pthread_mutex_init(&m->lock, NULL); + custom_condvar_init(&m->is_free); + m->init = 1; + } + m->busy = 1; + atomic_store_release(&m->waiters, 0); + + return; +}; + +static uintnat st_masterlock_waiters(st_masterlock * m) +{ + return atomic_load_acquire(&m->waiters); +} + +static void st_bt_lock_acquire(st_masterlock *m) { + + /* We do not want to signal the backup thread is it is not "working" + as it may very well not be, because we could have just resumed + execution from another thread right away. */ + if (caml_bt_is_in_blocking_section()) { + caml_bt_enter_ocaml(); + } + + caml_acquire_domain_lock(); + + return; +} + +static void st_bt_lock_release(st_masterlock *m) { + + /* Here we do want to signal the backup thread iff there's + no thread waiting to be scheduled, and the backup thread is currently + idle. */ + if (st_masterlock_waiters(m) == 0 && + caml_bt_is_in_blocking_section() == 0) { + caml_bt_exit_ocaml(); + } + + caml_release_domain_lock(); + + return; +} + +static void st_masterlock_acquire(st_masterlock *m) +{ + pthread_mutex_lock(&m->lock); + while (m->busy) { + atomic_fetch_add(&m->waiters, +1); + custom_condvar_wait(&m->is_free, &m->lock); + atomic_fetch_add(&m->waiters, -1); + } + m->busy = 1; + st_bt_lock_acquire(m); + pthread_mutex_unlock(&m->lock); + + return; +} + +static void st_masterlock_release(st_masterlock * m) +{ + pthread_mutex_lock(&m->lock); + m->busy = 0; + st_bt_lock_release(m); + custom_condvar_signal(&m->is_free); + pthread_mutex_unlock(&m->lock); + + return; +} + +/* Scheduling hints */ + +/* This is mostly equivalent to release(); acquire(), but better. 
In particular, + release(); acquire(); leaves both us and the waiter we signal() racing to + acquire the lock. Calling yield or sleep helps there but does not solve the + problem. Sleeping ourselves is much more reliable--and since we're handing + off the lock to a waiter we know exists, it's safe, as they'll certainly + re-wake us later. +*/ +Caml_inline void st_thread_yield(st_masterlock * m) +{ + pthread_mutex_lock(&m->lock); + /* We must hold the lock to call this. */ + + /* We already checked this without the lock, but we might have raced--if + there's no waiter, there's nothing to do and no one to wake us if we did + wait, so just keep going. */ + uintnat waiters = st_masterlock_waiters(m); + + if (waiters == 0) { + pthread_mutex_unlock(&m->lock); + return; + } + + m->busy = 0; + atomic_fetch_add(&m->waiters, +1); + custom_condvar_signal(&m->is_free); + /* releasing the domain lock but not triggering bt messaging + messaging the bt should not be required because yield assumes + that a thread will resume execution (be it the yielding thread + or a waiting thread */ + caml_release_domain_lock(); + + do { + /* Note: the POSIX spec prevents the above signal from pairing with this + wait, which is good: we'll reliably continue waiting until the next + yield() or enter_blocking_section() call (or we see a spurious condvar + wakeup, which are rare at best.) 
*/ + custom_condvar_wait(&m->is_free, &m->lock); + } while (m->busy); + + m->busy = 1; + atomic_fetch_add(&m->waiters, -1); + + caml_acquire_domain_lock(); + + pthread_mutex_unlock(&m->lock); + + return; +} + +/* Triggered events */ + +typedef struct st_event_struct { + pthread_mutex_t lock; /* to protect contents */ + int status; /* 0 = not triggered, 1 = triggered */ + custom_condvar triggered; /* signaled when triggered */ +} * st_event; + + +static int st_event_create(st_event * res) +{ + int rc; + st_event e = caml_stat_alloc_noexc(sizeof(struct st_event_struct)); + if (e == NULL) return ENOMEM; + rc = pthread_mutex_init(&e->lock, NULL); + if (rc != 0) { caml_stat_free(e); return rc; } + rc = custom_condvar_init(&e->triggered); + if (rc != 0) + { pthread_mutex_destroy(&e->lock); caml_stat_free(e); return rc; } + e->status = 0; + *res = e; + return 0; +} + +static int st_event_destroy(st_event e) +{ + int rc1, rc2; + rc1 = pthread_mutex_destroy(&e->lock); + rc2 = custom_condvar_destroy(&e->triggered); + caml_stat_free(e); + return rc1 != 0 ? rc1 : rc2; +} + +static int st_event_trigger(st_event e) +{ + int rc; + rc = pthread_mutex_lock(&e->lock); + if (rc != 0) return rc; + e->status = 1; + rc = pthread_mutex_unlock(&e->lock); + if (rc != 0) return rc; + rc = custom_condvar_broadcast(&e->triggered); + return rc; +} + +static int st_event_wait(st_event e) +{ + int rc; + rc = pthread_mutex_lock(&e->lock); + if (rc != 0) return rc; + while(e->status == 0) { + rc = custom_condvar_wait(&e->triggered, &e->lock); + if (rc != 0) return rc; + } + rc = pthread_mutex_unlock(&e->lock); + return rc; +} + +/* The tick thread: interrupt the domain periodically to force preemption */ + +static void * caml_thread_tick(void * arg) +{ + int *domain_id = (int *) arg; + + caml_init_domain_self(*domain_id); + caml_domain_state *domain = Caml_state; + + while(! 
atomic_load_acquire(&Tick_thread_stop)) { + st_msleep(Thread_timeout); + + atomic_store_release(&domain->requested_external_interrupt, 1); + caml_interrupt_self(); + } + return NULL; +} diff --git a/ocaml/otherlibs/systhreads4/st_stubs.c b/ocaml/otherlibs/systhreads4/st_stubs.c new file mode 100644 index 00000000000..ace63ea0cd7 --- /dev/null +++ b/ocaml/otherlibs/systhreads4/st_stubs.c @@ -0,0 +1,1123 @@ +/**************************************************************************/ +/* */ +/* OCaml */ +/* */ +/* Xavier Leroy and Damien Doligez, INRIA Rocquencourt */ +/* */ +/* Copyright 1995 Institut National de Recherche en Informatique et */ +/* en Automatique. */ +/* */ +/* All rights reserved. This file is distributed under the terms of */ +/* the GNU Lesser General Public License version 2.1, with the */ +/* special exception on linking described in the file LICENSE. */ +/* */ +/**************************************************************************/ + +// CR ocaml 5 runtime: We will need to pull in changes from the same file in +// [tip-5] tag in ocaml-jst. We're considering this file to be part of the +// runtime. 
+ +#define CAML_INTERNALS + +#define CAML_NAME_SPACE +#include "caml/compatibility.h" +#undef CAML_NAME_SPACE + +#include "caml/alloc.h" +#include "caml/backtrace.h" +#include "caml/callback.h" +#include "caml/custom.h" +#include "caml/debugger.h" +#include "caml/domain.h" +#include "caml/fail.h" +#include "caml/io.h" +#include "caml/memory.h" +#include "caml/misc.h" +#include "caml/mlvalues.h" +#include "caml/printexc.h" +#include "caml/roots.h" +#include "caml/signals.h" +#ifdef NATIVE_CODE +#include "caml/stack.h" +#else +#include "caml/stacks.h" +#endif +#include "caml/sys.h" +#include "caml/memprof.h" + +#define CAMLextern_libthreads +#include "threads.h" + +#ifndef NATIVE_CODE +/* Initial size of bytecode stack when a thread is created (4 Ko) */ +#define Thread_stack_size (Stack_size / 4) +#endif + +/* Max computation time before rescheduling, in milliseconds */ +#define Thread_timeout 50 + +/* OS-specific code */ +#ifdef _WIN32 +#include "st_win32.h" +#else +#include "st_posix.h" +#endif + +/* Atomics */ +#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 8 + /* GCC 4.8 shipped with a working implementation of atomics, but no + stdatomic.h header, so we need to use GCC-specific intrinsics. 
*/ + + #define _Atomic /* GCC intrinsics work on normal variables */ + #define atomic_store(v, x) \ + __atomic_store_n((v), (x), __ATOMIC_SEQ_CST) + #define atomic_load(v) \ + __atomic_load_n((v), __ATOMIC_SEQ_CST) + #define atomic_exchange(v, x) \ + __atomic_exchange_n((v), (x), __ATOMIC_SEQ_CST) +#else + #include <stdatomic.h> +#endif + + +/* The ML value describing a thread (heap-allocated) */ + +struct caml_thread_descr { + value ident; /* Unique integer ID */ + value start_closure; /* The closure to start this thread */ + value terminated; /* Triggered event for thread termination */ +}; + +#define Ident(v) (((struct caml_thread_descr *)(v))->ident) +#define Start_closure(v) (((struct caml_thread_descr *)(v))->start_closure) +#define Terminated(v) (((struct caml_thread_descr *)(v))->terminated) + +/* The infos on threads (allocated via caml_stat_alloc()) */ + +struct caml_thread_struct { + value descr; /* The heap-allocated descriptor (root) */ + struct caml_thread_struct * next; /* Double linking of running threads */ + struct caml_thread_struct * prev; +#ifdef NATIVE_CODE + char * top_of_stack; /* Top of stack for this thread (approx.) 
*/ + char * bottom_of_stack; /* Saved value of Caml_state->_bottom_of_stack */ + uintnat last_retaddr; /* Saved value of Caml_state->_last_return_address */ + value * gc_regs; /* Saved value of Caml_state->_gc_regs */ + char * exn_handler; /* Saved value of Caml_state->_exn_handler */ + char * async_exception_pointer; + /* Saved value of Caml_state->_async_exception_pointer */ + struct caml__roots_block * local_roots; /* Saved value of local_roots */ + struct caml_local_arenas * local_arenas; + struct longjmp_buffer * exit_buf; /* For thread exit */ +#else + value * stack_low; /* The execution stack for this thread */ + value * stack_high; + value * stack_threshold; + value * sp; /* Saved value of Caml_state->_extern_sp for this thread */ + value * trapsp; /* Saved value of Caml_state->_trapsp for this thread */ + /* Saved value of Caml_state->_local_roots */ + struct caml__roots_block * local_roots; + struct longjmp_buffer * external_raise; /* Saved Caml_state->_external_raise */ + struct longjmp_buffer * external_raise_async; + /* Saved Caml_state->_external_raise_async */ +#endif + int backtrace_pos; /* Saved Caml_state->_backtrace_pos */ + backtrace_slot * backtrace_buffer; /* Saved Caml_state->_backtrace_buffer */ + value backtrace_last_exn; /* Saved Caml_state->_backtrace_last_exn (root) */ + struct caml_memprof_th_ctx *memprof_ctx; +}; + +typedef struct caml_thread_struct * caml_thread_t; + +/* The "head" of the circular list of thread descriptors */ +static caml_thread_t all_threads = NULL; + +/* The descriptor for the currently executing thread */ +static caml_thread_t curr_thread = NULL; + +/* The master lock protecting the OCaml runtime system */ +static struct caml_locking_scheme* _Atomic caml_locking_scheme; + +/* Whether the "tick" thread is already running */ +static int caml_tick_thread_running = 0; + +/* Whether the "tick" thread is enabled */ +static int caml_tick_thread_enabled = 1; + +/* The thread identifier of the "tick" thread */ +static 
st_thread_id caml_tick_thread_id; + +/* The key used for storing the thread descriptor in the specific data + of the corresponding system thread. */ +static st_tlskey thread_descriptor_key; + +/* The key used for unlocking I/O channels on exceptions */ +static st_tlskey last_channel_locked_key; + +/* Identifier for next thread creation */ +static intnat thread_next_ident = 0; + +/* Forward declarations */ +static value caml_threadstatus_new (void); +static void caml_threadstatus_terminate (value); +static st_retcode caml_threadstatus_wait (value); + +/* Imports from the native-code runtime system */ +#ifdef NATIVE_CODE +extern struct longjmp_buffer caml_termination_jmpbuf; +extern void (*caml_termination_hook)(void); +#endif + +/* The default locking scheme */ +static st_masterlock default_master_lock; + +static int default_can_skip_yield(void* m) +{ + return st_masterlock_waiters(m) == 0; +} + +struct caml_locking_scheme caml_default_locking_scheme = + { &default_master_lock, + (void (*)(void*))&st_masterlock_acquire, + (void (*)(void*))&st_masterlock_release, + NULL, + NULL, + (void (*)(void*))&st_masterlock_init, + default_can_skip_yield, + (void (*)(void*))&st_thread_yield }; + +static void acquire_runtime_lock() +{ + struct caml_locking_scheme* s; + + /* The locking scheme may be changed by the thread that currently + holds it. This means that it may change while we're waiting to + acquire it, so by the time we acquire it it may no longer be the + right scheme. */ + + retry: + s = atomic_load(&caml_locking_scheme); + s->lock(s->context); + if (atomic_load(&caml_locking_scheme) != s) { + /* This is no longer the right scheme. Unlock and try again */ + s->unlock(s->context); + goto retry; + } +} + +static void release_runtime_lock() +{ + /* There is no tricky case here like in acquire, as only the holder + of the lock can change it. 
(Here, that's us) */ + struct caml_locking_scheme* s; + s = atomic_load(&caml_locking_scheme); + s->unlock(s->context); +} + +/* Hook for scanning the stacks of the other threads */ + +static void (*prev_scan_roots_hook) (scanning_action); + +static void caml_thread_scan_roots(scanning_action action) +{ + caml_thread_t th = curr_thread; + do { + (*action)(th->descr, &th->descr); + (*action)(th->backtrace_last_exn, &th->backtrace_last_exn); + /* Don't rescan the stack of the current thread, it was done already */ + if (th != curr_thread) { +#ifdef NATIVE_CODE + if (th->bottom_of_stack != NULL) + caml_do_local_roots(action, action, th->bottom_of_stack, th->last_retaddr, + th->gc_regs, th->local_roots, th->local_arenas); +#else + caml_do_local_roots(action, th->sp, th->stack_high, th->local_roots); +#endif + } + th = th->next; + } while (th != curr_thread); + /* Hook */ + if (prev_scan_roots_hook != NULL) (*prev_scan_roots_hook)(action); +} + +/* Hook for iterating over Memprof's entries arrays */ + +static void memprof_ctx_iter(th_ctx_action f, void* data) +{ + caml_thread_t th = curr_thread; + do { + f(th->memprof_ctx, data); + th = th->next; + } while (th != curr_thread); +} + +/* Saving and restoring runtime state in curr_thread */ + +CAMLexport void caml_thread_save_runtime_state(void) +{ + if (Caml_state->_in_minor_collection) + caml_fatal_error("Thread switch from inside minor GC"); +#ifdef NATIVE_CODE + curr_thread->top_of_stack = Caml_state->_top_of_stack; + curr_thread->bottom_of_stack = Caml_state->_bottom_of_stack; + curr_thread->last_retaddr = Caml_state->_last_return_address; + curr_thread->gc_regs = Caml_state->_gc_regs; + curr_thread->exn_handler = Caml_state->_exn_handler; + curr_thread->async_exception_pointer = Caml_state->_async_exception_pointer; + curr_thread->local_arenas = caml_get_local_arenas(); +#else + curr_thread->stack_low = Caml_state->_stack_low; + curr_thread->stack_high = Caml_state->_stack_high; + curr_thread->stack_threshold = 
Caml_state->_stack_threshold; + curr_thread->sp = Caml_state->_extern_sp; + curr_thread->trapsp = Caml_state->_trapsp; + curr_thread->external_raise = Caml_state->_external_raise; + curr_thread->external_raise_async = Caml_state->_external_raise_async; +#endif + curr_thread->local_roots = Caml_state->_local_roots; + curr_thread->backtrace_pos = Caml_state->_backtrace_pos; + curr_thread->backtrace_buffer = Caml_state->_backtrace_buffer; + curr_thread->backtrace_last_exn = Caml_state->_backtrace_last_exn; + caml_memprof_leave_thread(); +} + +CAMLexport void caml_thread_restore_runtime_state(void) +{ + /* Update curr_thread to point to the thread descriptor corresponding + to the thread currently executing */ + curr_thread = st_tls_get(thread_descriptor_key); + +#ifdef NATIVE_CODE + Caml_state->_top_of_stack = curr_thread->top_of_stack; + Caml_state->_bottom_of_stack= curr_thread->bottom_of_stack; + Caml_state->_last_return_address = curr_thread->last_retaddr; + Caml_state->_gc_regs = curr_thread->gc_regs; + Caml_state->_exn_handler = curr_thread->exn_handler; + Caml_state->_async_exception_pointer = curr_thread->async_exception_pointer; + caml_set_local_arenas(curr_thread->local_arenas); +#else + Caml_state->_stack_low = curr_thread->stack_low; + Caml_state->_stack_high = curr_thread->stack_high; + Caml_state->_stack_threshold = curr_thread->stack_threshold; + Caml_state->_extern_sp = curr_thread->sp; + Caml_state->_trapsp = curr_thread->trapsp; + Caml_state->_external_raise = curr_thread->external_raise; + Caml_state->_external_raise_async = curr_thread->external_raise_async; +#endif + Caml_state->_local_roots = curr_thread->local_roots; + Caml_state->_backtrace_pos = curr_thread->backtrace_pos; + Caml_state->_backtrace_buffer = curr_thread->backtrace_buffer; + Caml_state->_backtrace_last_exn = curr_thread->backtrace_last_exn; + caml_memprof_enter_thread(curr_thread->memprof_ctx); +} + +CAMLexport void caml_switch_runtime_locking_scheme(struct caml_locking_scheme* 
new) +{ + struct caml_locking_scheme* old; + + caml_thread_save_runtime_state(); + old = atomic_exchange(&caml_locking_scheme, new); + /* We hold 'old', but it is no longer the runtime lock */ + old->unlock(old->context); + acquire_runtime_lock(); + caml_thread_restore_runtime_state(); +} + + +/* Hooks for caml_enter_blocking_section and caml_leave_blocking_section */ + + +static void caml_thread_enter_blocking_section(void) +{ + /* Save the current runtime state in the thread descriptor + of the current thread */ + caml_thread_save_runtime_state(); + /* Tell other threads that the runtime is free */ + release_runtime_lock(); +} + +static void caml_thread_leave_blocking_section(void) +{ +#ifdef _WIN32 + /* TlsGetValue calls SetLastError which will mask any error which occurred + prior to the caml_thread_leave_blocking_section call. EnterCriticalSection + does not do this. */ + DWORD error = GetLastError(); +#endif + /* Wait until the runtime is free */ + acquire_runtime_lock(); + caml_thread_restore_runtime_state(); +#ifdef _WIN32 + SetLastError(error); +#endif +} + +/* Hooks for I/O locking */ + +static void caml_io_mutex_free(struct channel *chan) +{ + st_mutex mutex = chan->mutex; + if (mutex != NULL) { + st_mutex_destroy(mutex); + chan->mutex = NULL; + } +} + +static void caml_io_mutex_lock(struct channel *chan) +{ + st_mutex mutex = chan->mutex; + + if (mutex == NULL) { + st_check_error(st_mutex_create(&mutex), "channel locking"); /*PR#7038*/ + chan->mutex = mutex; + } + /* PR#4351: first try to acquire mutex without releasing the master lock */ + if (st_mutex_trylock(mutex) == MUTEX_PREVIOUSLY_UNLOCKED) { + st_tls_set(last_channel_locked_key, (void *) chan); + return; + } + /* If unsuccessful, block on mutex */ + caml_enter_blocking_section(); + st_mutex_lock(mutex); + /* Problem: if a signal occurs at this point, + and the signal handler raises an exception, we will not + unlock the mutex. 
The alternative (doing the setspecific + before locking the mutex is also incorrect, since we could + then unlock a mutex that is unlocked or locked by someone else. */ + st_tls_set(last_channel_locked_key, (void *) chan); + caml_leave_blocking_section(); +} + +static void caml_io_mutex_unlock(struct channel *chan) +{ + st_mutex_unlock(chan->mutex); + st_tls_set(last_channel_locked_key, NULL); +} + +static void caml_io_mutex_unlock_exn(void) +{ + struct channel * chan = st_tls_get(last_channel_locked_key); + if (chan != NULL) caml_io_mutex_unlock(chan); +} + +/* Hook for estimating stack usage */ + +static uintnat (*prev_stack_usage_hook)(void); + +static uintnat caml_thread_stack_usage(void) +{ + uintnat sz; + caml_thread_t th; + + /* Don't add stack for current thread, this is done elsewhere */ + for (sz = 0, th = curr_thread->next; + th != curr_thread; + th = th->next) { +#ifdef NATIVE_CODE + if(th->top_of_stack != NULL && th->bottom_of_stack != NULL && + th->top_of_stack > th->bottom_of_stack) + sz += (value *) th->top_of_stack - (value *) th->bottom_of_stack; +#else + sz += th->stack_high - th->sp; +#endif + } + if (prev_stack_usage_hook != NULL) + sz += prev_stack_usage_hook(); + return sz; +} + +/* Create and setup a new thread info block. + This block has no associated thread descriptor and + is not inserted in the list of threads. 
*/ + +static caml_thread_t caml_thread_new_info(void) +{ + caml_thread_t th; + th = (caml_thread_t) caml_stat_alloc_noexc(sizeof(struct caml_thread_struct)); + if (th == NULL) return NULL; + th->descr = Val_unit; /* filled later */ +#ifdef NATIVE_CODE + th->bottom_of_stack = NULL; + th->top_of_stack = NULL; + th->last_retaddr = 1; + th->exn_handler = NULL; + th->async_exception_pointer = NULL; + th->local_roots = NULL; + th->local_arenas = NULL; + th->exit_buf = NULL; +#else + /* Allocate the stacks */ + th->stack_low = (value *) caml_stat_alloc(Thread_stack_size); + th->stack_high = th->stack_low + Thread_stack_size / sizeof(value); + th->stack_threshold = th->stack_low + Stack_threshold / sizeof(value); + th->sp = th->stack_high; + th->trapsp = th->stack_high; + th->local_roots = NULL; + th->external_raise = NULL; + th->external_raise_async = NULL; +#endif + th->backtrace_pos = 0; + th->backtrace_buffer = NULL; + th->backtrace_last_exn = Val_unit; + th->memprof_ctx = caml_memprof_new_th_ctx(); + return th; +} + +/* Allocate a thread descriptor block. */ + +static value caml_thread_new_descriptor(value clos) +{ + value mu = Val_unit; + value descr; + Begin_roots2 (clos, mu) + /* Create and initialize the termination semaphore */ + mu = caml_threadstatus_new(); + /* Create a descriptor for the new thread */ + descr = caml_alloc_small(3, 0); + Ident(descr) = Val_long(thread_next_ident); + Start_closure(descr) = clos; + Terminated(descr) = mu; + thread_next_ident++; + End_roots(); + return descr; +} + +/* Remove a thread info block from the list of threads. + Free it and its stack resources. 
*/ + +static void caml_thread_remove_info(caml_thread_t th) +{ + if (th->next == th) + all_threads = NULL; /* last OCaml thread exiting */ + else if (all_threads == th) + all_threads = th->next; /* PR#5295 */ + th->next->prev = th->prev; + th->prev->next = th->next; +#ifndef NATIVE_CODE + caml_stat_free(th->stack_low); +#endif + if (th->backtrace_buffer != NULL) caml_stat_free(th->backtrace_buffer); + caml_stat_free(th); +} + +/* Reinitialize the thread machinery after a fork() (PR#4577) */ + +static void caml_thread_reinitialize(void) +{ + struct channel * chan; + struct caml_locking_scheme* s; + + /* Remove all other threads (now nonexistent) + from the doubly-linked list of threads */ + while (curr_thread->next != curr_thread) { + caml_memprof_delete_th_ctx(curr_thread->next->memprof_ctx); + caml_thread_remove_info(curr_thread->next); + } + + /* Reinitialize the master lock machinery, + just in case the fork happened while other threads were doing + caml_leave_blocking_section */ + s = atomic_load(&caml_locking_scheme); + s->reinitialize_after_fork(s->context); + /* Tick thread is not currently running in child process, will be + re-created at next Thread.create */ + caml_tick_thread_running = 0; + /* Destroy all IO mutexes; will be reinitialized on demand */ + for (chan = caml_all_opened_channels; + chan != NULL; + chan = chan->next) { + if (chan->mutex != NULL) { + st_mutex_destroy(chan->mutex); + chan->mutex = NULL; + } + } +} + +/* Initialize the thread machinery */ + +CAMLprim value caml_thread_initialize(value unit) /* ML */ +{ + /* Protect against repeated initialization (PR#3532) */ + if (curr_thread != NULL) return Val_unit; + /* OS-specific initialization */ + st_initialize(); + /* Initialize and acquire the master lock */ + st_masterlock_init(&default_master_lock); + caml_locking_scheme = &caml_default_locking_scheme; + /* Initialize the keys */ + st_tls_newkey(&thread_descriptor_key); + st_tls_newkey(&last_channel_locked_key); + /* Set up a thread 
info block for the current thread */ + curr_thread = + (caml_thread_t) caml_stat_alloc(sizeof(struct caml_thread_struct)); + curr_thread->descr = caml_thread_new_descriptor(Val_unit); + curr_thread->next = curr_thread; + curr_thread->prev = curr_thread; + all_threads = curr_thread; + curr_thread->backtrace_last_exn = Val_unit; +#ifdef NATIVE_CODE + curr_thread->exit_buf = &caml_termination_jmpbuf; +#endif + curr_thread->memprof_ctx = &caml_memprof_main_ctx; + /* The stack-related fields will be filled in at the next + caml_enter_blocking_section */ + /* Associate the thread descriptor with the thread */ + st_tls_set(thread_descriptor_key, (void *) curr_thread); + st_thread_set_id(Ident(curr_thread->descr)); + /* Set up the hooks */ + prev_scan_roots_hook = caml_scan_roots_hook; + caml_scan_roots_hook = caml_thread_scan_roots; + caml_enter_blocking_section_hook = caml_thread_enter_blocking_section; + caml_leave_blocking_section_hook = caml_thread_leave_blocking_section; +#ifdef NATIVE_CODE + caml_termination_hook = st_thread_exit; +#endif + caml_channel_mutex_free = caml_io_mutex_free; + caml_channel_mutex_lock = caml_io_mutex_lock; + caml_channel_mutex_unlock = caml_io_mutex_unlock; + caml_channel_mutex_unlock_exn = caml_io_mutex_unlock_exn; + prev_stack_usage_hook = caml_stack_usage_hook; + caml_stack_usage_hook = caml_thread_stack_usage; + caml_memprof_th_ctx_iter_hook = memprof_ctx_iter; + /* Set up fork() to reinitialize the thread machinery in the child + (PR#4577) */ + st_atfork(caml_thread_reinitialize); + return Val_unit; +} + +/* Start tick thread, if not already running */ +static st_retcode start_tick_thread() +{ + st_retcode err; + if (caml_tick_thread_running) return 0; + err = st_thread_create(&caml_tick_thread_id, caml_thread_tick, NULL); + if (err == 0) caml_tick_thread_running = 1; + return err; +} + +/* Stop tick thread, if currently running */ +static void stop_tick_thread() +{ + if (!caml_tick_thread_running) return; + caml_tick_thread_stop = 1; 
+ st_thread_join(caml_tick_thread_id); + caml_tick_thread_stop = 0; + caml_tick_thread_running = 0; +} + +CAMLprim value caml_enable_tick_thread(value v_enable) +{ + int enable = Long_val(v_enable) ? 1 : 0; + + if (enable) { + st_retcode err = start_tick_thread(); + st_check_error(err, "caml_enable_tick_thread"); + } else { + stop_tick_thread(); + } + + caml_tick_thread_enabled = enable; + return Val_unit; +} + +/* Cleanup the thread machinery when the runtime is shut down. Joining the tick + thread take 25ms on average / 50ms in the worst case, so we don't do it on + program exit. */ + +CAMLprim value caml_thread_cleanup(value unit) /* ML */ +{ + stop_tick_thread(); + return Val_unit; +} + +/* Thread cleanup at termination */ + +static void caml_thread_stop(void) +{ + /* PR#5188, PR#7220: some of the global runtime state may have + changed as the thread was running, so we save it in the + curr_thread data to make sure that the cleanup logic + below uses accurate information. */ + caml_thread_save_runtime_state(); + /* Tell memprof that this thread is terminating. 
*/ + caml_memprof_delete_th_ctx(curr_thread->memprof_ctx); + /* Signal that the thread has terminated */ + caml_threadstatus_terminate(Terminated(curr_thread->descr)); + /* Remove th from the doubly-linked list of threads and free its info block */ + caml_thread_remove_info(curr_thread); + /* If no other OCaml thread remains, ask the tick thread to stop + so that it does not prevent the whole process from exiting (#9971) */ + if (all_threads == NULL) caml_thread_cleanup(Val_unit); + /* OS-specific cleanups */ + st_thread_cleanup(); + /* Release the runtime system */ + release_runtime_lock(); +} + +/* Create a thread */ + +static ST_THREAD_FUNCTION caml_thread_start(void * arg) +{ + caml_thread_t th = (caml_thread_t) arg; + value clos; + void * signal_stack; + struct caml_locking_scheme* sch; +#ifdef NATIVE_CODE + struct longjmp_buffer termination_buf; + char tos; + /* Record top of stack (approximative) */ + th->top_of_stack = &tos; +#endif + + /* Associate the thread descriptor with the thread */ + st_tls_set(thread_descriptor_key, (void *) th); + st_thread_set_id(Ident(th->descr)); + sch = atomic_load(&caml_locking_scheme); + if (sch->thread_start != NULL) + sch->thread_start(sch->context, Thread_type_caml); + /* Acquire the global mutex */ + caml_leave_blocking_section(); + st_thread_set_id(Ident(th->descr)); + signal_stack = caml_setup_stack_overflow_detection(); +#ifdef NATIVE_CODE + /* Setup termination handler (for caml_thread_exit) */ + if (sigsetjmp(termination_buf.buf, 0) == 0) { + th->exit_buf = &termination_buf; +#endif + /* Callback the closure */ + clos = Start_closure(th->descr); + caml_modify(&(Start_closure(th->descr)), Val_unit); + caml_callback_exn(clos, Val_unit); + caml_thread_stop(); + sch = atomic_load(&caml_locking_scheme); + if (sch->thread_stop != NULL) + sch->thread_stop(sch->context, Thread_type_caml); +#ifdef NATIVE_CODE + } +#endif + caml_stop_stack_overflow_detection(signal_stack); + /* The thread now stops running */ + return 0; +} + 
+CAMLprim value caml_thread_new(value clos) /* ML */ +{ + caml_thread_t th; + st_retcode err; + +#ifndef NATIVE_CODE + if (caml_debugger_in_use) + caml_fatal_error("ocamldebug does not support multithreaded programs"); +#endif + /* Create a thread info block */ + th = caml_thread_new_info(); + if (th == NULL) caml_raise_out_of_memory(); + /* Equip it with a thread descriptor */ + th->descr = caml_thread_new_descriptor(clos); + /* Add thread info block to the list of threads */ + th->next = curr_thread->next; + th->prev = curr_thread; + curr_thread->next->prev = th; + curr_thread->next = th; + /* Create the new thread */ + err = st_thread_create(NULL, caml_thread_start, (void *) th); + if (err != 0) { + /* Creation failed, remove thread info block from list of threads */ + caml_thread_remove_info(th); + st_check_error(err, "Thread.create"); + } + /* Create the tick thread if not already done. + Because of PR#4666, we start the tick thread late, only when we create + the first additional thread in the current process*/ + if (caml_tick_thread_enabled) { + err = start_tick_thread(); + st_check_error(err, "Thread.create"); + } + return th->descr; +} + +/* Register a thread already created from C */ + +CAMLexport int caml_c_thread_register(void) +{ + caml_thread_t th; + struct caml_locking_scheme* sch; +#ifdef NATIVE_CODE + st_retcode err; +#endif + + sch = atomic_load(&caml_locking_scheme); + if (sch->thread_start != NULL) + sch->thread_start(sch->context, Thread_type_c_registered); + + /* Already registered? 
*/ + if (st_tls_get(thread_descriptor_key) != NULL) return 0; + /* Create a thread info block */ + th = caml_thread_new_info(); + if (th == NULL) return 0; +#ifdef NATIVE_CODE + th->top_of_stack = (char *) &err; +#endif + /* Take master lock to protect access to the chaining of threads */ + acquire_runtime_lock(); + /* Add thread info block to the list of threads */ + if (all_threads == NULL) { + th->next = th; + th->prev = th; + all_threads = th; + } else { + th->next = all_threads->next; + th->prev = all_threads; + all_threads->next->prev = th; + all_threads->next = th; + } + /* Associate the thread descriptor with the thread */ + st_tls_set(thread_descriptor_key, (void *) th); + /* Release the master lock */ + release_runtime_lock(); + /* Now we can re-enter the run-time system and heap-allocate the descriptor */ + caml_leave_blocking_section(); + th->descr = caml_thread_new_descriptor(Val_unit); /* no closure */ + st_thread_set_id(Ident(th->descr)); + /* Create the tick thread if not already done. */ + if (caml_tick_thread_enabled) start_tick_thread(); + /* Exit the run-time system */ + caml_enter_blocking_section(); + return 1; +} + +/* Unregister a thread that was created from C and registered with + the function above */ + +CAMLexport int caml_c_thread_unregister(void) +{ + struct caml_locking_scheme* sch; + caml_thread_t th = st_tls_get(thread_descriptor_key); + /* Not registered? 
*/ + if (th == NULL) return 0; + /* Wait until the runtime is available */ + acquire_runtime_lock(); + /* Forget the thread descriptor */ + st_tls_set(thread_descriptor_key, NULL); + /* Remove thread info block from list of threads, and free it */ + caml_thread_remove_info(th); + /* If no other OCaml thread remains, ask the tick thread to stop + so that it does not prevent the whole process from exiting (#9971) */ + if (all_threads == NULL) caml_thread_cleanup(Val_unit); + /* Release the runtime */ + release_runtime_lock(); + sch = atomic_load(&caml_locking_scheme); + if (sch->thread_stop != NULL) + sch->thread_stop(sch->context, Thread_type_c_registered); + return 1; +} + +/* Return the current thread */ + +CAMLprim value caml_thread_self(value unit) /* ML */ +{ + if (curr_thread == NULL) + caml_invalid_argument("Thread.self: not initialized"); + return curr_thread->descr; +} + +/* Return the identifier of a thread */ + +CAMLprim value caml_thread_id(value th) /* ML */ +{ + return Ident(th); +} + +/* Print uncaught exception and backtrace */ + +CAMLprim value caml_thread_uncaught_exception(value exn) /* ML */ +{ + char * msg = caml_format_exception(exn); + fprintf(stderr, "Thread %d killed on uncaught exception %s\n", + Int_val(Ident(curr_thread->descr)), msg); + caml_stat_free(msg); + if (Caml_state->_backtrace_active) caml_print_exception_backtrace(); + fflush(stderr); + return Val_unit; +} + +/* Terminate current thread */ + +CAMLprim value caml_thread_exit(value unit) /* ML */ +{ + struct longjmp_buffer * exit_buf = NULL; + + if (curr_thread == NULL) + caml_invalid_argument("Thread.exit: not initialized"); + + /* In native code, we cannot call pthread_exit here because on some + systems this raises a C++ exception, and ocamlopt-generated stack + frames cannot be unwound. Instead, we longjmp to the thread + creation point (in caml_thread_start) or to the point in + caml_main where caml_termination_hook will be called. 
+ Note that threads created in C then registered do not have + a creation point (exit_buf == NULL). + */ +#ifdef NATIVE_CODE + exit_buf = curr_thread->exit_buf; +#endif + caml_thread_stop(); + if (exit_buf != NULL) { + /* Native-code and (main thread or thread created by OCaml) */ + siglongjmp(exit_buf->buf, 1); + } else { + /* Bytecode, or thread created from C */ + st_thread_exit(); + } + return Val_unit; /* not reached */ +} + +/* Allow re-scheduling */ + +CAMLprim value caml_thread_yield(value unit) /* ML */ +{ + struct caml_locking_scheme* s; + + s = atomic_load(&caml_locking_scheme); + if (s->can_skip_yield != NULL && s->can_skip_yield(s->context)) + return Val_unit; + + /* Do all the parts of a blocking section enter/leave except lock + manipulation, which we'll do more efficiently in st_thread_yield. (Since + our blocking section doesn't contain anything interesting, don't bother + with saving errno.) + */ + caml_raise_async_if_exception(caml_process_pending_signals_exn(), + "signal handler"); + caml_thread_save_runtime_state(); + /* caml_locking_scheme may have changed in caml_process_pending_signals_exn */ + s = atomic_load(&caml_locking_scheme); + s->yield(s->context); + if (atomic_load(&caml_locking_scheme) != s) { + /* The lock we have is no longer the runtime lock */ + s->unlock(s->context); + acquire_runtime_lock(); + } + caml_thread_restore_runtime_state(); + caml_raise_async_if_exception(caml_process_pending_signals_exn(), + "signal handler"); + + return Val_unit; +} + +/* Suspend the current thread until another thread terminates */ + +CAMLprim value caml_thread_join(value th) /* ML */ +{ + st_retcode rc = caml_threadstatus_wait(Terminated(th)); + st_check_error(rc, "Thread.join"); + return Val_unit; +} + +/* Mutex operations */ + +#define Mutex_val(v) (* ((st_mutex *) Data_custom_val(v))) + +static void caml_mutex_finalize(value wrapper) +{ + st_mutex_destroy(Mutex_val(wrapper)); +} + +static int caml_mutex_compare(value wrapper1, value wrapper2) 
+{ + st_mutex mut1 = Mutex_val(wrapper1); + st_mutex mut2 = Mutex_val(wrapper2); + return mut1 == mut2 ? 0 : mut1 < mut2 ? -1 : 1; +} + +static intnat caml_mutex_hash(value wrapper) +{ + return (intnat) (Mutex_val(wrapper)); +} + +static struct custom_operations caml_mutex_ops = { + "_mutex", + caml_mutex_finalize, + caml_mutex_compare, + caml_mutex_hash, + custom_serialize_default, + custom_deserialize_default, + custom_compare_ext_default, + custom_fixed_length_default +}; + +CAMLprim value caml_mutex_new(value unit) /* ML */ +{ + st_mutex mut = NULL; /* suppress warning */ + value wrapper; + st_check_error(st_mutex_create(&mut), "Mutex.create"); + wrapper = caml_alloc_custom(&caml_mutex_ops, sizeof(st_mutex *), + 0, 1); + Mutex_val(wrapper) = mut; + return wrapper; +} + +CAMLprim value caml_mutex_lock(value wrapper) /* ML */ +{ + st_mutex mut = Mutex_val(wrapper); + st_retcode retcode; + + /* PR#4351: first try to acquire mutex without releasing the master lock */ + if (st_mutex_trylock(mut) == MUTEX_PREVIOUSLY_UNLOCKED) return Val_unit; + /* If unsuccessful, block on mutex */ + Begin_root(wrapper) /* prevent the deallocation of mutex */ + caml_enter_blocking_section(); + retcode = st_mutex_lock(mut); + caml_leave_blocking_section(); + End_roots(); + st_check_error(retcode, "Mutex.lock"); + return Val_unit; +} + +CAMLprim value caml_mutex_unlock(value wrapper) /* ML */ +{ + st_mutex mut = Mutex_val(wrapper); + st_retcode retcode; + /* PR#4351: no need to release and reacquire master lock */ + retcode = st_mutex_unlock(mut); + st_check_error(retcode, "Mutex.unlock"); + return Val_unit; +} + +CAMLprim value caml_mutex_try_lock(value wrapper) /* ML */ +{ + st_mutex mut = Mutex_val(wrapper); + st_retcode retcode; + retcode = st_mutex_trylock(mut); + if (retcode == MUTEX_ALREADY_LOCKED) return Val_false; + st_check_error(retcode, "Mutex.try_lock"); + return Val_true; +} + +/* Conditions operations */ + +#define Condition_val(v) (* (st_condvar *) Data_custom_val(v)) + 
+static void caml_condition_finalize(value wrapper) +{ + st_condvar_destroy(Condition_val(wrapper)); +} + +static int caml_condition_compare(value wrapper1, value wrapper2) +{ + st_condvar cond1 = Condition_val(wrapper1); + st_condvar cond2 = Condition_val(wrapper2); + return cond1 == cond2 ? 0 : cond1 < cond2 ? -1 : 1; +} + +static intnat caml_condition_hash(value wrapper) +{ + return (intnat) (Condition_val(wrapper)); +} + +static struct custom_operations caml_condition_ops = { + "_condition", + caml_condition_finalize, + caml_condition_compare, + caml_condition_hash, + custom_serialize_default, + custom_deserialize_default, + custom_compare_ext_default, + custom_fixed_length_default +}; + +CAMLprim value caml_condition_new(value unit) /* ML */ +{ + st_condvar cond = NULL; /* suppress warning */ + value wrapper; + st_check_error(st_condvar_create(&cond), "Condition.create"); + wrapper = caml_alloc_custom(&caml_condition_ops, sizeof(st_condvar *), + 0, 1); + Condition_val(wrapper) = cond; + return wrapper; +} + +CAMLprim value caml_condition_wait(value wcond, value wmut) /* ML */ +{ + st_condvar cond = Condition_val(wcond); + st_mutex mut = Mutex_val(wmut); + st_retcode retcode; + + Begin_roots2(wcond, wmut) /* prevent deallocation of cond and mutex */ + caml_enter_blocking_section(); + retcode = st_condvar_wait(cond, mut); + caml_leave_blocking_section(); + End_roots(); + st_check_error(retcode, "Condition.wait"); + return Val_unit; +} + +CAMLprim value caml_condition_signal(value wrapper) /* ML */ +{ + st_check_error(st_condvar_signal(Condition_val(wrapper)), + "Condition.signal"); + return Val_unit; +} + +CAMLprim value caml_condition_broadcast(value wrapper) /* ML */ +{ + st_check_error(st_condvar_broadcast(Condition_val(wrapper)), + "Condition.broadcast"); + return Val_unit; +} + +/* Thread status blocks */ + +#define Threadstatus_val(v) (* ((st_event *) Data_custom_val(v))) + +static void caml_threadstatus_finalize(value wrapper) +{ + 
st_event_destroy(Threadstatus_val(wrapper)); +} + +static int caml_threadstatus_compare(value wrapper1, value wrapper2) +{ + st_event ts1 = Threadstatus_val(wrapper1); + st_event ts2 = Threadstatus_val(wrapper2); + return ts1 == ts2 ? 0 : ts1 < ts2 ? -1 : 1; +} + +static struct custom_operations caml_threadstatus_ops = { + "_threadstatus", + caml_threadstatus_finalize, + caml_threadstatus_compare, + custom_hash_default, + custom_serialize_default, + custom_deserialize_default, + custom_compare_ext_default, + custom_fixed_length_default +}; + +static value caml_threadstatus_new (void) +{ + st_event ts = NULL; /* suppress warning */ + value wrapper; + st_check_error(st_event_create(&ts), "Thread.create"); + wrapper = caml_alloc_custom(&caml_threadstatus_ops, sizeof(st_event *), + 0, 1); + Threadstatus_val(wrapper) = ts; + return wrapper; +} + +static void caml_threadstatus_terminate (value wrapper) +{ + st_event_trigger(Threadstatus_val(wrapper)); +} + +static st_retcode caml_threadstatus_wait (value wrapper) +{ + st_event ts = Threadstatus_val(wrapper); + st_retcode retcode; + + Begin_roots1(wrapper) /* prevent deallocation of ts */ + caml_enter_blocking_section(); + retcode = st_event_wait(ts); + caml_leave_blocking_section(); + End_roots(); + return retcode; +} diff --git a/ocaml/otherlibs/systhreads4/st_win32.h b/ocaml/otherlibs/systhreads4/st_win32.h new file mode 100644 index 00000000000..3f598a715de --- /dev/null +++ b/ocaml/otherlibs/systhreads4/st_win32.h @@ -0,0 +1,538 @@ +/**************************************************************************/ +/* */ +/* OCaml */ +/* */ +/* Xavier Leroy and Damien Doligez, INRIA Rocquencourt */ +/* */ +/* Copyright 2009 Institut National de Recherche en Informatique et */ +/* en Automatique. */ +/* */ +/* All rights reserved. This file is distributed under the terms of */ +/* the GNU Lesser General Public License version 2.1, with the */ +/* special exception on linking described in the file LICENSE. 
*/ +/* */ +/**************************************************************************/ + +/* Win32 implementation of the "st" interface */ + +#undef _WIN32_WINNT +#define _WIN32_WINNT 0x0400 +#include +#include +#include +#include + +#include + +#if 1 +#define TRACE(x) +#define TRACE1(x,y) +#else +#include +#define TRACE(x) printf("%d: %s\n", GetCurrentThreadId(), x); fflush(stdout) +#define TRACE1(x,y) printf("%d: %s %p\n", GetCurrentThreadId(), x, (void *)y); \ + fflush(stdout) +#endif + +typedef DWORD st_retcode; + +#define SIGPREEMPTION SIGTERM + +/* Unique thread identifiers and atomic operations over them */ +#ifdef ARCH_SIXTYFOUR +typedef LONG64 st_tid; +#define Tid_Atomic_Exchange InterlockedExchange64 +#define Tid_Atomic_Compare_Exchange InterlockedCompareExchange64 +#else +typedef LONG st_tid; +#define Tid_Atomic_Exchange InterlockedExchange +#define Tid_Atomic_Compare_Exchange InterlockedCompareExchange +#endif + +/* Thread-local storage associating a Win32 event to every thread. */ +static DWORD st_thread_sem_key; + +/* Thread-local storage for the OCaml thread ID. */ +static DWORD st_thread_id_key; + +/* OS-specific initialization */ + +static DWORD st_initialize(void) +{ + DWORD result = 0; + st_thread_sem_key = TlsAlloc(); + if (st_thread_sem_key == TLS_OUT_OF_INDEXES) + return GetLastError(); + st_thread_id_key = TlsAlloc(); + if (st_thread_id_key == TLS_OUT_OF_INDEXES) { + result = GetLastError(); + TlsFree(st_thread_sem_key); + } + return result; +} + +/* Thread creation. Created in detached mode if [res] is NULL. 
*/ + +typedef HANDLE st_thread_id; + +static DWORD st_thread_create(st_thread_id * res, + LPTHREAD_START_ROUTINE fn, void * arg) +{ + HANDLE h = CreateThread(NULL, 0, fn, arg, 0, NULL); + TRACE1("st_thread_create", h); + if (h == NULL) return GetLastError(); + if (res == NULL) + CloseHandle(h); + else + *res = h; + return 0; +} + +#define ST_THREAD_FUNCTION DWORD WINAPI + +/* Cleanup at thread exit */ + +static void st_thread_cleanup(void) +{ + HANDLE ev = (HANDLE) TlsGetValue(st_thread_sem_key); + if (ev != NULL) CloseHandle(ev); +} + +/* Thread termination */ + +CAMLnoreturn_start +static void st_thread_exit(void) +CAMLnoreturn_end; + +static void st_thread_exit(void) +{ + TRACE("st_thread_exit"); + ExitThread(0); +} + +static void st_thread_join(st_thread_id thr) +{ + TRACE1("st_thread_join", h); + WaitForSingleObject(thr, INFINITE); +} + +/* Thread-specific state */ + +typedef DWORD st_tlskey; + +static DWORD st_tls_newkey(st_tlskey * res) +{ + *res = TlsAlloc(); + if (*res == TLS_OUT_OF_INDEXES) + return GetLastError(); + else + return 0; +} + +Caml_inline void * st_tls_get(st_tlskey k) +{ + return TlsGetValue(k); +} + +Caml_inline void st_tls_set(st_tlskey k, void * v) +{ + TlsSetValue(k, v); +} + +/* OS-specific handling of the OCaml thread ID (must be called with the runtime + lock). */ +Caml_inline void st_thread_set_id(intnat id) +{ + CAMLassert(id != 0); + st_tls_set(st_thread_id_key, (void *)id); +} + +/* Return the identifier for the current thread. The 0 value is reserved. */ +Caml_inline intnat st_current_thread_id(void) +{ + intnat id = (intnat)st_tls_get(st_thread_id_key); + CAMLassert(id != 0); + return id; +} + +/* The master lock. 
*/ + +typedef CRITICAL_SECTION st_masterlock; + +static void st_masterlock_init(st_masterlock * m) +{ + TRACE("st_masterlock_init"); + InitializeCriticalSection(m); + EnterCriticalSection(m); +} + +Caml_inline void st_masterlock_acquire(st_masterlock * m) +{ + TRACE("st_masterlock_acquire"); + EnterCriticalSection(m); + TRACE("st_masterlock_acquire (done)"); +} + +Caml_inline void st_masterlock_release(st_masterlock * m) +{ + LeaveCriticalSection(m); + TRACE("st_masterlock_released"); +} + +Caml_inline int st_masterlock_waiters(st_masterlock * m) +{ + return 1; /* info not maintained */ +} + +/* Scheduling hints */ + +Caml_inline void st_thread_yield(st_masterlock * m) +{ + LeaveCriticalSection(m); + Sleep(0); + EnterCriticalSection(m); +} + +/* Mutexes */ + +struct st_mutex_ { + CRITICAL_SECTION crit; + volatile st_tid owner; /* 0 if unlocked */ + /* The "owner" field is not always protected by "crit"; it is also + accessed without holding "crit", using the Interlocked API for + atomic accesses */ +}; + +typedef struct st_mutex_ * st_mutex; + +static DWORD st_mutex_create(st_mutex * res) +{ + st_mutex m = caml_stat_alloc_noexc(sizeof(struct st_mutex_)); + if (m == NULL) return ERROR_NOT_ENOUGH_MEMORY; + InitializeCriticalSection(&m->crit); + m->owner = 0; + *res = m; + return 0; +} + +static DWORD st_mutex_destroy(st_mutex m) +{ + DeleteCriticalSection(&m->crit); + caml_stat_free(m); + return 0; +} + +/* Error codes with the 29th bit set are reserved for the application */ + +#define MUTEX_DEADLOCK (1<<29 | 1) +#define MUTEX_PREVIOUSLY_UNLOCKED 0 +#define MUTEX_ALREADY_LOCKED (1 << 29) +#define MUTEX_NOT_OWNED (1<<29 | 2) + +Caml_inline DWORD st_mutex_lock(st_mutex m) +{ + st_tid self, prev; + TRACE1("st_mutex_lock", m); + self = st_current_thread_id(); + /* Critical sections are recursive locks, so this will succeed + if we already own the lock */ + EnterCriticalSection(&m->crit); + /* Record that we are the owner of the lock */ + prev = 
Tid_Atomic_Exchange(&m->owner, self); + if (prev != 0) { + /* The mutex was already locked by ourselves. + Cancel the EnterCriticalSection above and return an error. */ + TRACE1("st_mutex_lock (deadlock)", m); + LeaveCriticalSection(&m->crit); + return MUTEX_DEADLOCK; + } + TRACE1("st_mutex_lock (done)", m); + return 0; +} + +Caml_inline DWORD st_mutex_trylock(st_mutex m) +{ + st_tid self, prev; + TRACE1("st_mutex_trylock", m); + self = st_current_thread_id(); + if (! TryEnterCriticalSection(&m->crit)) { + TRACE1("st_mutex_trylock (failure)", m); + return MUTEX_ALREADY_LOCKED; + } + /* Record that we are the owner of the lock */ + prev = Tid_Atomic_Exchange(&m->owner, self); + if (prev != 0) { + /* The mutex was already locked by ourselves. + Cancel the EnterCriticalSection above and return "already locked". */ + TRACE1("st_mutex_trylock (already locked by self)", m); + LeaveCriticalSection(&m->crit); + return MUTEX_ALREADY_LOCKED; + } + TRACE1("st_mutex_trylock (done)", m); + return MUTEX_PREVIOUSLY_UNLOCKED; +} + +Caml_inline DWORD st_mutex_unlock(st_mutex m) +{ + st_tid self, prev; + /* If the calling thread holds the lock, m->owner is stable and equal + to st_current_thread_id(). + Otherwise, the value of m->owner can be 0 (if the mutex is unlocked) + or some other thread ID (if the mutex is held by another thread), + but is never equal to st_current_thread_id(). */ + self = st_current_thread_id(); + prev = Tid_Atomic_Compare_Exchange(&m->owner, 0, self); + if (prev != self) { + /* The value of m->owner is unchanged */ + TRACE1("st_mutex_unlock (error)", m); + return MUTEX_NOT_OWNED; + } + TRACE1("st_mutex_unlock", m); + LeaveCriticalSection(&m->crit); + return 0; +} + +/* Condition variables */ + +/* A condition variable is just a list of threads currently + waiting on this c.v. Each thread is represented by its + associated event. 
*/ + +struct st_wait_list { + HANDLE event; /* event of the first waiting thread */ + struct st_wait_list * next; +}; + +typedef struct st_condvar_struct { + CRITICAL_SECTION lock; /* protect the data structure */ + struct st_wait_list * waiters; /* list of threads waiting */ +} * st_condvar; + +static DWORD st_condvar_create(st_condvar * res) +{ + st_condvar c = caml_stat_alloc_noexc(sizeof(struct st_condvar_struct)); + if (c == NULL) return ERROR_NOT_ENOUGH_MEMORY; + InitializeCriticalSection(&c->lock); + c->waiters = NULL; + *res = c; + return 0; +} + +static DWORD st_condvar_destroy(st_condvar c) +{ + TRACE1("st_condvar_destroy", c); + DeleteCriticalSection(&c->lock); + caml_stat_free(c); + return 0; +} + +static DWORD st_condvar_signal(st_condvar c) +{ + DWORD rc = 0; + struct st_wait_list * curr, * next; + + TRACE1("st_condvar_signal", c); + EnterCriticalSection(&c->lock); + curr = c->waiters; + if (curr != NULL) { + next = curr->next; + /* Wake up the first waiting thread */ + TRACE1("st_condvar_signal: waking up", curr->event); + if (! SetEvent(curr->event)) rc = GetLastError(); + /* Remove it from the waiting list */ + c->waiters = next; + } + LeaveCriticalSection(&c->lock); + return rc; +} + +static DWORD st_condvar_broadcast(st_condvar c) +{ + DWORD rc = 0; + struct st_wait_list * curr, * next; + + TRACE1("st_condvar_broadcast", c); + EnterCriticalSection(&c->lock); + /* Wake up all waiting threads */ + curr = c->waiters; + while (curr != NULL) { + next = curr->next; + TRACE1("st_condvar_signal: waking up", curr->event); + if (! 
SetEvent(curr->event)) rc = GetLastError(); + curr = next; + } + /* Remove them all from the waiting list */ + c->waiters = NULL; + LeaveCriticalSection(&c->lock); + return rc; +} + +static DWORD st_condvar_wait(st_condvar c, st_mutex m) +{ + HANDLE ev; + struct st_wait_list wait; + DWORD rc; + st_tid self, prev; + + TRACE1("st_condvar_wait", c); + /* Recover (or create) the event associated with the calling thread */ + ev = (HANDLE) TlsGetValue(st_thread_sem_key); + if (ev == 0) { + ev = CreateEvent(NULL, + FALSE /*auto reset*/, + FALSE /*initially unset*/, + NULL); + if (ev == NULL) return GetLastError(); + TlsSetValue(st_thread_sem_key, (void *) ev); + } + /* Get ready to release the mutex */ + self = st_current_thread_id(); + prev = Tid_Atomic_Compare_Exchange(&m->owner, 0, self); + if (prev != self) { + /* The value of m->owner is unchanged */ + TRACE1("st_condvar_wait: error: mutex not held", m); + return MUTEX_NOT_OWNED; + } + /* Insert the current thread in the waiting list (atomically) */ + EnterCriticalSection(&c->lock); + wait.event = ev; + wait.next = c->waiters; + c->waiters = &wait; + LeaveCriticalSection(&c->lock); + /* Finish releasing the mutex m (like st_mutex_unlock does, minus + the error checking, which we've already done above). */ + LeaveCriticalSection(&m->crit); + /* Wait for our event to be signaled. 
There is no risk of lost + wakeup, since we inserted ourselves on the waiting list of c + before releasing m */ + TRACE1("st_condvar_wait: blocking on event", ev); + if (WaitForSingleObject(ev, INFINITE) == WAIT_FAILED) + return GetLastError(); + /* Reacquire the mutex m */ + TRACE1("st_condvar_wait: restarted, acquiring mutex", c); + rc = st_mutex_lock(m); + if (rc != 0) return rc; + TRACE1("st_condvar_wait: acquired mutex", c); + return 0; +} + +/* Triggered events */ + +typedef HANDLE st_event; + +static DWORD st_event_create(st_event * res) +{ + st_event m = + CreateEvent(NULL, TRUE/*manual reset*/, FALSE/*initially unset*/, NULL); + TRACE1("st_event_create", m); + if (m == NULL) return GetLastError(); + *res = m; + return 0; +} + +static DWORD st_event_destroy(st_event e) +{ + TRACE1("st_event_destroy", e); + if (CloseHandle(e)) + return 0; + else + return GetLastError(); +} + +static DWORD st_event_trigger(st_event e) +{ + TRACE1("st_event_trigger", e); + if (SetEvent(e)) + return 0; + else + return GetLastError(); +} + +static DWORD st_event_wait(st_event e) +{ + TRACE1("st_event_wait", e); + if (WaitForSingleObject(e, INFINITE) == WAIT_FAILED) + return GetLastError(); + else + return 0; +} + +/* Reporting errors */ + +static void st_check_error(DWORD retcode, char * msg) +{ + wchar_t err[1024]; + int errlen, msglen, ret; + value str; + + if (retcode == 0) return; + if (retcode == ERROR_NOT_ENOUGH_MEMORY) caml_raise_out_of_memory(); + switch (retcode) { + case MUTEX_DEADLOCK: + ret = swprintf(err, sizeof(err)/sizeof(wchar_t), + L"Mutex is already locked by calling thread"); + break; + case MUTEX_NOT_OWNED: + ret = swprintf(err, sizeof(err)/sizeof(wchar_t), + L"Mutex is not locked by calling thread"); + break; + default: + ret = FormatMessage( + FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, + retcode, + 0, + err, + sizeof(err)/sizeof(wchar_t), + NULL); + if (! 
ret) { + ret = + swprintf(err, sizeof(err)/sizeof(wchar_t), L"error code %lx", retcode); + } + } + msglen = strlen(msg); + errlen = win_wide_char_to_multi_byte(err, ret, NULL, 0); + str = caml_alloc_string(msglen + 2 + errlen); + memmove (&Byte(str, 0), msg, msglen); + memmove (&Byte(str, msglen), ": ", 2); + win_wide_char_to_multi_byte(err, ret, &Byte(str, msglen + 2), errlen); + caml_raise_sys_error(str); +} + +/* Variable used to stop the "tick" thread */ +static volatile int caml_tick_thread_stop = 0; + +/* The tick thread: posts a SIGPREEMPTION signal periodically */ + +static DWORD WINAPI caml_thread_tick(void * arg) +{ + while(! caml_tick_thread_stop) { + Sleep(Thread_timeout); + /* The preemption signal should never cause a callback, so don't + go through caml_handle_signal(), just record signal delivery via + caml_record_signal(). */ + caml_record_signal(SIGPREEMPTION); + } + return 0; +} + +/* "At fork" processing -- none under Win32 */ + +static DWORD st_atfork(void (*fn)(void)) +{ + return 0; +} + +/* Signal handling -- none under Win32 */ + +value caml_thread_sigmask(value cmd, value sigs) /* ML */ +{ + caml_invalid_argument("Thread.sigmask not implemented"); + return Val_int(0); /* not reached */ +} + +value caml_wait_signal(value sigs) /* ML */ +{ + caml_invalid_argument("Thread.wait_signal not implemented"); + return Val_int(0); /* not reached */ +} diff --git a/ocaml/otherlibs/systhreads4/thread.ml b/ocaml/otherlibs/systhreads4/thread.ml new file mode 100644 index 00000000000..44e3f962bba --- /dev/null +++ b/ocaml/otherlibs/systhreads4/thread.ml @@ -0,0 +1,124 @@ +# 1 "thread.ml" +(**************************************************************************) +(* *) +(* OCaml *) +(* *) +(* Xavier Leroy and Pascal Cuoq, projet Cristal, INRIA Rocquencourt *) +(* *) +(* Copyright 1996 Institut National de Recherche en Informatique et *) +(* en Automatique. *) +(* *) +(* All rights reserved. 
This file is distributed under the terms of *) +(* the GNU Lesser General Public License version 2.1, with the *) +(* special exception on linking described in the file LICENSE. *) +(* *) +(**************************************************************************) + +(* User-level threads *) + +[@@@ocaml.flambda_o3] + +type t + +external thread_initialize : unit -> unit = "caml_thread_initialize" +external thread_cleanup : unit -> unit = "caml_thread_cleanup" +external thread_new : (unit -> unit) -> t = "caml_thread_new" +external thread_uncaught_exception : exn -> unit = + "caml_thread_uncaught_exception" + +external yield : unit -> unit = "caml_thread_yield" +external self : unit -> t = "caml_thread_self" [@@noalloc] +external id : t -> int = "caml_thread_id" [@@noalloc] +external join : t -> unit = "caml_thread_join" +external exit_stub : unit -> unit = "caml_thread_exit" + +(* For new, make sure the function passed to thread_new never + raises an exception. *) + +let[@inline never] check_memprof_cb () = ref () + +let default_uncaught_exception_handler = thread_uncaught_exception + +let uncaught_exception_handler = ref default_uncaught_exception_handler + +let set_uncaught_exception_handler fn = uncaught_exception_handler := fn + +exception Exit + +let create fn arg = + thread_new + (fun () -> + try + fn arg; + ignore (Sys.opaque_identity (check_memprof_cb ())) + with + | Exit -> + ignore (Sys.opaque_identity (check_memprof_cb ())) + | exn -> + let raw_backtrace = Printexc.get_raw_backtrace () in + flush stdout; flush stderr; + try + !uncaught_exception_handler exn + with + | Exit -> () + | exn' -> + Printf.eprintf + "Thread %d killed on uncaught exception %s\n" + (id (self ())) (Printexc.to_string exn); + Printexc.print_raw_backtrace stderr raw_backtrace; + Printf.eprintf + "Thread %d uncaught exception handler raised %s\n" + (id (self ())) (Printexc.to_string exn'); + Printexc.print_backtrace stdout; + flush stderr) + +let exit () = + ignore 
(Sys.opaque_identity (check_memprof_cb ())); + exit_stub () + +(* Thread.kill is currently not implemented due to problems with + cleanup handlers on several platforms *) + +let kill th = invalid_arg "Thread.kill: not implemented" + +(* Preemption *) + +let preempt signal = yield() + +(* Initialization of the scheduler *) + +let preempt_signal = + match Sys.os_type with + | "Win32" -> Sys.sigterm + | _ -> Sys.sigvtalrm + +let () = + Sys.set_signal preempt_signal (Sys.Signal_handle preempt); + thread_initialize (); + Callback.register "Thread.at_shutdown" (fun () -> + thread_cleanup(); + (* In case of DLL-embedded OCaml the preempt_signal handler + will point to nowhere after DLL unloading and an accidental + preempt_signal will crash the main program. So restore the + default handler. *) + Sys.set_signal preempt_signal Sys.Signal_default + ) + +(* Wait functions *) + +let delay = Unix.sleepf + +let wait_read fd = () +let wait_write fd = () + +let wait_timed_read fd d = + match Unix.select [fd] [] [] d with ([], _, _) -> false | (_, _, _) -> true +let wait_timed_write fd d = + match Unix.select [] [fd] [] d with (_, [], _) -> false | (_, _, _) -> true +let select = Unix.select + +let wait_pid p = Unix.waitpid [] p + +external sigmask : Unix.sigprocmask_command -> int list -> int list + = "caml_thread_sigmask" +external wait_signal : int list -> int = "caml_wait_signal" diff --git a/ocaml/otherlibs/systhreads4/thread.mli b/ocaml/otherlibs/systhreads4/thread.mli new file mode 100644 index 00000000000..8f9013dd796 --- /dev/null +++ b/ocaml/otherlibs/systhreads4/thread.mli @@ -0,0 +1,172 @@ +(**************************************************************************) +(* *) +(* OCaml *) +(* *) +(* Xavier Leroy and Pascal Cuoq, projet Cristal, INRIA Rocquencourt *) +(* *) +(* Copyright 1995 Institut National de Recherche en Informatique et *) +(* en Automatique. *) +(* *) +(* All rights reserved. 
This file is distributed under the terms of *) +(* the GNU Lesser General Public License version 2.1, with the *) +(* special exception on linking described in the file LICENSE. *) +(* *) +(**************************************************************************) + +(** Lightweight threads for Posix [1003.1c] and Win32. *) + +type t +(** The type of thread handles. *) + +(** {1 Thread creation and termination} *) + +val create : ('a -> 'b) -> 'a -> t +(** [Thread.create funct arg] creates a new thread of control, + in which the function application [funct arg] + is executed concurrently with the other threads of the program. + The application of [Thread.create] + returns the handle of the newly created thread. + The new thread terminates when the application [funct arg] + returns, either normally or by raising the {!Thread.Exit} exception + or by raising any other uncaught exception. + In the last case, the uncaught exception is printed on standard error, + but not propagated back to the parent thread. Similarly, the + result of the application [funct arg] is discarded and not + directly accessible to the parent thread. *) + +val self : unit -> t +(** Return the handle for the thread currently executing. *) + +val id : t -> int +(** Return the identifier of the given thread. A thread identifier + is an integer that identifies uniquely the thread. + It can be used to build data structures indexed by threads. *) + +exception Exit +(** Exception that can be raised by user code to initiate termination + of the current thread. + Compared to calling the {!Thread.exit} function, raising the + {!Thread.Exit} exception will trigger {!Fun.finally} finalizers + and catch-all exception handlers. + It is the recommended way to terminate threads prematurely. + + @since 4.14.0 +*) + +val exit : unit -> unit +(** Terminate prematurely the currently executing thread. 
*) + +val kill : t -> unit + [@@ocaml.deprecated "Not implemented, do not use"] +(** This function was supposed to terminate prematurely the thread + whose handle is given. It is not currently implemented due to + problems with cleanup handlers on many POSIX 1003.1c implementations. + It always raises the [Invalid_argument] exception. *) + +(** {1 Suspending threads} *) + +val delay: float -> unit +(** [delay d] suspends the execution of the calling thread for + [d] seconds. The other program threads continue to run during + this time. *) + +val join : t -> unit +(** [join th] suspends the execution of the calling thread + until the thread [th] has terminated. *) + +val yield : unit -> unit +(** Re-schedule the calling thread without suspending it. + This function can be used to give scheduling hints, + telling the scheduler that now is a good time to + switch to other threads. *) + +(** {1 Waiting for file descriptors or processes} *) + +(** The functions below are leftovers from an earlier, VM-based threading + system. The {!Unix} module provides equivalent functionality, in + a more general and more standard-conformant manner. It is recommended + to use {!Unix} functions directly. *) + +val wait_read : Unix.file_descr -> unit + [@@ocaml.deprecated "This function no longer does anything"] +(** This function does nothing in the current implementation of the threading + library and can be removed from all user programs. *) + +val wait_write : Unix.file_descr -> unit + [@@ocaml.deprecated "This function no longer does anything"] +(** This function does nothing in the current implementation of the threading + library and can be removed from all user programs. 
*) + +val wait_timed_read : Unix.file_descr -> float -> bool +(** See {!Thread.wait_timed_write}.*) + +val wait_timed_write : Unix.file_descr -> float -> bool +(** Suspend the execution of the calling thread until at least + one character or EOF is available for reading ([wait_timed_read]) or + one character can be written without blocking ([wait_timed_write]) + on the given Unix file descriptor. Wait for at most + the amount of time given as second argument (in seconds). + Return [true] if the file descriptor is ready for input/output + and [false] if the timeout expired. + The same functionality can be achieved with {!Unix.select}. +*) + +val select : + Unix.file_descr list -> Unix.file_descr list -> + Unix.file_descr list -> float -> + Unix.file_descr list * Unix.file_descr list * Unix.file_descr list +(** Same function as {!Unix.select}. + Suspend the execution of the calling thread until input/output + becomes possible on the given Unix file descriptors. + The arguments and results have the same meaning as for + {!Unix.select}. *) + +val wait_pid : int -> int * Unix.process_status +(** Same function as {!Unix.waitpid}. + [wait_pid p] suspends the execution of the calling thread + until the process specified by the process identifier [p] + terminates. Returns the pid of the child caught and + its termination status, as per {!Unix.wait}. *) + +(** {1 Management of signals} *) + +(** Signal handling follows the POSIX thread model: signals generated + by a thread are delivered to that thread; signals generated externally + are delivered to one of the threads that does not block it. + Each thread possesses a set of blocked signals, which can be modified + using {!Thread.sigmask}. This set is inherited at thread creation time. + Per-thread signal masks are supported only by the system thread library + under Unix, but not under Win32, nor by the VM thread library. 
*) + +val sigmask : Unix.sigprocmask_command -> int list -> int list +(** [sigmask cmd sigs] changes the set of blocked signals for the + calling thread. + If [cmd] is [SIG_SETMASK], blocked signals are set to those in + the list [sigs]. + If [cmd] is [SIG_BLOCK], the signals in [sigs] are added to + the set of blocked signals. + If [cmd] is [SIG_UNBLOCK], the signals in [sigs] are removed + from the set of blocked signals. + [sigmask] returns the set of previously blocked signals for the thread. *) + + +val wait_signal : int list -> int +(** [wait_signal sigs] suspends the execution of the calling thread + until the process receives one of the signals specified in the + list [sigs]. It then returns the number of the signal received. + Signal handlers attached to the signals in [sigs] will not + be invoked. The signals [sigs] are expected to be blocked before + calling [wait_signal]. *) + +(** {1 Uncaught exceptions} *) + +val default_uncaught_exception_handler : exn -> unit +(** [Thread.default_uncaught_exception_handler] will print the thread's id, + exception and backtrace (if available). *) + +val set_uncaught_exception_handler : (exn -> unit) -> unit +(** [Thread.set_uncaught_exception_handler fn] registers [fn] as the handler + for uncaught exceptions. + + If the newly set uncaught exception handler raise an exception, + {!default_uncaught_exception_handler} will be called. *) diff --git a/ocaml/otherlibs/systhreads4/threads.h b/ocaml/otherlibs/systhreads4/threads.h new file mode 100644 index 00000000000..df6f7aa0e83 --- /dev/null +++ b/ocaml/otherlibs/systhreads4/threads.h @@ -0,0 +1,115 @@ +/**************************************************************************/ +/* */ +/* OCaml */ +/* */ +/* Xavier Leroy and Damien Doligez, INRIA Rocquencourt */ +/* */ +/* Copyright 1995 Institut National de Recherche en Informatique et */ +/* en Automatique. */ +/* */ +/* All rights reserved. 
This file is distributed under the terms of */ +/* the GNU Lesser General Public License version 2.1, with the */ +/* special exception on linking described in the file LICENSE. */ +/* */ +/**************************************************************************/ + +#ifndef CAML_THREADS_H +#define CAML_THREADS_H + +#ifdef __cplusplus +extern "C" { +#endif + +CAMLextern void caml_enter_blocking_section (void); +CAMLextern void caml_leave_blocking_section (void); +#define caml_acquire_runtime_system caml_leave_blocking_section +#define caml_release_runtime_system caml_enter_blocking_section + +/* Manage the master lock around the OCaml run-time system. + Only one thread at a time can execute OCaml compiled code or + OCaml run-time system functions. + + When OCaml calls a C function, the current thread holds the master + lock. The C function can release it by calling + [caml_release_runtime_system]. Then, another thread can execute OCaml + code. However, the calling thread must not access any OCaml data, + nor call any runtime system function, nor call back into OCaml. + + Before returning to its OCaml caller, or accessing OCaml data, + or call runtime system functions, the current thread must + re-acquire the master lock by calling [caml_acquire_runtime_system]. + + Symmetrically, if a C function (not called from OCaml) wishes to + call back into OCaml code, it should invoke [caml_acquire_runtime_system] + first, then do the callback, then invoke [caml_release_runtime_system]. + + For historical reasons, alternate names can be used: + [caml_enter_blocking_section] instead of [caml_release_runtime_system] + [caml_leave_blocking_section] instead of [caml_acquire_runtime_system] + Intuition: a ``blocking section'' is a piece of C code that does not + use the runtime system (typically, a blocking I/O operation). 
+*/ + +/* These functions are defined in the threads library, not the runtime */ +#ifndef CAMLextern_libthreads +#define CAMLextern_libthreads CAMLextern +#endif +CAMLextern_libthreads int caml_c_thread_register(void); +CAMLextern_libthreads int caml_c_thread_unregister(void); + +/* If a thread is created by C code (instead of by OCaml itself), + it must be registered with the OCaml runtime system before + being able to call back into OCaml code or use other runtime system + functions. Just call [caml_c_thread_register] once. + Before the thread finishes, it must call [caml_c_thread_unregister]. + Both functions return 1 on success, 0 on error. +*/ + +enum caml_thread_type { Thread_type_caml, Thread_type_c_registered }; +struct caml_locking_scheme { + void* context; + void (*lock)(void*); + void (*unlock)(void*); + + /* If non-NULL, these functions are called when threads start and stop. + For threads created by OCaml, that's at creation and termination. + For threads created by C, that's at caml_c_thread_register/unregister. + The lock is not held when these functions are called. */ + void (*thread_start)(void*, enum caml_thread_type); + void (*thread_stop)(void*, enum caml_thread_type); + + /* Called after fork(). + The lock should be held after this function returns. */ + void (*reinitialize_after_fork)(void*); + + /* can_skip_yield and yield are both called with the lock held, + and expect it held on return */ + int (*can_skip_yield)(void*); + void (*yield)(void*); +}; + +extern struct caml_locking_scheme caml_default_locking_scheme; + +/* Switch to a new runtime locking scheme. + + The old runtime lock must be held (i.e. not in a blocking section), + and the new runtime lock must not be held. After this function + returns, the old lock is released and the new one is held. + + There is a period during this function when neither lock is held, + so context-switches may occur. 
*/ +CAMLextern_libthreads +void caml_switch_runtime_locking_scheme(struct caml_locking_scheme*); + +CAMLextern_libthreads +void caml_thread_save_runtime_state(void); + +CAMLextern_libthreads +void caml_thread_restore_runtime_state(void); + + +#ifdef __cplusplus +} +#endif + +#endif /* CAML_THREADS_H */