 #include "caml/sys.h"
 #include "caml/memprof.h"

-/* threads.h is *not* included since it contains the _external_ declarations
-   for the caml_c_thread_register and caml_c_thread_unregister functions. */
+#define CAMLextern_libthreads
+#include "threads.h"

 #ifndef NATIVE_CODE
 /* Initial size of bytecode stack when a thread is created (4 KB) */
 #include "st_posix.h"
 #endif

+/* Atomics */
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 8
+/* GCC 4.8 shipped with a working implementation of atomics, but no
+   stdatomic.h header, so we need to use GCC-specific intrinsics. */
+
+#define _Atomic /* GCC intrinsics work on normal variables */
+#define atomic_store(v, x) \
+  __atomic_store_n((v), (x), __ATOMIC_SEQ_CST)
+#define atomic_load(v) \
+  __atomic_load_n((v), __ATOMIC_SEQ_CST)
+#define atomic_exchange(v, x) \
+  __atomic_exchange_n((v), (x), __ATOMIC_SEQ_CST)
+#else
+#include <stdatomic.h>
+#endif
+
+
 /* The ML value describing a thread (heap-allocated) */

 struct caml_thread_descr {
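
The GCC 4.8 fallback above only has to emulate the three C11 operations this file uses, applied to a single _Atomic pointer. As a minimal, self-contained sketch of that usage pattern (the scheme_a/scheme_b names are illustrative, not from the patch):

    #include <stdatomic.h>  /* or the GCC 4.8 shim above */

    struct scheme { int id; };
    static struct scheme scheme_a = { 1 }, scheme_b = { 2 };

    /* Same shape as caml_locking_scheme below: an atomic pointer to a
       non-atomic struct. */
    static struct scheme * _Atomic current = &scheme_a;

    int example(void)
    {
      struct scheme *s, *old;
      s = atomic_load(&current);               /* sequentially consistent */
      atomic_store(&current, &scheme_b);
      old = atomic_exchange(&current, &scheme_a);
      return s->id + old->id;                  /* 1 + 2 */
    }

Under the GCC 4.8 branch, _Atomic expands to nothing and the three macros map directly to the __atomic_*_n builtins with __ATOMIC_SEQ_CST ordering, so the same code compiles either way.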
@@ -111,7 +128,7 @@ static caml_thread_t all_threads = NULL;
 static caml_thread_t curr_thread = NULL;

 /* The master lock protecting the OCaml runtime system */
-static st_masterlock caml_master_lock;
+static struct caml_locking_scheme * _Atomic caml_locking_scheme;

 /* Whether the "tick" thread is already running */
 static int caml_tick_thread_running = 0;
@@ -143,6 +160,50 @@ extern struct longjmp_buffer caml_termination_jmpbuf;
 extern void (*caml_termination_hook)(void);
 #endif

+/* The default locking scheme */
+static st_masterlock default_master_lock;
+
+static int default_can_skip_yield(void* m)
+{
+  return st_masterlock_waiters(m) == 0;
+}
+
+struct caml_locking_scheme caml_default_locking_scheme =
+  { &default_master_lock,
+    (void (*)(void*))&st_masterlock_acquire,
+    (void (*)(void*))&st_masterlock_release,
+    (void (*)(void*))&st_masterlock_init,
+    default_can_skip_yield,
+    (void (*)(void*))&st_thread_yield };
+
+static void acquire_runtime_lock()
+{
+  struct caml_locking_scheme* s;
+
+  /* The locking scheme may be changed by the thread that currently
+     holds the lock. This means that it may change while we're waiting
+     to acquire it, so by the time we acquire it, it may no longer be
+     the right scheme. */
+
+ retry:
+  s = atomic_load(&caml_locking_scheme);
+  s->lock(s->context);
+  if (atomic_load(&caml_locking_scheme) != s) {
+    /* This is no longer the right scheme. Unlock and try again. */
+    s->unlock(s->context);
+    goto retry;
+  }
+}
+
+static void release_runtime_lock()
+{
+  /* There is no tricky case here as in acquire: only the holder of
+     the lock can change the scheme, and here that is us. */
+  struct caml_locking_scheme* s;
+  s = atomic_load(&caml_locking_scheme);
+  s->unlock(s->context);
+}
+
147
208
148
209
static void (* prev_scan_roots_hook ) (scanning_action );
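
Since threads.h is now included at the top of the file, struct caml_locking_scheme must be declared there. Judging from the initializer of caml_default_locking_scheme and the field accesses in this file, the declaration is presumably along these lines (a reconstruction for the reader, not the header's verbatim text):

    struct caml_locking_scheme {
      void *context;                            /* passed to each hook below */
      void (*lock)(void *);                     /* acquire the runtime lock */
      void (*unlock)(void *);                   /* release the runtime lock */
      void (*reinitialize_after_fork)(void *);  /* default: st_masterlock_init */
      int  (*can_skip_yield)(void *);           /* optional; may be NULL */
      void (*yield)(void *);                    /* default: st_thread_yield */
    };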
@@ -182,7 +243,7 @@ static void memprof_ctx_iter(th_ctx_action f, void* data)

 /* Saving and restoring runtime state in curr_thread */

-Caml_inline void caml_thread_save_runtime_state(void)
+CAMLexport void caml_thread_save_runtime_state(void)
 {
 #ifdef NATIVE_CODE
   curr_thread->top_of_stack = Caml_state->top_of_stack;
@@ -208,8 +269,12 @@ Caml_inline void caml_thread_save_runtime_state(void)
   caml_memprof_leave_thread();
 }

-Caml_inline void caml_thread_restore_runtime_state(void)
+CAMLexport void caml_thread_restore_runtime_state(void)
 {
+  /* Update curr_thread to point to the thread descriptor corresponding
+     to the thread currently executing */
+  curr_thread = st_tls_get(thread_descriptor_key);
+
 #ifdef NATIVE_CODE
   Caml_state->top_of_stack = curr_thread->top_of_stack;
   Caml_state->bottom_of_stack = curr_thread->bottom_of_stack;
@@ -234,6 +299,19 @@ Caml_inline void caml_thread_restore_runtime_state(void)
   caml_memprof_enter_thread(curr_thread->memprof_ctx);
 }

+CAMLexport void caml_switch_runtime_locking_scheme(struct caml_locking_scheme* new)
+{
+  struct caml_locking_scheme* old;
+
+  caml_thread_save_runtime_state();
+  old = atomic_exchange(&caml_locking_scheme, new);
+  /* We hold 'old', but it is no longer the runtime lock */
+  old->unlock(old->context);
+  acquire_runtime_lock();
+  caml_thread_restore_runtime_state();
+}
+
+
 /* Hooks for caml_enter_blocking_section and caml_leave_blocking_section */

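As a usage sketch (none of this is part of the patch, and the my_* names are hypothetical): a C library that needs the OCaml runtime to share an externally owned lock could install a scheme built on a pthread mutex. Note that caml_switch_runtime_locking_scheme must be called with the current runtime lock held, e.g. from a C primitive invoked from OCaml.

    #include <pthread.h>
    #include <caml/mlvalues.h>
    #include <caml/threads.h>

    static pthread_mutex_t my_lock = PTHREAD_MUTEX_INITIALIZER;

    static void my_acquire(void *m) { pthread_mutex_lock(m); }
    static void my_release(void *m) { pthread_mutex_unlock(m); }
    static void my_reinit(void *m)  { pthread_mutex_init(m, NULL); }

    static void my_yield(void *m)
    {
      /* Naive yield: drop the lock, then re-take it. */
      pthread_mutex_unlock(m);
      pthread_mutex_lock(m);
    }

    static struct caml_locking_scheme my_scheme = {
      &my_lock, my_acquire, my_release, my_reinit,
      NULL,   /* can_skip_yield == NULL: never skip a yield */
      my_yield
    };

    /* Called from OCaml, so the runtime lock is held on entry. */
    CAMLprim value install_my_scheme(value unit)
    {
      caml_switch_runtime_locking_scheme(&my_scheme);
      return Val_unit;
    }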
@@ -243,7 +321,7 @@ static void caml_thread_enter_blocking_section(void)
      of the current thread */
   caml_thread_save_runtime_state();
   /* Tell other threads that the runtime is free */
-  st_masterlock_release(&caml_master_lock);
+  release_runtime_lock();
 }

 static void caml_thread_leave_blocking_section(void)
@@ -255,11 +333,7 @@ static void caml_thread_leave_blocking_section(void)
   DWORD error = GetLastError();
 #endif
   /* Wait until the runtime is free */
-  st_masterlock_acquire(&caml_master_lock);
-  /* Update curr_thread to point to the thread descriptor corresponding
-     to the thread currently executing */
-  curr_thread = st_tls_get(thread_descriptor_key);
-  /* Restore the runtime state from the curr_thread descriptor */
+  acquire_runtime_lock();
   caml_thread_restore_runtime_state();
 #ifdef _WIN32
   SetLastError(error);
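
For context on when these two hooks run (standard OCaml FFI practice, not introduced by this patch; my_read_byte is a hypothetical primitive): any C binding that blocks should bracket the blocking call with the enter/leave pair, which after this change routes through the installed locking scheme.

    #include <unistd.h>
    #include <caml/mlvalues.h>
    #include <caml/threads.h>

    /* Read one byte from fd with the runtime lock released, so other
       OCaml threads can run during the blocking read(2). */
    CAMLprim value my_read_byte(value fd)
    {
      char c;
      long n;
      int f = Int_val(fd);
      caml_enter_blocking_section();   /* ends in release_runtime_lock() */
      n = read(f, &c, 1);
      caml_leave_blocking_section();   /* ends in acquire_runtime_lock() */
      return Val_long(n == 1 ? (unsigned char) c : -1);
    }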
@@ -419,6 +493,7 @@ static void caml_thread_remove_info(caml_thread_t th)
 static void caml_thread_reinitialize(void)
 {
   struct channel * chan;
+  struct caml_locking_scheme* s;

   /* Remove all other threads (now nonexistent)
      from the doubly-linked list of threads */
@@ -430,7 +505,8 @@ static void caml_thread_reinitialize(void)
   /* Reinitialize the master lock machinery,
      just in case the fork happened while other threads were doing
      caml_leave_blocking_section */
-  st_masterlock_init(&caml_master_lock);
+  s = atomic_load(&caml_locking_scheme);
+  s->reinitialize_after_fork(s->context);
   /* Tick thread is not currently running in child process, will be
      re-created at next Thread.create */
   caml_tick_thread_running = 0;
@@ -454,7 +530,8 @@ CAMLprim value caml_thread_initialize(value unit) /* ML */
   /* OS-specific initialization */
   st_initialize();
   /* Initialize and acquire the master lock */
-  st_masterlock_init(&caml_master_lock);
+  st_masterlock_init(&default_master_lock);
+  caml_locking_scheme = &caml_default_locking_scheme;
   /* Initialize the keys */
   st_tls_newkey(&thread_descriptor_key);
   st_tls_newkey(&last_channel_locked_key);
@@ -562,7 +639,7 @@ static void caml_thread_stop(void)
   /* OS-specific cleanups */
   st_thread_cleanup();
   /* Release the runtime system */
-  st_masterlock_release(&caml_master_lock);
+  release_runtime_lock();
 }

 /* Create a thread */
@@ -658,7 +735,7 @@ CAMLexport int caml_c_thread_register(void)
   th->top_of_stack = (char *) &err;
 #endif
   /* Take master lock to protect access to the chaining of threads */
-  st_masterlock_acquire(&caml_master_lock);
+  acquire_runtime_lock();
   /* Add thread info block to the list of threads */
   if (all_threads == NULL) {
     th->next = th;
@@ -673,7 +750,7 @@ CAMLexport int caml_c_thread_register(void)
   /* Associate the thread descriptor with the thread */
   st_tls_set(thread_descriptor_key, (void *) th);
   /* Release the master lock */
-  st_masterlock_release(&caml_master_lock);
+  release_runtime_lock();
   /* Now we can re-enter the run-time system and heap-allocate the descriptor */
   caml_leave_blocking_section();
   th->descr = caml_thread_new_descriptor(Val_unit);  /* no closure */
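
As a reminder of how this function is reached (standard usage, not part of the patch; the worker body is illustrative): a thread created outside the runtime registers itself before calling into OCaml and unregisters when done.

    #include <pthread.h>
    #include <caml/mlvalues.h>
    #include <caml/threads.h>
    #include <caml/callback.h>

    static void *worker(void *arg)
    {
      const value *work;
      (void) arg;
      caml_c_thread_register();          /* briefly takes the runtime lock */
      caml_acquire_runtime_system();
      work = caml_named_value("work");   /* assumes OCaml registered "work" */
      if (work != NULL) caml_callback(*work, Val_unit);
      caml_release_runtime_system();
      caml_c_thread_unregister();        /* takes and releases the lock again */
      return NULL;
    }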
@@ -694,7 +771,7 @@ CAMLexport int caml_c_thread_unregister(void)
   /* Not registered? */
   if (th == NULL) return 0;
   /* Wait until the runtime is available */
-  st_masterlock_acquire(&caml_master_lock);
+  acquire_runtime_lock();
   /* Forget the thread descriptor */
   st_tls_set(thread_descriptor_key, NULL);
   /* Remove thread info block from list of threads, and free it */
@@ -703,7 +780,7 @@ CAMLexport int caml_c_thread_unregister(void)
      so that it does not prevent the whole process from exiting (#9971) */
   if (all_threads == NULL) caml_thread_cleanup(Val_unit);
   /* Release the runtime */
-  st_masterlock_release(&caml_master_lock);
+  release_runtime_lock();
   return 1;
 }

@@ -771,7 +848,11 @@ CAMLprim value caml_thread_exit(value unit) /* ML */

 CAMLprim value caml_thread_yield(value unit)        /* ML */
 {
-  if (st_masterlock_waiters(&caml_master_lock) == 0) return Val_unit;
+  struct caml_locking_scheme* s;
+
+  s = atomic_load(&caml_locking_scheme);
+  if (s->can_skip_yield != NULL && s->can_skip_yield(s->context))
+    return Val_unit;

   /* Do all the parts of a blocking section enter/leave except lock
      manipulation, which we'll do more efficiently in st_thread_yield. (Since
@@ -781,8 +862,12 @@ CAMLprim value caml_thread_yield(value unit) /* ML */
   caml_raise_async_if_exception(caml_process_pending_signals_exn(),
                                 "signal handler");
   caml_thread_save_runtime_state();
-  st_thread_yield(&caml_master_lock);
-  curr_thread = st_tls_get(thread_descriptor_key);
+  s->yield(s->context);
+  if (atomic_load(&caml_locking_scheme) != s) {
+    /* The lock we have is no longer the runtime lock */
+    s->unlock(s->context);
+    acquire_runtime_lock();
+  }
   caml_thread_restore_runtime_state();
   caml_raise_async_if_exception(caml_process_pending_signals_exn(),
                                 "signal handler");