diff --git a/ocaml/otherlibs/str/str.ml b/ocaml/otherlibs/str/str.ml index 67b862ad1ad..2fb5de9ea64 100644 --- a/ocaml/otherlibs/str/str.ml +++ b/ocaml/otherlibs/str/str.ml @@ -607,16 +607,6 @@ external re_search_forward: regexp -> string -> int -> int array external re_search_backward: regexp -> string -> int -> int array = "re_search_backward" -module Domain = struct - module DLS = struct - - (* CR ocaml 5 domains: Remove this proxy and use the real Domain.DLS *) - let[@inline always] new_key f = ref (f ()) - let[@inline always] set k s = k := s - let[@inline always] get k = !k - end -end - let last_search_result_key = Domain.DLS.new_key (fun () -> [||]) let string_match re s pos = diff --git a/ocaml/runtime/caml/domain.h b/ocaml/runtime/caml/domain.h index fe555fbe63b..930359bbdcc 100644 --- a/ocaml/runtime/caml/domain.h +++ b/ocaml/runtime/caml/domain.h @@ -109,14 +109,14 @@ int caml_try_run_on_all_domains_with_spin_work( int sync, void (*handler)(caml_domain_state*, void*, int, caml_domain_state**), void* data, - void (*leader_setup)(caml_domain_state*), + void (*leader_setup)(caml_domain_state*, void*), /* return nonzero if there may still be useful work to do while spinning */ int (*enter_spin_callback)(caml_domain_state*, void*), void* enter_spin_data); int caml_try_run_on_all_domains( void (*handler)(caml_domain_state*, void*, int, caml_domain_state**), void*, - void (*leader_setup)(caml_domain_state*)); + void (*leader_setup)(caml_domain_state*, void*)); /* Function naming conventions for STW callbacks and STW critical sections. diff --git a/ocaml/runtime/caml/major_gc.h b/ocaml/runtime/caml/major_gc.h index eea353e6c5d..2a5af67d3e5 100644 --- a/ocaml/runtime/caml/major_gc.h +++ b/ocaml/runtime/caml/major_gc.h @@ -30,6 +30,7 @@ extern gc_phase_t caml_gc_phase; Caml_inline int caml_marking_started(void) { return caml_gc_phase != Phase_sweep_main; } +extern atomic_uintnat caml_gc_mark_phase_requested; intnat caml_opportunistic_major_work_available (caml_domain_state*); void caml_opportunistic_major_collection_slice (intnat); @@ -45,7 +46,7 @@ void caml_teardown_major_gc(void); void caml_darken(void*, value, volatile value* ignored); void caml_darken_cont(value); void caml_mark_root(value, value*); -void caml_empty_mark_stack(void); +void caml_mark_roots_stw(int, caml_domain_state**); void caml_finish_major_cycle(int force_compaction); #ifdef DEBUG int caml_mark_stack_is_empty(void); diff --git a/ocaml/runtime/caml/minor_gc.h b/ocaml/runtime/caml/minor_gc.h index 0333340b06d..2d7bf42bab4 100644 --- a/ocaml/runtime/caml/minor_gc.h +++ b/ocaml/runtime/caml/minor_gc.h @@ -87,7 +87,7 @@ extern void caml_realloc_ephe_ref_table (struct caml_ephe_ref_table *); extern void caml_realloc_custom_table (struct caml_custom_table *); struct caml_minor_tables* caml_alloc_minor_tables(void); void caml_free_minor_tables(struct caml_minor_tables*); -void caml_empty_minor_heap_setup(caml_domain_state* domain); +void caml_empty_minor_heap_setup(caml_domain_state* domain, void*); #ifdef DEBUG extern int caml_debug_is_minor(value val); diff --git a/ocaml/runtime/domain.c b/ocaml/runtime/domain.c index eeb4c28285d..8ac41bb1bb4 100644 --- a/ocaml/runtime/domain.c +++ b/ocaml/runtime/domain.c @@ -1031,8 +1031,6 @@ struct domain_startup_params { static void* backup_thread_func(void* v) { - // single-domain hack - caml_fatal_error("backup thread not allowed to run"); dom_internal* di = (dom_internal*)v; uintnat msg; struct interruptor* s = &di->interruptor; @@ -1591,7 +1589,7 @@ int 
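The domain.h hunk above threads a void* through the STW leader-setup callback, so the leader can initialise the same data block that is later handed to the handler on every participant (this is what lets the minor-GC setup publish its sampled flag further down). A minimal sketch of a caller under the new signature; struct my_payload, stw_setup, stw_handler and run_my_stw are illustrative names, not part of the patch:

#define CAML_INTERNALS
#include <caml/mlvalues.h>
#include <caml/domain.h>

/* Illustrative only: a leader-initialised payload shared with every
   participant of the STW section. */
struct my_payload { uintnat decided_by_leader; };

static void stw_setup(caml_domain_state* leader, void* data)
{
  /* Runs once, on the initiating domain, while the other participants are
     still held at the entry barrier. */
  struct my_payload* p = data;
  p->decided_by_leader = 1;
  (void)leader;
}

static void stw_handler(caml_domain_state* domain, void* data,
                        int participating_count,
                        caml_domain_state** participating)
{
  /* Every participant observes the value written by stw_setup. */
  struct my_payload* p = data;
  (void)p; (void)domain; (void)participating_count; (void)participating;
}

static void run_my_stw(void)
{
  static struct my_payload payload;
  caml_try_run_on_all_domains(&stw_handler, &payload, &stw_setup);
}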
caml_try_run_on_all_domains_with_spin_work( int sync, void (*handler)(caml_domain_state*, void*, int, caml_domain_state**), void* data, - void (*leader_setup)(caml_domain_state*), + void (*leader_setup)(caml_domain_state*, void*), int (*enter_spin_callback)(caml_domain_state*, void*), void* enter_spin_data) { @@ -1659,7 +1657,7 @@ int caml_try_run_on_all_domains_with_spin_work( } if( leader_setup ) { - leader_setup(domain_state); + leader_setup(domain_state, data); } #ifdef DEBUG @@ -1726,7 +1724,7 @@ int caml_try_run_on_all_domains_with_spin_work( int caml_try_run_on_all_domains( void (*handler)(caml_domain_state*, void*, int, caml_domain_state**), void* data, - void (*leader_setup)(caml_domain_state*)) + void (*leader_setup)(caml_domain_state*, void*)) { return caml_try_run_on_all_domains_with_spin_work(1, @@ -1738,7 +1736,7 @@ int caml_try_run_on_all_domains( int caml_try_run_on_all_domains_async( void (*handler)(caml_domain_state*, void*, int, caml_domain_state**), void* data, - void (*leader_setup)(caml_domain_state*)) + void (*leader_setup)(caml_domain_state*, void*)) { return caml_try_run_on_all_domains_with_spin_work(0, diff --git a/ocaml/runtime/major_gc.c b/ocaml/runtime/major_gc.c index e9c295dd525..7b283f7a574 100644 --- a/ocaml/runtime/major_gc.c +++ b/ocaml/runtime/major_gc.c @@ -156,12 +156,6 @@ static atomic_uintnat num_domains_orphaning_finalisers = 0; static atomic_uintnat alloc_counter; static atomic_uintnat work_counter; -enum global_roots_status{ - WORK_UNSTARTED, - WORK_STARTED -}; -static atomic_uintnat domain_global_roots_started; - gc_phase_t caml_gc_phase; /* The caml_gc_phase global is only ever updated at the end of the STW @@ -199,6 +193,9 @@ Caml_inline char caml_gc_phase_char(int may_access_gc_phase) { } } +/* True when some domain wants to enter Phase_sweep_and_mark_main */ +atomic_uintnat caml_gc_mark_phase_requested; + extern value caml_ephe_none; /* See weak.c */ static struct ephe_cycle_info_t { @@ -324,13 +321,26 @@ static void ephe_todo_list_emptied (void) /* Since the todo list is empty, this domain does not need to participate in * further ephemeron cycles. */ - atomic_fetch_add(&ephe_cycle_info.num_domains_todo, -1); + atomic_fetch_add_verify_ge0(&ephe_cycle_info.num_domains_todo, -1); CAMLassert(atomic_load_acquire(&ephe_cycle_info.num_domains_done) <= atomic_load_acquire(&ephe_cycle_info.num_domains_todo)); caml_plat_unlock(&ephe_lock); } +/* Begin ephemeron marking by making all 'live' ephes become 'todo' */ +static void begin_ephe_marking(void) +{ + caml_domain_state* domain = Caml_state; + CAMLassert(domain->ephe_info->todo == (value) NULL); + domain->ephe_info->todo = domain->ephe_info->live; + domain->ephe_info->live = (value) NULL; + domain->ephe_info->must_sweep_ephe = 0; + domain->ephe_info->cycle = 0; + domain->ephe_info->cursor.todop = NULL; + domain->ephe_info->cursor.cycle = 0; +} + /* Record that ephemeron marking was done for the given ephemeron cycle. 
*/ static void record_ephe_marking_done (uintnat ephe_cycle) { @@ -487,12 +497,16 @@ static int no_orphaned_work (void) atomic_load_acquire(&orph_structs.final_info) == NULL; } -static void adopt_orphaned_work (void) +static void adopt_orphaned_work (int expected_status) { caml_domain_state* domain_state = Caml_state; value orph_ephe_list_live, last; struct caml_final_info *f, *myf, *temp; +#ifdef DEBUG + orph_ephe_list_verify_status(expected_status); +#endif + if (no_orphaned_work() || caml_domain_is_terminating()) return; @@ -940,7 +954,7 @@ static intnat mark_stack_push_block(struct mark_stack* stk, value block) /* This function shrinks the mark stack back to the MARK_STACK_INIT_SIZE size and is called at domain termination via caml_finish_marking. */ -void caml_shrink_mark_stack (void) +static void shrink_mark_stack (void) { struct mark_stack* stk = Caml_state->mark_stack; intnat init_stack_bsize = MARK_STACK_INIT_SIZE * sizeof(mark_entry); @@ -1179,6 +1193,7 @@ static inline value_ptr chunk_and_offset_to_ptr(uintnat chunk, uintnat offset) { /* mark until the budget runs out or marking is done */ static intnat mark(intnat budget) { caml_domain_state *domain_state = Caml_state; + CAMLassert(caml_marking_started()); while (budget > 0 && !domain_state->marking_done) { budget = do_some_marking(domain_state->mark_stack, budget); if (budget > 0) { @@ -1382,35 +1397,51 @@ static intnat ephe_sweep (caml_domain_state* domain_state, intnat budget) return budget; } -static void start_marking (int participant_count, caml_domain_state** barrier_participants) +static void request_mark_phase (void) { - caml_domain_state* domain = Caml_state; - /* Need to ensure the minor heap is empty before we snapshot the roots, - because the minor heap may currently point to UNMARKED major blocks */ - if (barrier_participants) { - caml_empty_minor_heap_no_major_slice_from_stw - (domain, (void*)0, participant_count, barrier_participants); - } else { - caml_empty_minor_heaps_once (); + if (caml_gc_phase == Phase_sweep_main && + atomic_load_relaxed(&caml_gc_mark_phase_requested) == 0) + atomic_store_release(&caml_gc_mark_phase_requested, 1); +} + +void caml_mark_roots_stw (int participant_count, caml_domain_state** barrier_participants) +{ + if (caml_gc_phase != Phase_sweep_main) + return; + + enum global_roots_status { + WORK_UNSTARTED, + WORK_STARTED, + WORK_COMPLETE + }; + static atomic_uintnat global_roots_scanned; + + Caml_global_barrier_if_final(participant_count) { + caml_gc_phase = Phase_sweep_and_mark_main; + atomic_store_relaxed(&global_roots_scanned, WORK_UNSTARTED); } - /* CR ocaml 5 domains (sdolan): - Either this transition needs to be synchronised between domains, - or a different write barrier needs to be used while some domains - have started marking and others have not. */ - CAMLassert(caml_domain_alone()); - caml_gc_phase = Phase_sweep_and_mark_main; + caml_domain_state* domain = Caml_state; + + /* Adopt orphaned work from domains that were spawned and terminated in the + previous cycle. 
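The hunk above removes start_marking: a domain that finishes sweeping now only requests the mark phase, and the phase actually begins inside the next minor-GC stop-the-world section, where caml_mark_roots_stw runs against an empty minor heap (see the minor_gc.c and signals.c hunks further down). A condensed sketch of the mutator-side sequence, assuming it sits inside major_gc.c where the static request_mark_phase is visible; the helper name after_sweeping_is_done is hypothetical:

/* Illustrative only -- not part of the patch. */
static void after_sweeping_is_done(void)
{
  /* Only request the phase change; unlike the old start_marking, nothing
     flips caml_gc_phase here. */
  request_mark_phase();

  /* The request is honoured at the next minor collection, because root
     marking needs an empty minor heap.  Code that wants marking to begin
     promptly therefore schedules a minor GC, as the signals.c hunk further
     down does: */
  if (atomic_load_relaxed(&caml_gc_mark_phase_requested)) {
    Caml_state->requested_minor_gc = 1;
    caml_interrupt_self();
  }
}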
*/ + adopt_orphaned_work (caml_global_heap_state.UNMARKED); + + begin_ephe_marking(); CAML_EV_BEGIN(EV_MAJOR_MARK_ROOTS); - caml_do_roots (&caml_darken, darken_scanning_flags, domain, domain, 0); { uintnat work_unstarted = WORK_UNSTARTED; - if(atomic_compare_exchange_strong(&domain_global_roots_started, - &work_unstarted, - WORK_STARTED)){ - caml_scan_global_roots(&caml_darken, domain); + if (atomic_load_relaxed(&global_roots_scanned) == WORK_UNSTARTED && + atomic_compare_exchange_strong(&global_roots_scanned, + &work_unstarted, WORK_STARTED)) { + /* This domain did the CAS, so this domain marks the roots */ + caml_scan_global_roots(&caml_darken, domain); + atomic_store_release(&global_roots_scanned, WORK_COMPLETE); } } + /* Locals, C locals, systhreads & finalisers */ + caml_do_roots (&caml_darken, darken_scanning_flags, domain, domain, 0); CAML_EV_END(EV_MAJOR_MARK_ROOTS); CAML_EV_BEGIN(EV_MAJOR_MEMPROF_ROOTS); @@ -1421,16 +1452,20 @@ static void start_marking (int participant_count, caml_domain_state** barrier_pa caml_gc_log("Marking started, %ld entries on mark stack", (long)domain->mark_stack->count); - if (domain->mark_stack->count == 0 && - !caml_addrmap_iter_ok(&domain->mark_stack->compressed_stack, - domain->mark_stack->compressed_stack_iter) - ) { - atomic_fetch_add_verify_ge0(&num_domains_to_mark, -1); - domain->marking_done = 1; - } - if (domain->ephe_info->todo == (value) NULL) ephe_todo_list_emptied(); + + /* Wait until global roots are marked. It's fine if other domains are still + marking their local roots, as long as the globals are done */ + if (atomic_load_acquire(&global_roots_scanned) != WORK_COMPLETE) { + CAML_EV_BEGIN(EV_MAJOR_MARK_OPPORTUNISTIC); + SPIN_WAIT { + caml_opportunistic_major_collection_slice(1000); + if (atomic_load_acquire(&global_roots_scanned) == WORK_COMPLETE) + break; + } + CAML_EV_END(EV_MAJOR_MARK_OPPORTUNISTIC); + } } static void cycle_major_heap_from_stw_single( @@ -1504,10 +1539,11 @@ static void cycle_major_heap_from_stw_single( atomic_store_release(&num_domains_to_sweep, num_domains_in_stw); atomic_store_release(&num_domains_to_mark, num_domains_in_stw); - caml_gc_phase = Phase_sweep_main; - atomic_store(&ephe_cycle_info.num_domains_todo, num_domains_in_stw); - atomic_store(&ephe_cycle_info.ephe_cycle, 1); - atomic_store(&ephe_cycle_info.num_domains_done, 0); + caml_gc_phase = Phase_sweep_main; + atomic_store(&caml_gc_mark_phase_requested, 0); + atomic_store(&ephe_cycle_info.num_domains_todo, num_domains_in_stw); + atomic_store(&ephe_cycle_info.ephe_cycle, 1); + atomic_store(&ephe_cycle_info.num_domains_done, 0); atomic_store_release(&num_domains_to_ephe_sweep, 0); /* Will be set to the correct number when switching to @@ -1518,8 +1554,6 @@ static void cycle_major_heap_from_stw_single( atomic_store_release(&num_domains_to_final_update_last, num_domains_in_stw); - atomic_store(&domain_global_roots_started, WORK_UNSTARTED); - caml_code_fragment_cleanup_from_stw_single(); } @@ -1606,21 +1640,6 @@ static void stw_cycle_all_domains( domain->sweeping_done = 0; domain->marking_done = 0; - /* Ephemerons */ -#ifdef DEBUG - orph_ephe_list_verify_status (caml_global_heap_state.UNMARKED); -#endif - /* Adopt orphaned work from domains that were spawned and terminated in the - previous cycle. 
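In caml_mark_roots_stw above, the participants race with a compare-and-swap so that exactly one of them scans the global roots per cycle, and the others wait for WORK_COMPLETE while running opportunistic slices instead of spinning idly. A self-contained C11 sketch of that claim/complete idiom, independent of the runtime (participant, scan_global_roots and do_other_work are placeholder names):

#include <stdatomic.h>

enum { WORK_UNSTARTED, WORK_STARTED, WORK_COMPLETE };

/* One status word per cycle; the real code resets it to WORK_UNSTARTED in
   the barrier block that also flips caml_gc_phase. */
static _Atomic int status = WORK_UNSTARTED;

void participant(void (*scan_global_roots)(void), void (*do_other_work)(void))
{
  int expected = WORK_UNSTARTED;
  if (atomic_load_explicit(&status, memory_order_relaxed) == WORK_UNSTARTED &&
      atomic_compare_exchange_strong(&status, &expected, WORK_STARTED)) {
    scan_global_roots();                       /* only the CAS winner scans */
    atomic_store_explicit(&status, WORK_COMPLETE, memory_order_release);
  }
  /* Everyone, winner included, proceeds only once the scan is published;
     instead of an idle spin, the real code runs
     caml_opportunistic_major_collection_slice here. */
  while (atomic_load_explicit(&status, memory_order_acquire) != WORK_COMPLETE)
    do_other_work();
}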
*/ - adopt_orphaned_work (); - CAMLassert(domain->ephe_info->todo == (value) NULL); - domain->ephe_info->todo = domain->ephe_info->live; - domain->ephe_info->live = (value) NULL; - domain->ephe_info->must_sweep_ephe = 0; - domain->ephe_info->cycle = 0; - domain->ephe_info->cursor.todop = NULL; - domain->ephe_info->cursor.cycle = 0; - /* Finalisers */ domain->final_info->updated_first = 0; domain->final_info->updated_last = 0; @@ -1713,7 +1732,9 @@ static void stw_try_complete_gc_phase( intnat caml_opportunistic_major_work_available (caml_domain_state* domain_state) { - return !domain_state->sweeping_done || !domain_state->marking_done; + return + !domain_state->sweeping_done || + (caml_marking_started() && !domain_state->marking_done); } static char collection_slice_mode_char(collection_slice_mode mode) @@ -1785,11 +1806,11 @@ static void major_collection_slice(intnat howmuch, if (log_events) CAML_EV_END(EV_MAJOR_SWEEP); } - if (domain_state->sweeping_done && - caml_gc_phase == Phase_sweep_main && - get_major_slice_work(mode) > 0 && - mode != Slice_opportunistic) { - start_marking(participant_count, barrier_participants); + if (domain_state->sweeping_done) { + /* We do not immediately trigger a minor GC, but instead wait for + the next one to happen normally. This gives some chance that + other domains will finish sweeping as well. */ + request_mark_phase(); } @@ -1830,10 +1851,7 @@ static void major_collection_slice(intnat howmuch, /* Nothing has been marked while updating last */ } -#ifdef DEBUG - orph_ephe_list_verify_status (caml_global_heap_state.MARKED); -#endif - adopt_orphaned_work(); + adopt_orphaned_work(caml_global_heap_state.MARKED); /* Ephemerons */ if (caml_gc_phase != Phase_sweep_ephe) { @@ -2025,8 +2043,10 @@ static void stw_finish_major_cycle (caml_domain_state* domain, void* arg, will only terminate when [caml_major_cycles_completed] is incremented, and this happens in [cycle_all_domains] inside a barrier. */ + request_mark_phase(); caml_empty_minor_heap_no_major_slice_from_stw (domain, (void*)0, participating_count, participating); + CAMLassert (caml_marking_started()); CAML_EV_BEGIN(EV_MAJOR_FINISH_CYCLE); while (params.saved_major_cycles == caml_major_cycles_completed) { @@ -2056,9 +2076,17 @@ int caml_mark_stack_is_empty(void) } #endif -void caml_empty_mark_stack (void) +static void empty_mark_stack (void) { while (!Caml_state->marking_done){ + /* while, not if: it is possible for caml_empty_minor_heaps_once + to actually do a full major GC cycle, and end up returning with + caml_marking_started false, because the next cycle has started */ + while (!caml_marking_started()) { + request_mark_phase(); + /* This calls caml_mark_roots_stw with the minor heap empty */ + caml_empty_minor_heaps_once(); + } mark(1000); caml_handle_incoming_interrupts(); } @@ -2073,14 +2101,12 @@ void caml_finish_marking (void) { if (!Caml_state->marking_done) { CAML_EV_BEGIN(EV_MAJOR_FINISH_MARKING); - if (!caml_marking_started()) { - start_marking(0, NULL); - } - caml_empty_mark_stack(); - caml_shrink_mark_stack(); + empty_mark_stack(); + shrink_mark_stack(); Caml_state->stat_major_words += Caml_state->allocated_words; Caml_state->allocated_words = 0; Caml_state->allocated_words_direct = 0; + CAMLassert(Caml_state->marking_done); CAML_EV_END(EV_MAJOR_FINISH_MARKING); } } @@ -2197,9 +2223,16 @@ int caml_init_major_gc(caml_domain_state* d) { caml_addrmap_iterator(&d->mark_stack->compressed_stack); /* Fresh domains do not need to performing marking or sweeping. 
*/ - /* CR ocaml 5 domains: how does this interact with Phase_sweep_main? */ - d->sweeping_done = 1; - d->marking_done = 1; + if (caml_gc_phase == Phase_sweep_main) { + d->sweeping_done = 1; + d->marking_done = 0; + atomic_fetch_add(&num_domains_to_mark, 1); + atomic_fetch_add(&ephe_cycle_info.num_domains_todo, 1); + } else { + d->sweeping_done = 1; + d->marking_done = 1; + } + /* Finalisers. Fresh domains participate in updating finalisers. */ d->final_info = caml_alloc_final_info (); if(d->final_info == NULL) { diff --git a/ocaml/runtime/minor_gc.c b/ocaml/runtime/minor_gc.c index 16a292f3c38..33177aa8206 100644 --- a/ocaml/runtime/minor_gc.c +++ b/ocaml/runtime/minor_gc.c @@ -829,8 +829,19 @@ int caml_do_opportunistic_major_slice /* Make sure the minor heap is empty by performing a minor collection if needed. + + This function also samples [caml_gc_mark_phase_requested] to see whether + [caml_mark_roots_stw] should be called. To guarantee that all domains + agree on whether the roots should be marked, this variable is sampled + only once, instead of having domains check it individually. */ -void caml_empty_minor_heap_setup(caml_domain_state* domain_unused) { +void caml_empty_minor_heap_setup(caml_domain_state* domain_unused, + void* mark_requested) { + /* Check whether the mark phase has been requested */ + *((uintnat*)mark_requested) = + atomic_load_relaxed(&caml_gc_mark_phase_requested) + ? atomic_exchange(&caml_gc_mark_phase_requested, 0) + : 0; /* Increment the total number of minor collections done in the program */ nonatomic_increment_counter (&caml_minor_collections_count); caml_plat_barrier_reset(&minor_gc_end_barrier); @@ -839,7 +850,7 @@ void caml_empty_minor_heap_setup(caml_domain_state* domain_unused) { /* must be called within a STW section */ static void caml_stw_empty_minor_heap_no_major_slice(caml_domain_state* domain, - void* unused, + void* mark_requested_ptr, int participating_count, caml_domain_state** participating) { @@ -851,6 +862,9 @@ caml_stw_empty_minor_heap_no_major_slice(caml_domain_state* domain, caml_fatal_error("Minor GC triggered recursively"); Caml_state->in_minor_collection = 1; + /* mark_requested_ptr must be read before minor GC barrier */ + uintnat mark_requested = *(uintnat*)mark_requested_ptr; + if( participating[0] == domain ) { nonatomic_increment_counter(&caml_minor_cycles_started); } @@ -858,6 +872,10 @@ caml_stw_empty_minor_heap_no_major_slice(caml_domain_state* domain, caml_gc_log("running stw empty_minor_heap_promote"); caml_empty_minor_heap_promote(domain, participating_count, participating); + /* while the minor heap is empty, allow the major GC to mark roots */ + if (mark_requested) + caml_mark_roots_stw(participating_count, participating); + CAML_EV_BEGIN(EV_MINOR_FINALIZED); caml_gc_log("finalizing dead minor custom blocks"); custom_finalize_minor(domain); @@ -887,11 +905,12 @@ caml_stw_empty_minor_heap_no_major_slice(caml_domain_state* domain, Caml_state->in_minor_collection = 0; } -static void caml_stw_empty_minor_heap (caml_domain_state* domain, void* unused, +static void caml_stw_empty_minor_heap (caml_domain_state* domain, + void* mark_requested, int participating_count, caml_domain_state** participating) { - caml_stw_empty_minor_heap_no_major_slice(domain, unused, + caml_stw_empty_minor_heap_no_major_slice(domain, mark_requested, participating_count, participating); } @@ -902,13 +921,14 @@ void caml_empty_minor_heap_no_major_slice_from_stw( int participating_count, caml_domain_state** participating) { + static uintnat 
mark_requested; /* written by only one domain */ Caml_global_barrier_if_final(participating_count) { - caml_empty_minor_heap_setup(domain); + caml_empty_minor_heap_setup(domain, &mark_requested); } /* if we are entering from within a major GC STW section then we do not schedule another major collection slice */ - caml_stw_empty_minor_heap_no_major_slice(domain, (void*)0, + caml_stw_empty_minor_heap_no_major_slice(domain, &mark_requested, participating_count, participating); } @@ -920,9 +940,11 @@ int caml_try_empty_minor_heap_on_all_domains (void) #endif caml_gc_log("requesting stw empty_minor_heap"); + uintnat mark_requested; return caml_try_run_on_all_domains_with_spin_work( 1, /* synchronous */ - &caml_stw_empty_minor_heap, 0, /* stw handler */ + &caml_stw_empty_minor_heap, /* stw handler */ + &mark_requested, &caml_empty_minor_heap_setup, /* leader setup */ &caml_do_opportunistic_major_slice, 0 /* enter spin work */); /* leaves when done by default*/ diff --git a/ocaml/runtime/signals.c b/ocaml/runtime/signals.c index f505ff302c1..e466b218559 100644 --- a/ocaml/runtime/signals.c +++ b/ocaml/runtime/signals.c @@ -291,6 +291,9 @@ void caml_request_major_slice (int global) }else{ Caml_state->requested_major_slice = 1; } + /* Beginning the mark phase requires emptying the minor heap */ + if (atomic_load_relaxed(&caml_gc_mark_phase_requested)) + Caml_state->requested_minor_gc = 1; caml_interrupt_self(); } diff --git a/ocaml/runtime/weak.c b/ocaml/runtime/weak.c index 9b313c5b4ed..22c2a9d2910 100644 --- a/ocaml/runtime/weak.c +++ b/ocaml/runtime/weak.c @@ -59,16 +59,8 @@ CAMLprim value caml_ephe_create (value len) caml_invalid_argument ("Weak.create"); res = caml_alloc_shr (size, Abstract_tag); - /* The new ephemeron needs to be added to: - live, if marking has started, to be marked next cycle - todo, if marking has not started, to be marked this cycle */ - if (caml_marking_started()) { - Ephe_link(res) = domain_state->ephe_info->live; - domain_state->ephe_info->live = res; - } else { - Ephe_link(res) = domain_state->ephe_info->todo; - domain_state->ephe_info->todo = res; - } + Ephe_link(res) = domain_state->ephe_info->live; + domain_state->ephe_info->live = res; for (i = CAML_EPHE_DATA_OFFSET; i < size; i++) Field(res, i) = caml_ephe_none; /* run memprof callbacks */ diff --git a/ocaml/testsuite/tests/gc-roots/globroots_parallel.ml b/ocaml/testsuite/tests/gc-roots/globroots_parallel.ml index 74f6808682c..71921d16fd8 100644 --- a/ocaml/testsuite/tests/gc-roots/globroots_parallel.ml +++ b/ocaml/testsuite/tests/gc-roots/globroots_parallel.ml @@ -1,8 +1,9 @@ (* TEST flags += " -w a "; modules = "globrootsprim.c globroots.ml"; - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) open Globroots diff --git a/ocaml/testsuite/tests/gc-roots/globroots_parallel_spawn_burn.ml b/ocaml/testsuite/tests/gc-roots/globroots_parallel_spawn_burn.ml index bdafc0da706..e72c81f10b0 100644 --- a/ocaml/testsuite/tests/gc-roots/globroots_parallel_spawn_burn.ml +++ b/ocaml/testsuite/tests/gc-roots/globroots_parallel_spawn_burn.ml @@ -1,8 +1,9 @@ (* TEST flags += " -w a "; modules = "globrootsprim.c globroots.ml"; - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) open Globroots diff --git a/ocaml/testsuite/tests/lazy/lazy2.ml b/ocaml/testsuite/tests/lazy/lazy2.ml index 478cf5acbe0..e7d7b408a7b 100644 --- a/ocaml/testsuite/tests/lazy/lazy2.ml +++ b/ocaml/testsuite/tests/lazy/lazy2.ml @@ -1,7 +1,8 @@ (* TEST 
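The minor_gc.c hunks above make the "should this minor STW also begin marking?" decision exactly once: the leader samples and clears caml_gc_mark_phase_requested in caml_empty_minor_heap_setup, stores the result where the STW data pointer can reach it, and every participant reads that single value before the minor-GC barrier, so the group cannot disagree. A stripped-down sketch of the same pattern outside the runtime (names hypothetical; the patch additionally skips the atomic exchange when the flag is already clear):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uintptr_t requested;  /* may be set asynchronously by anyone */
static uintptr_t decision;           /* written by the leader only          */

/* Runs on the leader before the other participants are released. */
void leader_setup_step(void)
{
  /* Sample-and-clear exactly once; a request that arrives after this point
     is simply deferred to the next round. */
  decision = atomic_exchange(&requested, 0);
}

/* Runs on every participant.  A barrier between leader_setup_step and this
   point (provided by the STW machinery in the real code) makes the plain
   read of 'decision' safe. */
void participant_step(void)
{
  if (decision) {
    /* ... enter the phase, i.e. what caml_mark_roots_stw does ... */
  }
}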
ocamlopt_flags += " -O3 "; - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) open Domain diff --git a/ocaml/testsuite/tests/lazy/lazy3.ml b/ocaml/testsuite/tests/lazy/lazy3.ml index 76843888e9e..a97265e84d3 100644 --- a/ocaml/testsuite/tests/lazy/lazy3.ml +++ b/ocaml/testsuite/tests/lazy/lazy3.ml @@ -1,7 +1,8 @@ (* TEST ocamlopt_flags += " -O3 "; - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) let f count = diff --git a/ocaml/testsuite/tests/lazy/lazy5.ml b/ocaml/testsuite/tests/lazy/lazy5.ml index b220581ecc1..7545d7f5b67 100644 --- a/ocaml/testsuite/tests/lazy/lazy5.ml +++ b/ocaml/testsuite/tests/lazy/lazy5.ml @@ -1,7 +1,8 @@ (* TEST ocamlopt_flags += " -O3 "; - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) let rec safe_force l = try Lazy.force l with diff --git a/ocaml/testsuite/tests/lazy/lazy6.ml b/ocaml/testsuite/tests/lazy/lazy6.ml index e07b60c3156..60a110348c2 100644 --- a/ocaml/testsuite/tests/lazy/lazy6.ml +++ b/ocaml/testsuite/tests/lazy/lazy6.ml @@ -1,7 +1,8 @@ (* TEST ocamlopt_flags += " -O3 "; - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) let flag1 = Atomic.make false diff --git a/ocaml/testsuite/tests/lazy/lazy7.ml b/ocaml/testsuite/tests/lazy/lazy7.ml index d27aa248db7..37c23003b31 100644 --- a/ocaml/testsuite/tests/lazy/lazy7.ml +++ b/ocaml/testsuite/tests/lazy/lazy7.ml @@ -1,7 +1,8 @@ (* TEST ocamlopt_flags += " -O3 "; - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) let num_domains = 4 diff --git a/ocaml/testsuite/tests/lazy/lazy8.ml b/ocaml/testsuite/tests/lazy/lazy8.ml index 48c5bca29e7..a562a91ffaf 100644 --- a/ocaml/testsuite/tests/lazy/lazy8.ml +++ b/ocaml/testsuite/tests/lazy/lazy8.ml @@ -1,7 +1,8 @@ (* TEST ocamlopt_flags += " -O3 "; - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) exception E diff --git a/ocaml/testsuite/tests/lf_skiplist/test_parallel.ml b/ocaml/testsuite/tests/lf_skiplist/test_parallel.ml index b218979d04f..45cb89f0df7 100644 --- a/ocaml/testsuite/tests/lf_skiplist/test_parallel.ml +++ b/ocaml/testsuite/tests/lf_skiplist/test_parallel.ml @@ -1,8 +1,9 @@ (* TEST modules = "stubs.c"; - no-tsan; (* Takes too much time and memory with tsan *) - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + no-tsan; + runtime5; + { bytecode; } + { native; } *) external init_skiplist : unit -> unit = "init_skiplist" diff --git a/ocaml/testsuite/tests/lib-channels/refcounting.ml b/ocaml/testsuite/tests/lib-channels/refcounting.ml index 84776f440b4..b80a7bb64be 100644 --- a/ocaml/testsuite/tests/lib-channels/refcounting.ml +++ b/ocaml/testsuite/tests/lib-channels/refcounting.ml @@ -1,6 +1,5 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; expect; *) diff --git a/ocaml/testsuite/tests/lib-dynlink-domains/main.ml b/ocaml/testsuite/tests/lib-dynlink-domains/main.ml index e5666c8897e..b7e83c64e4c 100644 --- a/ocaml/testsuite/tests/lib-dynlink-domains/main.ml +++ b/ocaml/testsuite/tests/lib-dynlink-domains/main.ml @@ -1,10 +1,5 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; -*) - -(* BACKPORT -(* TEST + runtime5; include dynlink; libraries = ""; readonly_files = "store.ml main.ml Plugin_0.ml Plugin_0_0.ml Plugin_0_0_0.ml Plugin_0_0_0_0.ml 
Plugin_0_0_0_1.ml Plugin_0_0_0_2.ml Plugin_1.ml Plugin_1_0.ml Plugin_1_0_0.ml Plugin_1_0_0_0.ml Plugin_1_1.ml Plugin_1_2.ml Plugin_1_2_0.ml Plugin_1_2_0_0.ml Plugin_1_2_1.ml Plugin_1_2_2.ml Plugin_1_2_2_0.ml Plugin_1_2_3.ml Plugin_1_2_3_0.ml"; @@ -228,5 +223,3 @@ let stored = Atomic.get Store.store let stored_set = String_set.of_list stored let () = List.iter (Printf.printf "%s\n") (String_set.elements stored_set) - -*) diff --git a/ocaml/testsuite/tests/lib-format/domains.ml b/ocaml/testsuite/tests/lib-format/domains.ml index 4bfaa594124..896675f5ac3 100644 --- a/ocaml/testsuite/tests/lib-format/domains.ml +++ b/ocaml/testsuite/tests/lib-format/domains.ml @@ -1,6 +1,7 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) (** Test that domains stdout and stderr are flushed at domain exit *) diff --git a/ocaml/testsuite/tests/lib-format/mc_pr586_par.ml b/ocaml/testsuite/tests/lib-format/mc_pr586_par.ml index c8475c37a38..87b1d2f8210 100644 --- a/ocaml/testsuite/tests/lib-format/mc_pr586_par.ml +++ b/ocaml/testsuite/tests/lib-format/mc_pr586_par.ml @@ -1,6 +1,7 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) let () = diff --git a/ocaml/testsuite/tests/lib-format/mc_pr586_par2.ml b/ocaml/testsuite/tests/lib-format/mc_pr586_par2.ml index d2dd7b06a69..23bb3e9bb34 100644 --- a/ocaml/testsuite/tests/lib-format/mc_pr586_par2.ml +++ b/ocaml/testsuite/tests/lib-format/mc_pr586_par2.ml @@ -1,6 +1,7 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) let () = diff --git a/ocaml/testsuite/tests/lib-marshal/intext_par.ml b/ocaml/testsuite/tests/lib-marshal/intext_par.ml index e505f82914c..93bec50489b 100644 --- a/ocaml/testsuite/tests/lib-marshal/intext_par.ml +++ b/ocaml/testsuite/tests/lib-marshal/intext_par.ml @@ -1,8 +1,9 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; modules = "intextaux_par.c"; - no-tsan; (* Takes too much time and memory with tsan *) - skip; + no-tsan; + runtime5; + { bytecode; } + { native; } *) (* Test for output_value / input_value *) diff --git a/ocaml/testsuite/tests/lib-random/parallel.ml b/ocaml/testsuite/tests/lib-random/parallel.ml index 07805a0e878..a58bfa7c159 100644 --- a/ocaml/testsuite/tests/lib-random/parallel.ml +++ b/ocaml/testsuite/tests/lib-random/parallel.ml @@ -1,7 +1,6 @@ (* TEST include unix; - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; libunix; { bytecode; diff --git a/ocaml/testsuite/tests/lib-str/parallel.ml b/ocaml/testsuite/tests/lib-str/parallel.ml index 1d9f2f5d388..4c4926ca3ea 100644 --- a/ocaml/testsuite/tests/lib-str/parallel.ml +++ b/ocaml/testsuite/tests/lib-str/parallel.ml @@ -1,6 +1,5 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; include str; hasstr; { diff --git a/ocaml/testsuite/tests/lib-sync/prodcons.ml b/ocaml/testsuite/tests/lib-sync/prodcons.ml index f02257b7547..19f5e1607a8 100644 --- a/ocaml/testsuite/tests/lib-sync/prodcons.ml +++ b/ocaml/testsuite/tests/lib-sync/prodcons.ml @@ -1,6 +1,7 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) (* Classic producer-consumer *) diff --git a/ocaml/testsuite/tests/lib-systhreads/multicore_lifecycle.ml b/ocaml/testsuite/tests/lib-systhreads/multicore_lifecycle.ml index 018b8548e5d..f1ea4dd0f1d 100644 --- 
a/ocaml/testsuite/tests/lib-systhreads/multicore_lifecycle.ml +++ b/ocaml/testsuite/tests/lib-systhreads/multicore_lifecycle.ml @@ -1,8 +1,7 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; include systhreads; hassysthreads; + runtime5; { bytecode; }{ diff --git a/ocaml/testsuite/tests/lib-unix/common/multicore_fork_domain_alone.ml b/ocaml/testsuite/tests/lib-unix/common/multicore_fork_domain_alone.ml index 66ccfc2dd8a..99c10547702 100644 --- a/ocaml/testsuite/tests/lib-unix/common/multicore_fork_domain_alone.ml +++ b/ocaml/testsuite/tests/lib-unix/common/multicore_fork_domain_alone.ml @@ -1,7 +1,6 @@ (* TEST + runtime5; include unix; - reason = "CR ocaml 5 domains: re-enable this test"; - skip; hasunix; not-windows; { diff --git a/ocaml/testsuite/tests/lib-unix/common/multicore_fork_domain_alone2.ml b/ocaml/testsuite/tests/lib-unix/common/multicore_fork_domain_alone2.ml index c3751f2b949..c87e8006f2b 100644 --- a/ocaml/testsuite/tests/lib-unix/common/multicore_fork_domain_alone2.ml +++ b/ocaml/testsuite/tests/lib-unix/common/multicore_fork_domain_alone2.ml @@ -1,7 +1,6 @@ (* TEST + runtime5; include unix; - reason = "CR ocaml 5 domains: re-enable this test"; - skip; hasunix; not-windows; { diff --git a/ocaml/testsuite/tests/memory-model/forbidden.ml b/ocaml/testsuite/tests/memory-model/forbidden.ml index fd97ce536ce..9b9a59274dc 100644 --- a/ocaml/testsuite/tests/memory-model/forbidden.ml +++ b/ocaml/testsuite/tests/memory-model/forbidden.ml @@ -1,9 +1,8 @@ (* TEST modules = "opt.ml barrier.ml hist.ml shared.ml run.ml outcome.ml"; - reason = "CR ocaml 5 domains: re-enable this test"; - skip; not-bsd; no-tsan; (* tsan detects the intentional data races and fails *) + runtime5; { bytecode; }{ diff --git a/ocaml/testsuite/tests/memory-model/publish.ml b/ocaml/testsuite/tests/memory-model/publish.ml index da486ce5b3f..f7b4b7539d8 100644 --- a/ocaml/testsuite/tests/memory-model/publish.ml +++ b/ocaml/testsuite/tests/memory-model/publish.ml @@ -1,9 +1,8 @@ (* TEST modules = "opt.ml barrier.ml hist.ml shared.ml run.ml outcome.ml"; - reason = "CR ocaml 5 domains: re-enable this test"; - skip; not-bsd; no-tsan; (* tsan detects data races and fails *) + runtime5; { not-windows; bytecode; diff --git a/ocaml/testsuite/tests/parallel/atomics.ml b/ocaml/testsuite/tests/parallel/atomics.ml index 6f092ca6233..e0ace87a100 100644 --- a/ocaml/testsuite/tests/parallel/atomics.ml +++ b/ocaml/testsuite/tests/parallel/atomics.ml @@ -1,6 +1,7 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) type u = U of unit diff --git a/ocaml/testsuite/tests/parallel/backup_thread.ml b/ocaml/testsuite/tests/parallel/backup_thread.ml index d3d3a19581b..7b0e4525368 100644 --- a/ocaml/testsuite/tests/parallel/backup_thread.ml +++ b/ocaml/testsuite/tests/parallel/backup_thread.ml @@ -1,6 +1,5 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; include unix; hasunix; { diff --git a/ocaml/testsuite/tests/parallel/backup_thread_pipe.ml b/ocaml/testsuite/tests/parallel/backup_thread_pipe.ml index e063b4843fb..c558ba0a854 100644 --- a/ocaml/testsuite/tests/parallel/backup_thread_pipe.ml +++ b/ocaml/testsuite/tests/parallel/backup_thread_pipe.ml @@ -1,6 +1,5 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; include unix; hasunix; { diff --git a/ocaml/testsuite/tests/parallel/catch_break.ml b/ocaml/testsuite/tests/parallel/catch_break.ml index 0b6c4d9fa22..8176a50684d 100644 --- 
a/ocaml/testsuite/tests/parallel/catch_break.ml +++ b/ocaml/testsuite/tests/parallel/catch_break.ml @@ -1,13 +1,10 @@ (* TEST -reason = "CR ocaml 5 domains: re-enable this test"; -skip; -*) - -(* hassysthreads; include systhreads; not-windows; +poll-insertion; no-tsan; +runtime5; { bytecode; }{ @@ -79,7 +76,7 @@ let rec wait n = callback implementation than we want.*) let break_trap s = begin - try Atomic.incr ready_count; while true do () done + try Sys.with_async_exns (fun () -> Atomic.incr ready_count; while true do () done) with Sys.Break -> print "[Sys.Break caught]" end; print s; @@ -112,7 +109,7 @@ let run () = let () = Sys.catch_break true; - (try run () with Sys.Break -> + (try Sys.with_async_exns run with Sys.Break -> print ("Test could not complete due to scheduling hazard" ^ " (possible false positive).")); print "Success."; diff --git a/ocaml/testsuite/tests/parallel/churn.ml b/ocaml/testsuite/tests/parallel/churn.ml new file mode 100644 index 00000000000..1472b216e5f --- /dev/null +++ b/ocaml/testsuite/tests/parallel/churn.ml @@ -0,0 +1,69 @@ +(* TEST + runtime5; + { bytecode; } + { native; } +*) + +type t = { + sender: int; + code: int; + msgsize: int; + message: int array; +} + +let size = 20_000 +let table = Array.init size (fun _ -> Atomic.make None) +let go = Atomic.make true +let log = false + +let run me msgsize iters = + (* domain 0 keeps a bunch of extra local data, + to unbalance sweeping loads *) + let kept = ref [] in + if me = 0 then kept := [Array.init 10000 ref]; + let count = ref iters in + let from0 = ref 0 in + while !count > 0 && Atomic.get go do + ignore (Sys.opaque_identity (ref [])); + let slot = Random.int size in + match Atomic.get table.(slot) with + | None as prev -> + let code = Random.bits () in + let msg = {sender = me; code; msgsize; message = Array.make msgsize code} in + if me = 0 then kept := Array.init 5 ref :: !kept; + (* pointless string formatting to create minor garbage *) + let dbg = + Printf.sprintf "[%d]: %03d: %d %08x --->\n" me slot msg.msgsize msg.code in + if Sys.opaque_identity log then print_string dbg; + if Atomic.compare_and_set table.(slot) prev (Some msg) then + decr count + | Some msg as prev when + msg.sender <> me && + Atomic.compare_and_set table.(slot) prev None -> + + let dbg = Printf.sprintf "[%d]: ---> %03d: %d %08x\n" me slot msg.msgsize msg.code in + if Sys.opaque_identity log then print_string dbg; + assert (Array.length msg.message = msg.msgsize); + for i = 0 to msg.msgsize - 1 do + assert (msg.message.(i) = msg.code) + done; + if msg.sender = 0 then incr from0; + | Some _ -> () + done; + ignore (Sys.opaque_identity !kept); + !from0 + +let () = + let iters = 200_000 in + let d1 = Domain.spawn (fun () -> run 1 100 max_int) in + let d2 = Domain.spawn (fun () -> run 2 5 max_int) in + let recv_local = run 0 20 iters in + assert (recv_local = 0); + Atomic.set go false; + let r = Domain.join d1 + Domain.join d2 in + let remaining = + table + |> Array.to_list + |> List.filter (fun x -> match Atomic.get x with Some {sender=0; _} -> true | _ -> false) + |> List.length in + Printf.printf "%d\n" (r+remaining) diff --git a/ocaml/testsuite/tests/parallel/churn.reference b/ocaml/testsuite/tests/parallel/churn.reference new file mode 100644 index 00000000000..87766d889a3 --- /dev/null +++ b/ocaml/testsuite/tests/parallel/churn.reference @@ -0,0 +1 @@ +200000 diff --git a/ocaml/testsuite/tests/parallel/constpromote.ml b/ocaml/testsuite/tests/parallel/constpromote.ml index c8a0e6b2565..6e76b106aa5 100644 --- 
a/ocaml/testsuite/tests/parallel/constpromote.ml +++ b/ocaml/testsuite/tests/parallel/constpromote.ml @@ -1,6 +1,7 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) (* when run with the bytecode debug runtime, this test diff --git a/ocaml/testsuite/tests/parallel/deadcont.ml b/ocaml/testsuite/tests/parallel/deadcont.ml index d4180fe4a36..f3726e263d1 100644 --- a/ocaml/testsuite/tests/parallel/deadcont.ml +++ b/ocaml/testsuite/tests/parallel/deadcont.ml @@ -1,6 +1,8 @@ (* TEST - reason = "CR ocaml 5 effects: re-enable this test"; skip; + runtime5; + { bytecode; } + { native; } *) (* diff --git a/ocaml/testsuite/tests/parallel/domain_dls.ml b/ocaml/testsuite/tests/parallel/domain_dls.ml index c9c6629b8fc..77ca1b71650 100644 --- a/ocaml/testsuite/tests/parallel/domain_dls.ml +++ b/ocaml/testsuite/tests/parallel/domain_dls.ml @@ -1,6 +1,7 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) let check_dls () = diff --git a/ocaml/testsuite/tests/parallel/domain_dls2.ml b/ocaml/testsuite/tests/parallel/domain_dls2.ml index 97a8c6aff44..fde7279024b 100644 --- a/ocaml/testsuite/tests/parallel/domain_dls2.ml +++ b/ocaml/testsuite/tests/parallel/domain_dls2.ml @@ -1,6 +1,7 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) let _ = diff --git a/ocaml/testsuite/tests/parallel/domain_id.ml b/ocaml/testsuite/tests/parallel/domain_id.ml index 47cd6ccbe4a..f48fca37bfe 100644 --- a/ocaml/testsuite/tests/parallel/domain_id.ml +++ b/ocaml/testsuite/tests/parallel/domain_id.ml @@ -1,6 +1,7 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) open Domain diff --git a/ocaml/testsuite/tests/parallel/domain_parallel_spawn_burn.ml b/ocaml/testsuite/tests/parallel/domain_parallel_spawn_burn.ml index 16fe1279bc9..b3d2517848b 100644 --- a/ocaml/testsuite/tests/parallel/domain_parallel_spawn_burn.ml +++ b/ocaml/testsuite/tests/parallel/domain_parallel_spawn_burn.ml @@ -1,6 +1,7 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) open Domain diff --git a/ocaml/testsuite/tests/parallel/domain_parallel_spawn_burn_gc_set.ml b/ocaml/testsuite/tests/parallel/domain_parallel_spawn_burn_gc_set.ml index d07187de741..bc33818b2de 100644 --- a/ocaml/testsuite/tests/parallel/domain_parallel_spawn_burn_gc_set.ml +++ b/ocaml/testsuite/tests/parallel/domain_parallel_spawn_burn_gc_set.ml @@ -1,6 +1,7 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) open Domain diff --git a/ocaml/testsuite/tests/parallel/domain_serial_spawn_burn.ml b/ocaml/testsuite/tests/parallel/domain_serial_spawn_burn.ml index c51ec2e16cd..c1b4b28f7b3 100644 --- a/ocaml/testsuite/tests/parallel/domain_serial_spawn_burn.ml +++ b/ocaml/testsuite/tests/parallel/domain_serial_spawn_burn.ml @@ -1,6 +1,5 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; include unix; hasunix; { diff --git a/ocaml/testsuite/tests/parallel/fib_threads.ml b/ocaml/testsuite/tests/parallel/fib_threads.ml index b6f68ca3c40..4a4c33d2ae0 100644 --- a/ocaml/testsuite/tests/parallel/fib_threads.ml +++ b/ocaml/testsuite/tests/parallel/fib_threads.ml @@ -1,6 +1,5 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; include systhreads; 
hassysthreads; { diff --git a/ocaml/testsuite/tests/parallel/join.ml b/ocaml/testsuite/tests/parallel/join.ml index b42f613f253..5db4fc77fca 100644 --- a/ocaml/testsuite/tests/parallel/join.ml +++ b/ocaml/testsuite/tests/parallel/join.ml @@ -1,6 +1,7 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) let test_size = diff --git a/ocaml/testsuite/tests/parallel/major_gc_wait_backup.ml b/ocaml/testsuite/tests/parallel/major_gc_wait_backup.ml index 2f39b4d5441..6941ca7f731 100644 --- a/ocaml/testsuite/tests/parallel/major_gc_wait_backup.ml +++ b/ocaml/testsuite/tests/parallel/major_gc_wait_backup.ml @@ -1,6 +1,5 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; include unix; hasunix; { diff --git a/ocaml/testsuite/tests/parallel/mctest.ml b/ocaml/testsuite/tests/parallel/mctest.ml index e6be5b08a64..985bd647de5 100644 --- a/ocaml/testsuite/tests/parallel/mctest.ml +++ b/ocaml/testsuite/tests/parallel/mctest.ml @@ -1,6 +1,6 @@ (* TEST - reason = "CR ocaml 5 effects: re-enable this test"; skip; + runtime5; include unix; hasunix; { diff --git a/ocaml/testsuite/tests/parallel/multicore_systhreads.ml b/ocaml/testsuite/tests/parallel/multicore_systhreads.ml index ddc82a7356f..b100efa879c 100644 --- a/ocaml/testsuite/tests/parallel/multicore_systhreads.ml +++ b/ocaml/testsuite/tests/parallel/multicore_systhreads.ml @@ -1,6 +1,5 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; include systhreads; hassysthreads; { diff --git a/ocaml/testsuite/tests/parallel/pingpong.ml b/ocaml/testsuite/tests/parallel/pingpong.ml index a38d6bab555..2074b9eac43 100644 --- a/ocaml/testsuite/tests/parallel/pingpong.ml +++ b/ocaml/testsuite/tests/parallel/pingpong.ml @@ -1,13 +1,8 @@ (* TEST no-tsan; (* TSan detects the intentional data race *) - reason = "CR ocaml 5 domains: re-enable this test"; - skip; - { - bytecode; - } - { - native; - } + runtime5; + { bytecode; } + { native; } *) let r = ref (Some 0) diff --git a/ocaml/testsuite/tests/parallel/poll.ml b/ocaml/testsuite/tests/parallel/poll.ml index a2a219f2f9b..087790b1f2c 100644 --- a/ocaml/testsuite/tests/parallel/poll.ml +++ b/ocaml/testsuite/tests/parallel/poll.ml @@ -1,8 +1,8 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + poll-insertion; include unix; hasunix; + runtime5; { bytecode; }{ diff --git a/ocaml/testsuite/tests/parallel/prodcons_domains.ml b/ocaml/testsuite/tests/parallel/prodcons_domains.ml index f02257b7547..19f5e1607a8 100644 --- a/ocaml/testsuite/tests/parallel/prodcons_domains.ml +++ b/ocaml/testsuite/tests/parallel/prodcons_domains.ml @@ -1,6 +1,7 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) (* Classic producer-consumer *) diff --git a/ocaml/testsuite/tests/parallel/recommended_domain_count.ml b/ocaml/testsuite/tests/parallel/recommended_domain_count.ml index 3e04ea9ebff..79358b243c5 100644 --- a/ocaml/testsuite/tests/parallel/recommended_domain_count.ml +++ b/ocaml/testsuite/tests/parallel/recommended_domain_count.ml @@ -1,7 +1,8 @@ (* TEST modules = "recommended_domain_count_cstubs.c"; - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) external get_max_domains : unit -> int = "caml_get_max_domains" diff --git a/ocaml/testsuite/tests/parallel/recommended_domain_count_unix.ml b/ocaml/testsuite/tests/parallel/recommended_domain_count_unix.ml index 
eea7672d925..c76da9cef15 100644 --- a/ocaml/testsuite/tests/parallel/recommended_domain_count_unix.ml +++ b/ocaml/testsuite/tests/parallel/recommended_domain_count_unix.ml @@ -1,6 +1,5 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; include unix; hasunix; { diff --git a/ocaml/testsuite/tests/parallel/tak.ml b/ocaml/testsuite/tests/parallel/tak.ml index ce81c1e8a10..2821fbb1146 100644 --- a/ocaml/testsuite/tests/parallel/tak.ml +++ b/ocaml/testsuite/tests/parallel/tak.ml @@ -1,6 +1,7 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) (* filling minor heaps in parallel to trigger diff --git a/ocaml/testsuite/tests/parallel/test_c_thread_register.ml b/ocaml/testsuite/tests/parallel/test_c_thread_register.ml index 5e888c8b4c9..616053640bd 100644 --- a/ocaml/testsuite/tests/parallel/test_c_thread_register.ml +++ b/ocaml/testsuite/tests/parallel/test_c_thread_register.ml @@ -1,7 +1,6 @@ (* TEST modules = "test_c_thread_register_cstubs.c"; - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; include systhreads; hassysthreads; { diff --git a/ocaml/testsuite/tests/parallel/test_issue_11094.ml b/ocaml/testsuite/tests/parallel/test_issue_11094.ml index 12060d1d837..98cacc15ab3 100644 --- a/ocaml/testsuite/tests/parallel/test_issue_11094.ml +++ b/ocaml/testsuite/tests/parallel/test_issue_11094.ml @@ -1,6 +1,7 @@ (* TEST reason = "CR ocaml 5 effects: re-enable this test"; skip; + runtime5; { bytecode; }{ diff --git a/ocaml/testsuite/tests/weak-ephe-final/ephetest_par.ml b/ocaml/testsuite/tests/weak-ephe-final/ephetest_par.ml index 2eff91f62d8..f30b7b5bacd 100644 --- a/ocaml/testsuite/tests/weak-ephe-final/ephetest_par.ml +++ b/ocaml/testsuite/tests/weak-ephe-final/ephetest_par.ml @@ -1,6 +1,7 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) (* Due to GCs running at non-deterministic places, the output from these tests diff --git a/ocaml/testsuite/tests/weak-ephe-final/finaliser2.ml b/ocaml/testsuite/tests/weak-ephe-final/finaliser2.ml index 4a4dc3e0fa8..e10f4906239 100644 --- a/ocaml/testsuite/tests/weak-ephe-final/finaliser2.ml +++ b/ocaml/testsuite/tests/weak-ephe-final/finaliser2.ml @@ -1,6 +1,7 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) let () = Out_channel.set_buffered stdout false diff --git a/ocaml/testsuite/tests/weak-ephe-final/finaliser_handover.ml b/ocaml/testsuite/tests/weak-ephe-final/finaliser_handover.ml index 7f97a43ddf5..5d487ae62cd 100644 --- a/ocaml/testsuite/tests/weak-ephe-final/finaliser_handover.ml +++ b/ocaml/testsuite/tests/weak-ephe-final/finaliser_handover.ml @@ -1,6 +1,7 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) (* ocaml-multicore issues 528 and 468 *) diff --git a/ocaml/testsuite/tests/weak-ephe-final/weaktest_par_load.ml b/ocaml/testsuite/tests/weak-ephe-final/weaktest_par_load.ml index eb39ff9f1fb..59ef21620bb 100644 --- a/ocaml/testsuite/tests/weak-ephe-final/weaktest_par_load.ml +++ b/ocaml/testsuite/tests/weak-ephe-final/weaktest_par_load.ml @@ -1,6 +1,7 @@ (* TEST - reason = "CR ocaml 5 domains: re-enable this test"; - skip; + runtime5; + { bytecode; } + { native; } *) (* Testing unsynchronized, parallel Weak usage *)