@@ -231,21 +231,20 @@ void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
                              kmp_int32 num_threads) {
   KA_TRACE(20, ("__kmpc_push_num_threads: enter T#%d num_threads=%d\n",
                 global_tid, num_threads));
-
+  __kmp_assert_valid_gtid(global_tid);
   __kmp_push_num_threads(loc, global_tid, num_threads);
 }
 
 void __kmpc_pop_num_threads(ident_t *loc, kmp_int32 global_tid) {
   KA_TRACE(20, ("__kmpc_pop_num_threads: enter\n"));
-
   /* the num_threads are automatically popped */
 }
 
 void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
                            kmp_int32 proc_bind) {
   KA_TRACE(20, ("__kmpc_push_proc_bind: enter T#%d proc_bind=%d\n", global_tid,
                 proc_bind));
-
+  __kmp_assert_valid_gtid(global_tid);
   __kmp_push_proc_bind(loc, global_tid, (kmp_proc_bind_t)proc_bind);
 }
 
@@ -353,7 +352,7 @@ void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
   KA_TRACE(20,
            ("__kmpc_push_num_teams: enter T#%d num_teams=%d num_threads=%d\n",
             global_tid, num_teams, num_threads));
-
+  __kmp_assert_valid_gtid(global_tid);
   __kmp_push_num_teams(loc, global_tid, num_teams, num_threads);
 }
 
@@ -474,9 +473,10 @@ conditional parallel region, like this,
 when the condition is false.
 */
 void __kmpc_serialized_parallel(ident_t *loc, kmp_int32 global_tid) {
-  // The implementation is now in kmp_runtime.cpp so that it can share static
-  // functions with kmp_fork_call since the tasks to be done are similar in
-  // each case.
+  // The implementation is now in kmp_runtime.cpp so that it can share static
+  // functions with kmp_fork_call since the tasks to be done are similar in
+  // each case.
+  __kmp_assert_valid_gtid(global_tid);
 #if OMPT_SUPPORT
   OMPT_STORE_RETURN_ADDRESS(global_tid);
 #endif
@@ -504,6 +504,7 @@ void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32 global_tid) {
     return;
 
   // Not autopar code
+  __kmp_assert_valid_gtid(global_tid);
   if (!TCR_4(__kmp_init_parallel))
     __kmp_parallel_initialize();
 
@@ -713,6 +714,7 @@ Execute a barrier.
 void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid) {
   KMP_COUNT_BLOCK(OMP_BARRIER);
   KC_TRACE(10, ("__kmpc_barrier: called T#%d\n", global_tid));
+  __kmp_assert_valid_gtid(global_tid);
 
   if (!TCR_4(__kmp_init_parallel))
     __kmp_parallel_initialize();
@@ -762,6 +764,7 @@ kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 global_tid) {
   int status = 0;
 
   KC_TRACE(10, ("__kmpc_master: called T#%d\n", global_tid));
+  __kmp_assert_valid_gtid(global_tid);
 
   if (!TCR_4(__kmp_init_parallel))
     __kmp_parallel_initialize();
@@ -816,7 +819,7 @@ thread that executes the <tt>master</tt> region.
 */
 void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid) {
   KC_TRACE(10, ("__kmpc_end_master: called T#%d\n", global_tid));
-
+  __kmp_assert_valid_gtid(global_tid);
   KMP_DEBUG_ASSERT(KMP_MASTER_GTID(global_tid));
   KMP_POP_PARTITIONED_TIMER();
 
@@ -833,9 +836,6 @@ void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid) {
 #endif
 
   if (__kmp_env_consistency_check) {
-    if (global_tid < 0)
-      KMP_WARNING(ThreadIdentInvalid);
-
     if (KMP_MASTER_GTID(global_tid))
       __kmp_pop_sync(global_tid, ct_master, loc);
   }
@@ -854,6 +854,7 @@ void __kmpc_ordered(ident_t *loc, kmp_int32 gtid) {
   KMP_DEBUG_ASSERT(__kmp_init_serial);
 
   KC_TRACE(10, ("__kmpc_ordered: called T#%d\n", gtid));
+  __kmp_assert_valid_gtid(gtid);
 
   if (!TCR_4(__kmp_init_parallel))
     __kmp_parallel_initialize();
@@ -925,6 +926,7 @@ void __kmpc_end_ordered(ident_t *loc, kmp_int32 gtid) {
   kmp_info_t *th;
 
   KC_TRACE(10, ("__kmpc_end_ordered: called T#%d\n", gtid));
+  __kmp_assert_valid_gtid(gtid);
 
 #if USE_ITT_BUILD
   __kmp_itt_ordered_end(gtid);
@@ -1147,7 +1149,7 @@ static kmp_user_lock_p __kmp_get_critical_section_ptr(kmp_critical_name *crit,
 /*!
 @ingroup WORK_SHARING
 @param loc source location information.
-@param global_tid global thread number .
+@param global_tid global thread number.
 @param crit identity of the critical section. This could be a pointer to a lock
 associated with the critical section, or some other suitably unique value.
 
@@ -1170,6 +1172,7 @@ void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
   kmp_user_lock_p lck;
 
   KC_TRACE(10, ("__kmpc_critical: called T#%d\n", global_tid));
+  __kmp_assert_valid_gtid(global_tid);
 
   // TODO: add THR_OVHD_STATE
 
@@ -1392,6 +1395,7 @@ void __kmpc_critical_with_hint(ident_t *loc, kmp_int32 global_tid,
 #endif
 
   KC_TRACE(10, ("__kmpc_critical: called T#%d\n", global_tid));
+  __kmp_assert_valid_gtid(global_tid);
 
   kmp_dyna_lock_t *lk = (kmp_dyna_lock_t *)crit;
   // Check if it is initialized.
@@ -1607,8 +1611,8 @@ this function.
 */
 kmp_int32 __kmpc_barrier_master(ident_t *loc, kmp_int32 global_tid) {
   int status;
-
   KC_TRACE(10, ("__kmpc_barrier_master: called T#%d\n", global_tid));
+  __kmp_assert_valid_gtid(global_tid);
 
   if (!TCR_4(__kmp_init_parallel))
     __kmp_parallel_initialize();
@@ -1651,7 +1655,7 @@ still be waiting at the barrier and this call releases them.
 */
 void __kmpc_end_barrier_master(ident_t *loc, kmp_int32 global_tid) {
   KC_TRACE(10, ("__kmpc_end_barrier_master: called T#%d\n", global_tid));
-
+  __kmp_assert_valid_gtid(global_tid);
   __kmp_end_split_barrier(bs_plain_barrier, global_tid);
 }
 
@@ -1667,8 +1671,8 @@ There is no equivalent "end" function, since the
 */
 kmp_int32 __kmpc_barrier_master_nowait(ident_t *loc, kmp_int32 global_tid) {
   kmp_int32 ret;
-
   KC_TRACE(10, ("__kmpc_barrier_master_nowait: called T#%d\n", global_tid));
+  __kmp_assert_valid_gtid(global_tid);
 
   if (!TCR_4(__kmp_init_parallel))
     __kmp_parallel_initialize();
@@ -1706,14 +1710,9 @@ kmp_int32 __kmpc_barrier_master_nowait(ident_t *loc, kmp_int32 global_tid) {
   if (__kmp_env_consistency_check) {
     /* there's no __kmpc_end_master called; so the (stats) */
     /* actions of __kmpc_end_master are done here */
-
-    if (global_tid < 0) {
-      KMP_WARNING(ThreadIdentInvalid);
-    }
     if (ret) {
       /* only one thread should do the pop since only */
       /* one did the push (see __kmpc_master()) */
-
       __kmp_pop_sync(global_tid, ct_master, loc);
     }
   }
@@ -1734,6 +1733,7 @@ should introduce an explicit barrier if it is required.
 */
 
 kmp_int32 __kmpc_single(ident_t *loc, kmp_int32 global_tid) {
+  __kmp_assert_valid_gtid(global_tid);
   kmp_int32 rc = __kmp_enter_single(global_tid, loc, TRUE);
 
   if (rc) {
@@ -1786,6 +1786,7 @@ only be called by the thread that executed the block of code protected
 by the `single` construct.
 */
 void __kmpc_end_single(ident_t *loc, kmp_int32 global_tid) {
+  __kmp_assert_valid_gtid(global_tid);
   __kmp_exit_single(global_tid);
   KMP_POP_PARTITIONED_TIMER();
 
@@ -2065,8 +2066,8 @@ void __kmpc_copyprivate(ident_t *loc, kmp_int32 gtid, size_t cpy_size,
                         void *cpy_data, void (*cpy_func)(void *, void *),
                         kmp_int32 didit) {
   void **data_ptr;
-
   KC_TRACE(10, ("__kmpc_copyprivate: called T#%d\n", gtid));
+  __kmp_assert_valid_gtid(gtid);
 
   KMP_MB();
 
@@ -3382,6 +3383,7 @@ __kmpc_reduce_nowait(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars,
   kmp_team_t *team;
   int teams_swapped = 0, task_state;
   KA_TRACE(10, ("__kmpc_reduce_nowait() enter: called T#%d\n", global_tid));
+  __kmp_assert_valid_gtid(global_tid);
 
   // why do we need this initialization here at all?
   // Reduction clause can not be used as a stand-alone directive.
@@ -3535,6 +3537,7 @@ void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
   PACKED_REDUCTION_METHOD_T packed_reduction_method;
 
   KA_TRACE(10, ("__kmpc_end_reduce_nowait() enter: called T#%d\n", global_tid));
+  __kmp_assert_valid_gtid(global_tid);
 
   packed_reduction_method = __KMP_GET_REDUCTION_METHOD(global_tid);
 
@@ -3609,6 +3612,7 @@ kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars,
   int teams_swapped = 0, task_state;
 
   KA_TRACE(10, ("__kmpc_reduce() enter: called T#%d\n", global_tid));
+  __kmp_assert_valid_gtid(global_tid);
 
   // why do we need this initialization here at all?
   // Reduction clause can not be a stand-alone directive.
@@ -3727,6 +3731,7 @@ void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
   int teams_swapped = 0, task_state;
 
   KA_TRACE(10, ("__kmpc_end_reduce() enter: called T#%d\n", global_tid));
+  __kmp_assert_valid_gtid(global_tid);
 
   th = __kmp_thread_from_gtid(global_tid);
   teams_swapped = __kmp_swap_teams_for_teams_reduction(th, &team, &task_state);
@@ -3883,6 +3888,7 @@ e.g. for(i=2;i<9;i+=2) lo=2, up=8, st=2.
 */
 void __kmpc_doacross_init(ident_t *loc, int gtid, int num_dims,
                           const struct kmp_dim *dims) {
+  __kmp_assert_valid_gtid(gtid);
   int j, idx;
   kmp_int64 last, trace_count;
   kmp_info_t *th = __kmp_threads[gtid];
@@ -4002,6 +4008,7 @@ void __kmpc_doacross_init(ident_t *loc, int gtid, int num_dims,
 }
 
 void __kmpc_doacross_wait(ident_t *loc, int gtid, const kmp_int64 *vec) {
+  __kmp_assert_valid_gtid(gtid);
   kmp_int32 shft, num_dims, i;
   kmp_uint32 flag;
   kmp_int64 iter_number; // iteration number of "collapsed" loop nest
@@ -4112,6 +4119,7 @@ void __kmpc_doacross_wait(ident_t *loc, int gtid, const kmp_int64 *vec) {
 }
 
 void __kmpc_doacross_post(ident_t *loc, int gtid, const kmp_int64 *vec) {
+  __kmp_assert_valid_gtid(gtid);
   kmp_int32 shft, num_dims, i;
   kmp_uint32 flag;
   kmp_int64 iter_number; // iteration number of "collapsed" loop nest
@@ -4183,6 +4191,7 @@ void __kmpc_doacross_post(ident_t *loc, int gtid, const kmp_int64 *vec) {
 }
 
 void __kmpc_doacross_fini(ident_t *loc, int gtid) {
+  __kmp_assert_valid_gtid(gtid);
   kmp_int32 num_done;
   kmp_info_t *th = __kmp_threads[gtid];
   kmp_team_t *team = th->th.th_team;
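Note on the recurring change: `__kmp_assert_valid_gtid` is defined in kmp.h, outside this excerpt, so its body is not shown here. Judging from the checks it replaces (the `__kmp_env_consistency_check` blocks that issued `KMP_WARNING(ThreadIdentInvalid)` when `global_tid < 0`), it rejects any gtid that cannot safely index the runtime's global thread table. The sketch below is a hypothetical, self-contained illustration of that pattern, not the actual definition: `assert_valid_gtid`, `g_threads_capacity`, and the `abort()` failure path are stand-ins for the runtime's `__kmp_assert_valid_gtid`, `__kmp_threads_capacity`, and fatal-error machinery.

```cpp
// Hypothetical stand-alone sketch of the validation pattern; NOT the real
// kmp.h definition. All names below are illustrative stand-ins.
#include <cstdio>
#include <cstdlib>

using kmp_int32 = int;

// Stand-in for the runtime's thread-table capacity (__kmp_threads_capacity).
static kmp_int32 g_threads_capacity = 8;

static inline void assert_valid_gtid(kmp_int32 gtid) {
  // A gtid is only usable if it can index the global thread table; anything
  // outside [0, capacity) means compiled code handed the runtime a stale or
  // corrupt thread identifier.
  if (gtid < 0 || gtid >= g_threads_capacity) {
    std::fprintf(stderr, "OMP: Error: invalid thread identifier %d\n", gtid);
    std::abort(); // the real runtime raises a fatal runtime error instead
  }
}

int main() {
  assert_valid_gtid(0);  // a plausible id for the initial thread: passes
  assert_valid_gtid(-1); // mirrors the old "global_tid < 0" warning case: aborts
  return 0;
}
```

The design shift is visible in the hunks above: instead of warning about a bad id only when consistency checking is enabled, and only in a few entry points, every `__kmpc_*` entry now validates the compiler-supplied gtid once, up front, before code such as `__kmpc_doacross_init` uses it to index `__kmp_threads[gtid]`.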