
Commit 170cb56

Merge pull request intel#943 from bb-sycl/xmain
Auto pulldown and update tc files for xmain branch on 20220527
2 parents (c4dda9c + ff0ae05), commit 170cb56

119 files changed (+1484, -277 lines)


SYCL/Assert/assert_in_simultaneously_multiple_tus.cpp (2 additions & 5 deletions)

@@ -1,9 +1,6 @@
 // FIXME unsupported on CUDA and HIP until fallback libdevice becomes available
-// UNSUPPORTED: cuda || hip
-// clang-format off
-// Failed on Linux on unrelated change (FileCheck error: '.../assert_in_simultaneously_multiple_tus.cpp.tmp.txt' is empty)
-// clang-format on
-// REQUIRES: TEMPORARILY_DISABLED
+// FIXME flaky output on Level Zero
+// UNSUPPORTED: cuda || hip || level_zero
 // RUN: %clangxx -DSYCL_FALLBACK_ASSERT=1 -fsycl -fsycl-targets=%sycl_triple -I %S/Inputs %s %S/Inputs/kernels_in_file2.cpp -o %t.out %threads_lib
 // RUN: %CPU_RUN_PLACEHOLDER %t.out &> %t.txt || true
 // RUN: %CPU_RUN_PLACEHOLDER FileCheck %s --input-file %t.txt
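
For context, -DSYCL_FALLBACK_ASSERT=1 enables the fallback implementation of device-side assert that this test exercises across multiple translation units. A minimal sketch of the feature under test, with a hypothetical kernel that is not the test's actual source:

// Minimal sketch of a device-side assert (hypothetical, not the test source).
// Build with something like: clang++ -fsycl -DSYCL_FALLBACK_ASSERT=1 sketch.cpp
#include <sycl/sycl.hpp>
#include <cassert>

int main() {
  sycl::queue q;
  q.parallel_for(sycl::range<1>{16}, [=](sycl::id<1> i) {
     // When the condition is false, the fallback libdevice reports the
     // failure from the device instead of silently continuing.
     assert(i[0] < 16 && "index out of range");
   }).wait();
  return 0;
}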

SYCL/AtomicRef/add.cpp (1 addition & 1 deletion)

@@ -1,7 +1,7 @@
 // See https://github.com/intel/llvm-test-suite/issues/867 for detailed status
 // UNSUPPORTED: hip

-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-device-code-split=per_kernel -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-device-code-split=per_kernel -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/add.h (0 additions & 4 deletions)

@@ -287,10 +287,6 @@ template <access::address_space space, typename T, typename Difference = T,
 void add_test_scopes(queue q, size_t N) {
   std::vector<memory_scope> scopes =
       q.get_device().get_info<info::device::atomic_memory_scope_capabilities>();
-  if (std::find(scopes.begin(), scopes.end(), memory_scope::system) !=
-      scopes.end()) {
-    add_test<space, T, Difference, order, memory_scope::system>(q, N);
-  }
   if (std::find(scopes.begin(), scopes.end(), memory_scope::work_group) !=
       scopes.end()) {
     add_test<space, T, Difference, order, memory_scope::work_group>(q, N);
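
The header changes below all share this shape: each *_test_scopes helper reads the device's atomic_memory_scope_capabilities and dispatches one test instantiation per supported scope, and this commit removes the memory_scope::system instantiation from that dispatch. A minimal sketch of the gating pattern, where work<Scope> is a hypothetical stand-in for the suite's test helpers:

#include <sycl/sycl.hpp>
#include <algorithm>
#include <vector>

using namespace sycl;

// Hypothetical stand-in for the suite's add_test/and_test/... helpers.
template <memory_scope Scope> void work(queue &q) { /* launch kernels */ }

int main() {
  queue q;
  std::vector<memory_scope> scopes =
      q.get_device().get_info<info::device::atomic_memory_scope_capabilities>();
  // After this commit, memory_scope::system is no longer probed; the
  // narrower scopes are still dispatched when the device advertises them.
  if (std::find(scopes.begin(), scopes.end(), memory_scope::work_group) !=
      scopes.end())
    work<memory_scope::work_group>(q);
  return 0;
}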

SYCL/AtomicRef/add_generic.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/add_generic_local.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/add_generic_local_native_fp.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/add_generic_native_fp.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/add_local.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/add_local_native_fp.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/add_native_fp.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/and.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/and.h (0 additions & 4 deletions)

@@ -136,10 +136,6 @@ template <access::address_space space, typename T,
 void and_test_scopes(queue q) {
   std::vector<memory_scope> scopes =
       q.get_device().get_info<info::device::atomic_memory_scope_capabilities>();
-  if (std::find(scopes.begin(), scopes.end(), memory_scope::system) !=
-      scopes.end()) {
-    and_test<space, T, order, memory_scope::system>(q);
-  }
   if (std::find(scopes.begin(), scopes.end(), memory_scope::work_group) !=
       scopes.end()) {
     and_test<space, T, order, memory_scope::work_group>(q);

SYCL/AtomicRef/and_generic.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/and_generic_local.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/and_local.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/compare_exchange.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/compare_exchange.h (0 additions & 4 deletions)

@@ -155,10 +155,6 @@ template <access::address_space space, typename T,
 void compare_exchange_test_scopes(queue q, size_t N) {
   std::vector<memory_scope> scopes =
       q.get_device().get_info<info::device::atomic_memory_scope_capabilities>();
-  if (std::find(scopes.begin(), scopes.end(), memory_scope::system) !=
-      scopes.end()) {
-    compare_exchange_test<space, T, order, memory_scope::system>(q, N);
-  }
   if (std::find(scopes.begin(), scopes.end(), memory_scope::work_group) !=
       scopes.end()) {
     compare_exchange_test<space, T, order, memory_scope::work_group>(q, N);

SYCL/AtomicRef/compare_exchange_generic.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/compare_exchange_generic_local.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/compare_exchange_local.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/exchange.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/exchange.h (0 additions & 4 deletions)

@@ -138,10 +138,6 @@ template <access::address_space space, typename T,
 void exchange_test_scopes(queue q, size_t N) {
   std::vector<memory_scope> scopes =
       q.get_device().get_info<info::device::atomic_memory_scope_capabilities>();
-  if (std::find(scopes.begin(), scopes.end(), memory_scope::system) !=
-      scopes.end()) {
-    exchange_test<space, T, order, memory_scope::system>(q, N);
-  }
   if (std::find(scopes.begin(), scopes.end(), memory_scope::work_group) !=
       scopes.end()) {
     exchange_test<space, T, order, memory_scope::work_group>(q, N);

SYCL/AtomicRef/exchange_generic.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/exchange_generic_local.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/exchange_local.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/load.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/load.h (0 additions & 4 deletions)

@@ -120,10 +120,6 @@ template <access::address_space space, typename T,
 void load_test_scopes(queue q, size_t N) {
   std::vector<memory_scope> scopes =
       q.get_device().get_info<info::device::atomic_memory_scope_capabilities>();
-  if (std::find(scopes.begin(), scopes.end(), memory_scope::system) !=
-      scopes.end()) {
-    load_test<space, T, order, memory_scope::system>(q, N);
-  }
   if (std::find(scopes.begin(), scopes.end(), memory_scope::work_group) !=
       scopes.end()) {
     load_test<space, T, order, memory_scope::work_group>(q, N);

SYCL/AtomicRef/load_generic.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/load_generic_local.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/load_local.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/max.cpp (1 addition & 1 deletion)

@@ -1,7 +1,7 @@
 // See https://github.com/intel/llvm-test-suite/issues/867 for detailed status
 // UNSUPPORTED: hip

-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/max.h (0 additions & 4 deletions)

@@ -147,10 +147,6 @@ template <access::address_space space, typename T,
 void max_test_scopes(queue q, size_t N) {
   std::vector<memory_scope> scopes =
       q.get_device().get_info<info::device::atomic_memory_scope_capabilities>();
-  if (std::find(scopes.begin(), scopes.end(), memory_scope::system) !=
-      scopes.end()) {
-    max_test<space, T, order, memory_scope::system>(q, N);
-  }
   if (std::find(scopes.begin(), scopes.end(), memory_scope::work_group) !=
       scopes.end()) {
     max_test<space, T, order, memory_scope::work_group>(q, N);

SYCL/AtomicRef/max_generic.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/max_generic_local.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/max_generic_local_native_fp.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/max_generic_native_fp.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/max_local.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/max_local_native_fp.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/max_native_fp.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/min.cpp (1 addition & 1 deletion)

@@ -1,7 +1,7 @@
 // See https://github.com/intel/llvm-test-suite/issues/867 for detailed status
 // UNSUPPORTED: hip

-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/min.h (0 additions & 4 deletions)

@@ -145,10 +145,6 @@ template <access::address_space space, typename T,
 void min_test_scopes(queue q, size_t N) {
   std::vector<memory_scope> scopes =
       q.get_device().get_info<info::device::atomic_memory_scope_capabilities>();
-  if (std::find(scopes.begin(), scopes.end(), memory_scope::system) !=
-      scopes.end()) {
-    min_test<space, T, order, memory_scope::system>(q, N);
-  }
   if (std::find(scopes.begin(), scopes.end(), memory_scope::work_group) !=
       scopes.end()) {
     min_test<space, T, order, memory_scope::work_group>(q, N);

SYCL/AtomicRef/min_generic.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/min_generic_local.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/min_generic_local_native_fp.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/min_generic_native_fp.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/min_local.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/min_local_native_fp.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/min_native_fp.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/or.cpp (1 addition & 1 deletion)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
