This repository was archived by the owner on Mar 28, 2023. It is now read-only.

Commit a93bc8c: Fixing merge conflict
2 parents: b9e3bd9 + cc4bf25

168 files changed: +5756 -1163 lines (large commit; only part of the diff is shown below)

.github/CODEOWNERS (+1 -2)

@@ -1,9 +1,8 @@
-* @vladimirlaz @romanovvlad @bader
+* @pvchupin
 
 # Use runtime team as the umbrella for most of the tests
 /SYCL/ @intel/llvm-reviewers-runtime
 
-
 # SYCL sub-directory matchers are grouped by code owner first, followed by
 # alphabetical order within the group. Please, keep this ordering.
 

.github/workflows/clang-format.yml (+1)

@@ -20,6 +20,7 @@ jobs:
       - name: Run clang-format for the patch
         shell: bash {0}
         run: |
+          git config --global --add safe.directory /__w/llvm-test-suite/llvm-test-suite
           git clang-format ${GITHUB_SHA}^1
           git diff > ./clang-format.patch
 

SYCL/AtomicRef/add.cpp (+1 -1)

@@ -1,7 +1,7 @@
 // See https://github.com/intel/llvm-test-suite/issues/867 for detailed status
 // UNSUPPORTED: hip
 
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-device-code-split=per_kernel -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-device-code-split=per_kernel -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/add.h (-4)

@@ -287,10 +287,6 @@ template <access::address_space space, typename T, typename Difference = T,
 void add_test_scopes(queue q, size_t N) {
   std::vector<memory_scope> scopes =
       q.get_device().get_info<info::device::atomic_memory_scope_capabilities>();
-  if (std::find(scopes.begin(), scopes.end(), memory_scope::system) !=
-      scopes.end()) {
-    add_test<space, T, Difference, order, memory_scope::system>(q, N);
-  }
   if (std::find(scopes.begin(), scopes.end(), memory_scope::work_group) !=
       scopes.end()) {
     add_test<space, T, Difference, order, memory_scope::work_group>(q, N);
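
The four removed lines above are the memory_scope::system branch of a dispatch pattern shared by every *_test_scopes helper touched in this commit: query the device's atomic_memory_scope_capabilities, then instantiate the test only for the scopes the device actually reports. Below is a minimal self-contained sketch of that pattern, assuming a recent DPC++ toolchain; run_test and run_supported_scopes are hypothetical stand-ins for the suite's add_test/and_test/... helpers, not code from this commit.

#include <sycl/sycl.hpp>

#include <algorithm>
#include <vector>

using namespace sycl;

// Hypothetical stand-in for the suite's add_test<...>(q, N) helpers.
template <memory_scope Scope> void run_test(queue &q) {
  // A real test would launch a kernel that updates an atomic_ref
  // instantiated with Scope, then verify the result on the host.
}

void run_supported_scopes(queue q) {
  // Ask the device which atomic memory scopes it supports.
  std::vector<memory_scope> scopes =
      q.get_device().get_info<info::device::atomic_memory_scope_capabilities>();
  auto supported = [&](memory_scope s) {
    return std::find(scopes.begin(), scopes.end(), s) != scopes.end();
  };
  // After this commit the dispatch chain starts at work_group;
  // the memory_scope::system branch is gone.
  if (supported(memory_scope::work_group))
    run_test<memory_scope::work_group>(q);
  if (supported(memory_scope::device))
    run_test<memory_scope::device>(q);
}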

SYCL/AtomicRef/add_generic.cpp (+3 -3)

@@ -1,11 +1,11 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
 
-// CUDA and HIP backends have had no support for the generic address space yet
-// XFAIL: cuda || hip
+// HIP backend has no support for the generic address space yet
+// XFAIL: hip
 
 #include "add.h"
 

SYCL/AtomicRef/add_generic_local.cpp (+3 -3)

@@ -1,12 +1,12 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
 
-// CUDA and HIP backends have had no support for the generic address space yet.
+// HIP backend has no support for the generic address space yet.
 // Host does not support barrier.
-// XFAIL: cuda || hip || host
+// XFAIL: hip || host
 
 #define TEST_GENERIC_IN_LOCAL 1
 

SYCL/AtomicRef/add_generic_local_native_fp.cpp (+3 -3)

@@ -1,13 +1,13 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
 
-// CUDA and HIP backends have had no support for the generic address space yet.
+// HIP backend has no support for the generic address space yet.
 // Host does not support barrier. HIP does not support native floating point
 // atomics
-// XFAIL: cuda, hip, host
+// XFAIL: hip, host
 
 #define SYCL_USE_NATIVE_FP_ATOMICS
 #define FP_TESTS_ONLY

SYCL/AtomicRef/add_generic_native_fp.cpp (+3 -3)

@@ -1,12 +1,12 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
 
-// CUDA and HIP backends have had no support for the generic address space yet.
+// HIP backend has had no support for the generic address space yet.
 // HIP does not support native floating point atomics
-// XFAIL: cuda, hip
+// XFAIL: hip
 
 #define SYCL_USE_NATIVE_FP_ATOMICS
 #define FP_TESTS_ONLY

SYCL/AtomicRef/add_local.cpp (+4 -4)

@@ -1,12 +1,12 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
 
-// Barrier is not supported on host. HIP and ACC do not support floating
-// point atomics.
-// XFAIL: host, hip, acc
+// Barrier is not supported on host. HIP does not support floating point
+// atomics.
+// XFAIL: host, hip
 
 #include "add.h"
 

SYCL/AtomicRef/add_local_native_fp.cpp (+1 -1)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/add_native_fp.cpp (+1 -1)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/and.cpp (+1 -1)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/and.h (-4)

@@ -136,10 +136,6 @@ template <access::address_space space, typename T,
 void and_test_scopes(queue q) {
   std::vector<memory_scope> scopes =
       q.get_device().get_info<info::device::atomic_memory_scope_capabilities>();
-  if (std::find(scopes.begin(), scopes.end(), memory_scope::system) !=
-      scopes.end()) {
-    and_test<space, T, order, memory_scope::system>(q);
-  }
   if (std::find(scopes.begin(), scopes.end(), memory_scope::work_group) !=
       scopes.end()) {
     and_test<space, T, order, memory_scope::work_group>(q);

SYCL/AtomicRef/and_generic.cpp (+3 -3)

@@ -1,11 +1,11 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
 
-// CUDA and HIP backends have had no support for the generic address space yet
-// XFAIL: cuda || hip
+// HIP backend has had no support for the generic address space yet
+// XFAIL: hip
 
 #include "and.h"
 

SYCL/AtomicRef/and_generic_local.cpp (+3 -3)

@@ -1,12 +1,12 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
 
-// CUDA and HIP backends have had no support for the generic address space yet.
+// HIP backend has no support for the generic address space yet.
 // Host does not support barrier.
-// XFAIL: cuda || hip || host
+// XFAIL: hip || host
 
 #define TEST_GENERIC_IN_LOCAL 1
 

SYCL/AtomicRef/and_local.cpp (+1 -1)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/assignment_atomic64_generic.cpp (+2 -2)

@@ -4,8 +4,8 @@
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
 
-// CUDA backend has had no support for the generic address space yet
-// XFAIL: cuda || hip
+// HIP backend has no support for the generic address space yet
+// XFAIL: hip
 
 #include "assignment.h"
 #include <iostream>

SYCL/AtomicRef/assignment_generic.cpp (+2 -2)

@@ -4,8 +4,8 @@
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
 
-// CUDA backend has had no support for the generic address space yet
-// XFAIL: cuda || hip
+// HIP backend has no support for the generic address space yet
+// XFAIL: hip
 
 #include "assignment.h"
 #include <iostream>

SYCL/AtomicRef/compare_exchange.cpp (+1 -1)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/compare_exchange.h (-4)

@@ -155,10 +155,6 @@ template <access::address_space space, typename T,
 void compare_exchange_test_scopes(queue q, size_t N) {
   std::vector<memory_scope> scopes =
       q.get_device().get_info<info::device::atomic_memory_scope_capabilities>();
-  if (std::find(scopes.begin(), scopes.end(), memory_scope::system) !=
-      scopes.end()) {
-    compare_exchange_test<space, T, order, memory_scope::system>(q, N);
-  }
   if (std::find(scopes.begin(), scopes.end(), memory_scope::work_group) !=
       scopes.end()) {
     compare_exchange_test<space, T, order, memory_scope::work_group>(q, N);
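
For reference, each compare_exchange_test instantiation ultimately exercises a sycl::atomic_ref bound to one memory scope. The following is a minimal sketch of that operation, assuming SYCL 2020; the buffer, values, and kernel shape here are illustrative, not taken from the suite's headers.

#include <sycl/sycl.hpp>

int main() {
  sycl::queue q;
  int data = 0;
  {
    sycl::buffer<int, 1> buf(&data, sycl::range<1>(1));
    q.submit([&](sycl::handler &cgh) {
      auto acc = buf.get_access<sycl::access::mode::read_write>(cgh);
      cgh.single_task([=] {
        // atomic_ref with an explicit memory scope, as the
        // compare_exchange_test<..., memory_scope::work_group>
        // instantiations above select at compile time.
        sycl::atomic_ref<int, sycl::memory_order::relaxed,
                         sycl::memory_scope::work_group,
                         sycl::access::address_space::global_space>
            ref(acc[0]);
        int expected = 0;
        ref.compare_exchange_strong(expected, 1); // CAS: 0 -> 1
      });
    });
  } // buffer destructor copies the result back to data
  return data == 1 ? 0 : 1;
}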

SYCL/AtomicRef/compare_exchange_generic.cpp (+3 -3)

@@ -1,11 +1,11 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
 
-// CUDA and HIP backends have had no support for the generic address space yet
-// XFAIL: cuda || hip
+// HIP backend has no support for the generic address space yet
+// XFAIL: hip
 
 #include "compare_exchange.h"
 

SYCL/AtomicRef/compare_exchange_generic_local.cpp (+3 -3)

@@ -1,12 +1,12 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
 
-// CUDA and HIP backends have had no support for the generic address space yet.
+// HIP backend has no support for the generic address space yet.
 // Host does not support barrier.
-// XFAIL: cuda || hip || host
+// XFAIL: hip || host
 
 #define TEST_GENERIC_IN_LOCAL 1
 

SYCL/AtomicRef/compare_exchange_local.cpp (+1 -1)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/exchange.cpp (+1 -1)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/exchange.h (-4)

@@ -138,10 +138,6 @@ template <access::address_space space, typename T,
 void exchange_test_scopes(queue q, size_t N) {
   std::vector<memory_scope> scopes =
       q.get_device().get_info<info::device::atomic_memory_scope_capabilities>();
-  if (std::find(scopes.begin(), scopes.end(), memory_scope::system) !=
-      scopes.end()) {
-    exchange_test<space, T, order, memory_scope::system>(q, N);
-  }
   if (std::find(scopes.begin(), scopes.end(), memory_scope::work_group) !=
       scopes.end()) {
     exchange_test<space, T, order, memory_scope::work_group>(q, N);

SYCL/AtomicRef/exchange_generic.cpp (+3 -3)

@@ -1,11 +1,11 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
 
-// CUDA and HIP backends have had no support for the generic address space yet
-// XFAIL: cuda || hip
+// HIP backend has no support for the generic address space yet
+// XFAIL: hip
 
 #include "exchange.h"
 

SYCL/AtomicRef/exchange_generic_local.cpp (+3 -3)

@@ -1,12 +1,12 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
 
-// CUDA and HIP backends have had no support for the generic address space yet.
+// HIP backend has no support for the generic address space yet.
 // Host does not support barrier.
-// XFAIL: cuda || hip || host
+// XFAIL: hip || host
 
 #define TEST_GENERIC_IN_LOCAL 1
 

SYCL/AtomicRef/exchange_local.cpp (+1 -1)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/load.cpp (+1 -1)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/load.h (-4)

@@ -120,10 +120,6 @@ template <access::address_space space, typename T,
 void load_test_scopes(queue q, size_t N) {
   std::vector<memory_scope> scopes =
       q.get_device().get_info<info::device::atomic_memory_scope_capabilities>();
-  if (std::find(scopes.begin(), scopes.end(), memory_scope::system) !=
-      scopes.end()) {
-    load_test<space, T, order, memory_scope::system>(q, N);
-  }
   if (std::find(scopes.begin(), scopes.end(), memory_scope::work_group) !=
       scopes.end()) {
     load_test<space, T, order, memory_scope::work_group>(q, N);

SYCL/AtomicRef/load_generic.cpp (+3 -3)

@@ -1,11 +1,11 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
 
-// CUDA and HIP backends have had no support for the generic address space yet
-// XFAIL: cuda || hip
+// HIP backend has no support for the generic address space yet
+// XFAIL: hip
 
 #include "load.h"
 

SYCL/AtomicRef/load_generic_local.cpp (+3 -3)

@@ -1,12 +1,12 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out
 // RUN: %ACC_RUN_PLACEHOLDER %t.out
 
-// CUDA backend has had no support for the generic address space yet. Barrier is
+// HIP backend has no support for the generic address space yet. Barrier is
 // not supported on host.
-// XFAIL: cuda, hip, host
+// XFAIL: hip, host
 
 #define TEST_GENERIC_IN_LOCAL 1
 

SYCL/AtomicRef/load_local.cpp (+1 -1)

@@ -1,4 +1,4 @@
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/max.cpp (+1 -1)

@@ -1,7 +1,7 @@
 // See https://github.com/intel/llvm-test-suite/issues/867 for detailed status
 // UNSUPPORTED: hip
 
-// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70
+// RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out
 // RUN: %HOST_RUN_PLACEHOLDER %t.out
 // RUN: %GPU_RUN_PLACEHOLDER %t.out
 // RUN: %CPU_RUN_PLACEHOLDER %t.out

SYCL/AtomicRef/max.h (-4)

@@ -147,10 +147,6 @@ template <access::address_space space, typename T,
 void max_test_scopes(queue q, size_t N) {
   std::vector<memory_scope> scopes =
       q.get_device().get_info<info::device::atomic_memory_scope_capabilities>();
-  if (std::find(scopes.begin(), scopes.end(), memory_scope::system) !=
-      scopes.end()) {
-    max_test<space, T, order, memory_scope::system>(q, N);
-  }
   if (std::find(scopes.begin(), scopes.end(), memory_scope::work_group) !=
       scopes.end()) {
     max_test<space, T, order, memory_scope::work_group>(q, N);
