@@ -29,26 +29,26 @@ _Z19__spirv_AtomicStorePU3AS3fN5__spv5Scope4FlagENS1_19MemorySemanticsMask4FlagE
#define FDECL(TYPE, PREFIX, AS, BYTE_SIZE, MEM_ORDER) \
TYPE __clc__atomic_##PREFIX##store_##AS##_##BYTE_SIZE##_##MEM_ORDER(volatile AS const TYPE *, TYPE);

- #define IMPL(TYPE, TYPE_MANGLED, AS, AS_MANGLED, PREFIX, BYTE_SIZE) \
-   FDECL(TYPE, PREFIX, AS, BYTE_SIZE, unordered) \
-   FDECL(TYPE, PREFIX, AS, BYTE_SIZE, release) \
-   FDECL(TYPE, PREFIX, AS, BYTE_SIZE, seq_cst) \
-   _CLC_DEF void \
-   _Z19__spirv_AtomicStorePU3##AS_MANGLED##TYPE_MANGLED##N5__spv5Scope4FlagENS1_19MemorySemanticsMask4FlagE##TYPE_MANGLED( \
-       volatile AS TYPE *p, enum Scope scope, \
-       enum MemorySemanticsMask semantics, TYPE val) { \
-     if (semantics == Release) { \
-       __clc__atomic_##PREFIX##store_##AS##_##BYTE_SIZE##_release(p, val); \
-     } else if (semantics == SequentiallyConsistent) { \
-       __clc__atomic_##PREFIX##store_##AS##_##BYTE_SIZE##_seq_cst(p, val); \
-     } else { \
-       __clc__atomic_##PREFIX##store_##AS##_##BYTE_SIZE##_unordered(p, val); \
-     } \
+ #define IMPL(TYPE, TYPE_MANGLED, AS, AS_MANGLED, SUB, PREFIX, BYTE_SIZE) \
+   FDECL(TYPE, PREFIX, AS, BYTE_SIZE, unordered) \
+   FDECL(TYPE, PREFIX, AS, BYTE_SIZE, release) \
+   FDECL(TYPE, PREFIX, AS, BYTE_SIZE, seq_cst) \
+   _CLC_DEF void \
+   _Z19__spirv_AtomicStoreP##AS_MANGLED##TYPE_MANGLED##N5__spv5Scope4FlagENS##SUB##_19MemorySemanticsMask4FlagE##TYPE_MANGLED( \
+       volatile AS TYPE *p, enum Scope scope, \
+       enum MemorySemanticsMask semantics, TYPE val) { \
+     if (semantics == Release) { \
+       __clc__atomic_##PREFIX##store_##AS##_##BYTE_SIZE##_release(p, val); \
+     } else if (semantics == SequentiallyConsistent) { \
+       __clc__atomic_##PREFIX##store_##AS##_##BYTE_SIZE##_seq_cst(p, val); \
+     } else { \
+       __clc__atomic_##PREFIX##store_##AS##_##BYTE_SIZE##_unordered(p, val); \
+     } \
}

- #define IMPL_AS(TYPE, TYPE_MANGLED, PREFIX, BYTE_SIZE) \
-   IMPL(TYPE, TYPE_MANGLED, global, AS1, PREFIX, BYTE_SIZE) \
-   IMPL(TYPE, TYPE_MANGLED, local, AS3, PREFIX, BYTE_SIZE)
+ #define IMPL_AS(TYPE, TYPE_MANGLED, PREFIX, BYTE_SIZE) \
+   IMPL(TYPE, TYPE_MANGLED, global, U3AS1, 1, PREFIX, BYTE_SIZE) \
+   IMPL(TYPE, TYPE_MANGLED, local, U3AS3, 1, PREFIX, BYTE_SIZE)

IMPL_AS(int, i, , 4)
IMPL_AS(unsigned int, j, u, 4)
@@ -58,6 +58,21 @@ IMPL_AS(long, l, , 8)
IMPL_AS(unsigned long, m, u, 8)
#endif

+ #if _CLC_GENERIC_AS_SUPPORTED
+
+ #define IMPL_GENERIC(TYPE, TYPE_MANGLED, PREFIX, BYTE_SIZE) \
+   IMPL(TYPE, TYPE_MANGLED, , , 0, PREFIX, BYTE_SIZE)
+
+ IMPL_GENERIC(int, i, , 4)
+ IMPL_GENERIC(unsigned int, j, u, 4)
+
+ #ifdef cl_khr_int64_base_atomics
+ IMPL_GENERIC(long, l, , 8)
+ IMPL_GENERIC(unsigned long, m, u, 8)
+ #endif
+
+ #endif //_CLC_GENERIC_AS_SUPPORTED
+
#undef FDECL
#undef IMPL_AS
#undef IMPL
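
For reference, a sketch of the mangled names the updated IMPL macro pastes together for the int instantiations above; these expansions are derived by hand from the macro arguments in the diff, so treat them as illustrative rather than part of the commit:

// IMPL(int, i, global, U3AS1, 1, , 4) produces the address-space-qualified overload:
//   _Z19__spirv_AtomicStorePU3AS1iN5__spv5Scope4FlagENS1_19MemorySemanticsMask4FlagEi
// IMPL(int, i, , , 0, , 4) (via IMPL_GENERIC) produces the generic-pointer overload:
//   _Z19__spirv_AtomicStorePiN5__spv5Scope4FlagENS0_19MemorySemanticsMask4FlagEi
// The new SUB argument is the Itanium substitution index used for the __spv
// namespace in the MemorySemanticsMask parameter: an AS-qualified pointer adds an
// extra substitution entry (hence NS1_), while a plain generic pointer does not
// (hence NS0_), which is why the previously hard-coded "PU3"/"NS1_" pattern could
// not be reused for the generic address space.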