From 025cf7ecb4373e248841227d3724ad4f94062b8e Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Tue, 25 Jan 2022 14:06:25 +0000 Subject: [PATCH 01/45] Added bfloat16 support for cuda backend. Added bfloat16 in oneapi experimental namespace. Signed-off-by: jack.kirk --- .../sycl/ext/oneapi/experimental/bfloat16.hpp | 161 ++++++++++++++++++ 1 file changed, 161 insertions(+) create mode 100644 sycl/include/sycl/ext/oneapi/experimental/bfloat16.hpp diff --git a/sycl/include/sycl/ext/oneapi/experimental/bfloat16.hpp b/sycl/include/sycl/ext/oneapi/experimental/bfloat16.hpp new file mode 100644 index 0000000000000..329094634d9ad --- /dev/null +++ b/sycl/include/sycl/ext/oneapi/experimental/bfloat16.hpp @@ -0,0 +1,161 @@ +//==--------- bfloat16.hpp ------- SYCL bfloat16 conversion ----------------==// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#pragma once + +#include +#include + +__SYCL_INLINE_NAMESPACE(cl) { +namespace sycl { +namespace ext { +namespace oneapi { +namespace experimental { + +class [[sycl_detail::uses_aspects(ext_intel_bf16_conversion)]] bfloat16 { + using storage_t = uint16_t; + storage_t value; + +public: + bfloat16() = default; + bfloat16(const bfloat16 &) = default; + ~bfloat16() = default; + + // Explicit conversion functions + static storage_t from_float(const float &a) { +#if defined(__SYCL_DEVICE_ONLY__) +#if defined(__NVPTX__) + return __nvvm_f2bf16_rn(a); +#else + return __spirv_ConvertFToBF16INTEL(a); +#endif +#else + throw exception{errc::feature_not_supported, + "Bfloat16 conversion is not supported on host device"}; +#endif + } + static float to_float(const storage_t &a) { +#if defined(__SYCL_DEVICE_ONLY__) +#if defined(__NVPTX__) + unsigned int y = a; + y = y << 16; + float *res = reinterpret_cast(&y); + return *res; +#else + return __spirv_ConvertBF16ToFINTEL(a); +#endif +#else + throw exception{errc::feature_not_supported, + "Bfloat16 conversion is not supported on host device"}; +#endif + } + + static bfloat16 from_bits(const storage_t &a) { + bfloat16 res; + res.value = a; + return res; + } + + // Implicit conversion from float to bfloat16 + bfloat16(const float &a) { value = from_float(a); } + + bfloat16 &operator=(const float &rhs) { + value = from_float(rhs); + return *this; + } + + // Implicit conversion from bfloat16 to float + operator float() const { return to_float(value); } + operator sycl::half() const { return to_float(value); } + + // Get raw bits representation of bfloat16 + storage_t raw() const { return value; } + + // Logical operators (!,||,&&) are covered if we can cast to bool + explicit operator bool() { return to_float(value) != 0.0f; } + + // Unary minus operator overloading + friend bfloat16 operator-(bfloat16 &lhs) { + return bfloat16{-to_float(lhs.value)}; + } + +// Increment and decrement operators overloading +#define OP(op) \ + friend bfloat16 &operator op(bfloat16 &lhs) { \ + float f = to_float(lhs.value); \ + lhs.value = from_float(op f); \ + return lhs; \ + } \ + friend bfloat16 operator op(bfloat16 &lhs, int) { \ + bfloat16 old = lhs; \ + operator op(lhs); \ + return old; \ + } + OP(++) + OP(--) +#undef OP + + // Assignment operators overloading +#define OP(op) \ + friend bfloat16 &operator op(bfloat16 &lhs, const bfloat16 &rhs) { \ + float f = static_cast(lhs); \ + f op 
static_cast(rhs); \ + return lhs = f; \ + } \ + template \ + friend bfloat16 &operator op(bfloat16 &lhs, const T &rhs) { \ + float f = static_cast(lhs); \ + f op static_cast(rhs); \ + return lhs = f; \ + } \ + template friend T &operator op(T &lhs, const bfloat16 &rhs) { \ + float f = static_cast(lhs); \ + f op static_cast(rhs); \ + return lhs = f; \ + } + OP(+=) + OP(-=) + OP(*=) + OP(/=) +#undef OP + +// Binary operators overloading +#define OP(type, op) \ + friend type operator op(const bfloat16 &lhs, const bfloat16 &rhs) { \ + return type{static_cast(lhs) op static_cast(rhs)}; \ + } \ + template \ + friend type operator op(const bfloat16 &lhs, const T &rhs) { \ + return type{static_cast(lhs) op static_cast(rhs)}; \ + } \ + template \ + friend type operator op(const T &lhs, const bfloat16 &rhs) { \ + return type{static_cast(lhs) op static_cast(rhs)}; \ + } + OP(bfloat16, +) + OP(bfloat16, -) + OP(bfloat16, *) + OP(bfloat16, /) + OP(bool, ==) + OP(bool, !=) + OP(bool, <) + OP(bool, >) + OP(bool, <=) + OP(bool, >=) +#undef OP + + // Bitwise(|,&,~,^), modulo(%) and shift(<<,>>) operations are not supported + // for floating-point types. +}; + +} // namespace experimental +} // namespace intel +} // namespace ext + +} // namespace sycl +} // __SYCL_INLINE_NAMESPACE(cl) From 66b4e3344bc7a9e514d857d4931ba26ed192b3f9 Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Tue, 25 Jan 2022 14:13:58 +0000 Subject: [PATCH 02/45] deleted intel namespace bfloat16. --- .../sycl/ext/intel/experimental/bfloat16.hpp | 150 ------------------ 1 file changed, 150 deletions(-) delete mode 100644 sycl/include/sycl/ext/intel/experimental/bfloat16.hpp diff --git a/sycl/include/sycl/ext/intel/experimental/bfloat16.hpp b/sycl/include/sycl/ext/intel/experimental/bfloat16.hpp deleted file mode 100644 index 5a51f3746e225..0000000000000 --- a/sycl/include/sycl/ext/intel/experimental/bfloat16.hpp +++ /dev/null @@ -1,150 +0,0 @@ -//==--------- bfloat16.hpp ------- SYCL bfloat16 conversion ----------------==// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#pragma once - -#include -#include - -__SYCL_INLINE_NAMESPACE(cl) { -namespace sycl { -namespace ext { -namespace intel { -namespace experimental { - -class [[sycl_detail::uses_aspects(ext_intel_bf16_conversion)]] bfloat16 { - using storage_t = uint16_t; - storage_t value; - -public: - bfloat16() = default; - bfloat16(const bfloat16 &) = default; - ~bfloat16() = default; - - // Explicit conversion functions - static storage_t from_float(const float &a) { -#if defined(__SYCL_DEVICE_ONLY__) - return __spirv_ConvertFToBF16INTEL(a); -#else - throw exception{errc::feature_not_supported, - "Bfloat16 conversion is not supported on host device"}; -#endif - } - static float to_float(const storage_t &a) { -#if defined(__SYCL_DEVICE_ONLY__) - return __spirv_ConvertBF16ToFINTEL(a); -#else - throw exception{errc::feature_not_supported, - "Bfloat16 conversion is not supported on host device"}; -#endif - } - - static bfloat16 from_bits(const storage_t &a) { - bfloat16 res; - res.value = a; - return res; - } - - // Implicit conversion from float to bfloat16 - bfloat16(const float &a) { value = from_float(a); } - - bfloat16 &operator=(const float &rhs) { - value = from_float(rhs); - return *this; - } - - // Implicit conversion from bfloat16 to float - operator float() const { return to_float(value); } - operator sycl::half() const { return to_float(value); } - - // Get raw bits representation of bfloat16 - storage_t raw() const { return value; } - - // Logical operators (!,||,&&) are covered if we can cast to bool - explicit operator bool() { return to_float(value) != 0.0f; } - - // Unary minus operator overloading - friend bfloat16 operator-(bfloat16 &lhs) { - return bfloat16{-to_float(lhs.value)}; - } - -// Increment and decrement operators overloading -#define OP(op) \ - friend bfloat16 &operator op(bfloat16 &lhs) { \ - float f = to_float(lhs.value); \ - lhs.value = from_float(op f); \ - return lhs; \ - } \ - friend bfloat16 operator op(bfloat16 &lhs, int) { \ - bfloat16 old = lhs; \ - operator op(lhs); \ - return old; \ - } - OP(++) - OP(--) -#undef OP - - // Assignment operators overloading -#define OP(op) \ - friend bfloat16 &operator op(bfloat16 &lhs, const bfloat16 &rhs) { \ - float f = static_cast(lhs); \ - f op static_cast(rhs); \ - return lhs = f; \ - } \ - template \ - friend bfloat16 &operator op(bfloat16 &lhs, const T &rhs) { \ - float f = static_cast(lhs); \ - f op static_cast(rhs); \ - return lhs = f; \ - } \ - template friend T &operator op(T &lhs, const bfloat16 &rhs) { \ - float f = static_cast(lhs); \ - f op static_cast(rhs); \ - return lhs = f; \ - } - OP(+=) - OP(-=) - OP(*=) - OP(/=) -#undef OP - -// Binary operators overloading -#define OP(type, op) \ - friend type operator op(const bfloat16 &lhs, const bfloat16 &rhs) { \ - return type{static_cast(lhs) op static_cast(rhs)}; \ - } \ - template \ - friend type operator op(const bfloat16 &lhs, const T &rhs) { \ - return type{static_cast(lhs) op static_cast(rhs)}; \ - } \ - template \ - friend type operator op(const T &lhs, const bfloat16 &rhs) { \ - return type{static_cast(lhs) op static_cast(rhs)}; \ - } - OP(bfloat16, +) - OP(bfloat16, -) - OP(bfloat16, *) - OP(bfloat16, /) - OP(bool, ==) - OP(bool, !=) - OP(bool, <) - OP(bool, >) - OP(bool, <=) - OP(bool, >=) -#undef OP - - // Bitwise(|,&,~,^), modulo(%) and shift(<<,>>) operations are not supported - // for floating-point types. 
-}; - -} // namespace experimental -} // namespace intel -} // namespace ext - -} // namespace sycl -} // __SYCL_INLINE_NAMESPACE(cl) From 2d04406d0198b5321cf2aaa870d395e9f042755b Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Tue, 25 Jan 2022 14:29:32 +0000 Subject: [PATCH 03/45] Format. --- sycl/include/sycl/ext/oneapi/experimental/bfloat16.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sycl/include/sycl/ext/oneapi/experimental/bfloat16.hpp b/sycl/include/sycl/ext/oneapi/experimental/bfloat16.hpp index 329094634d9ad..ef1f01d5340ae 100644 --- a/sycl/include/sycl/ext/oneapi/experimental/bfloat16.hpp +++ b/sycl/include/sycl/ext/oneapi/experimental/bfloat16.hpp @@ -154,7 +154,7 @@ class [[sycl_detail::uses_aspects(ext_intel_bf16_conversion)]] bfloat16 { }; } // namespace experimental -} // namespace intel +} // namespace oneapi } // namespace ext } // namespace sycl From 9418f74ee1e1a35918f5bcf99a3d0d57f29dac90 Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Tue, 25 Jan 2022 14:35:02 +0000 Subject: [PATCH 04/45] Changed extension macro name. --- sycl/include/CL/sycl/feature_test.hpp.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sycl/include/CL/sycl/feature_test.hpp.in b/sycl/include/CL/sycl/feature_test.hpp.in index e6053ebf4ff1c..9bd849ca27d8a 100644 --- a/sycl/include/CL/sycl/feature_test.hpp.in +++ b/sycl/include/CL/sycl/feature_test.hpp.in @@ -46,7 +46,7 @@ namespace sycl { #define SYCL_EXT_ONEAPI_USE_PINNED_HOST_MEMORY_PROPERTY 1 #define SYCL_EXT_ONEAPI_SRGB 1 #define SYCL_EXT_ONEAPI_SUB_GROUP 1 -#define SYCL_EXT_INTEL_BF16_CONVERSION 1 +#define SYCL_EXT_ONEAPI_BF16_CONVERSION 1 #define SYCL_EXT_INTEL_BITCAST 1 #define SYCL_EXT_INTEL_DATAFLOW_PIPES 1 #ifdef __clang__ From 4d99f3f97c06dc529d1d6b16df645b1af27fa8a6 Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Thu, 17 Feb 2022 10:22:48 +0000 Subject: [PATCH 05/45] fixed test. 
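
The test now takes `bfloat16` from the oneapi experimental namespace instead of the
intel one. For reference, a minimal usage sketch against the relocated header
(illustrative only, not part of the test itself; the include path follows the new
header added in patch 01, and the arithmetic relies on the operators defined there):

    #include <sycl/ext/oneapi/experimental/bfloat16.hpp>

    using sycl::ext::oneapi::experimental::bfloat16;

    // Device code only: from_float/to_float throw on the host.
    float bf16_round_trip(float x) {
      bfloat16 b{x};                // float -> bfloat16, round to nearest even
      b += 1.0f;                    // compound ops are evaluated in float
      return static_cast<float>(b); // bfloat16 -> float
    }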
--- sycl/test/extensions/bfloat16.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sycl/test/extensions/bfloat16.cpp b/sycl/test/extensions/bfloat16.cpp index 6be5459642d0c..dd87806942f8a 100644 --- a/sycl/test/extensions/bfloat16.cpp +++ b/sycl/test/extensions/bfloat16.cpp @@ -2,10 +2,10 @@ // UNSUPPORTED: cuda || hip_amd -#include +#include #include -using sycl::ext::intel::experimental::bfloat16; +using sycl::ext::oneapi::experimental::bfloat16; SYCL_EXTERNAL uint16_t some_bf16_intrinsic(uint16_t x, uint16_t y); SYCL_EXTERNAL void foo(long x, sycl::half y); From 3982001259745c617ad78d57dc67512e8d7ff6e9 Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Fri, 4 Mar 2022 15:41:39 +0000 Subject: [PATCH 06/45] Used neg ptx7.0 builtin for unary minus --- clang/include/clang/Basic/BuiltinsNVPTX.def | 5 +++++ llvm/include/llvm/IR/IntrinsicsNVVM.td | 13 +++++++++++++ llvm/lib/Target/NVPTX/NVPTXIntrinsics.td | 9 +++++++++ .../sycl/ext/oneapi/experimental/bfloat16.hpp | 13 +++++++++++-- 4 files changed, 38 insertions(+), 2 deletions(-) diff --git a/clang/include/clang/Basic/BuiltinsNVPTX.def b/clang/include/clang/Basic/BuiltinsNVPTX.def index 449e4d1256944..955dbbaae8f0d 100644 --- a/clang/include/clang/Basic/BuiltinsNVPTX.def +++ b/clang/include/clang/Basic/BuiltinsNVPTX.def @@ -182,6 +182,11 @@ BUILTIN(__nvvm_fabs_ftz_f, "ff", "") BUILTIN(__nvvm_fabs_f, "ff", "") BUILTIN(__nvvm_fabs_d, "dd", "") +// Neg + +TARGET_BUILTIN(__nvvm_neg_bf16, "ZUsZUs", "", AND(SM_80,PTX70)) +TARGET_BUILTIN(__nvvm_neg_bf16x2, "ZUiZUi", "", AND(SM_80,PTX70)) + // Round BUILTIN(__nvvm_round_ftz_f, "ff", "") diff --git a/llvm/include/llvm/IR/IntrinsicsNVVM.td b/llvm/include/llvm/IR/IntrinsicsNVVM.td index 33ba30d782ff3..b7b0813f05292 100644 --- a/llvm/include/llvm/IR/IntrinsicsNVVM.td +++ b/llvm/include/llvm/IR/IntrinsicsNVVM.td @@ -740,6 +740,19 @@ let TargetPrefix = "nvvm" in { def int_nvvm_fabs_d : GCCBuiltin<"__nvvm_fabs_d">, DefaultAttrsIntrinsic<[llvm_double_ty], [llvm_double_ty], [IntrNoMem, IntrSpeculatable]>; +// +// Neg bf16, bf16x2 +// + + foreach unary = ["neg"] in { + def int_nvvm_ # unary # _bf16 : + GCCBuiltin, + DefaultAttrsIntrinsic<[llvm_i16_ty], [llvm_i16_ty], [IntrNoMem]>; + def int_nvvm_ # unary # _bf16x2 : + GCCBuiltin, + DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>; + } + // // Round // diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td index ec004c5923ece..af9e1270bc5f5 100644 --- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td +++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td @@ -719,6 +719,15 @@ def INT_NVVM_FABS_F : F_MATH_1<"abs.f32 \t$dst, $src0;", Float32Regs, def INT_NVVM_FABS_D : F_MATH_1<"abs.f64 \t$dst, $src0;", Float64Regs, Float64Regs, int_nvvm_fabs_d>; +// +// Neg bf16, bf16x2 +// + +def INT_NVVM_NEG_BF16 : F_MATH_1<"neg.bf16 \t$dst, $src0;", Int16Regs, + Int16Regs, int_nvvm_neg_bf16>; +def INT_NVVM_NEG_BF16X2 : F_MATH_1<"neg.bf16x2 \t$dst, $src0;", Int32Regs, + Int32Regs, int_nvvm_neg_bf16x2>; + // // Round // diff --git a/sycl/include/sycl/ext/oneapi/experimental/bfloat16.hpp b/sycl/include/sycl/ext/oneapi/experimental/bfloat16.hpp index ef1f01d5340ae..3768c65aab6a3 100644 --- a/sycl/include/sycl/ext/oneapi/experimental/bfloat16.hpp +++ b/sycl/include/sycl/ext/oneapi/experimental/bfloat16.hpp @@ -42,7 +42,7 @@ class [[sycl_detail::uses_aspects(ext_intel_bf16_conversion)]] bfloat16 { static float to_float(const storage_t &a) { #if defined(__SYCL_DEVICE_ONLY__) #if defined(__NVPTX__) - unsigned int y = a; 
+ uint32_t y = a; y = y << 16; float *res = reinterpret_cast(&y); return *res; @@ -81,7 +81,16 @@ class [[sycl_detail::uses_aspects(ext_intel_bf16_conversion)]] bfloat16 { // Unary minus operator overloading friend bfloat16 operator-(bfloat16 &lhs) { - return bfloat16{-to_float(lhs.value)}; +#if defined(__SYCL_DEVICE_ONLY__) +#if defined(__NVPTX__) + return from_bits(__nvvm_neg_bf16(lhs.value)); +#else + return bfloat16{-__spirv_ConvertBF16ToFINTEL(lhs.value)}; +#endif +#else + throw exception{errc::feature_not_supported, + "Bfloat16 unary minus is not supported on host device"}; +#endif } // Increment and decrement operators overloading From 8d2d11fecfbc1e9caebdc460c7a9cb19b4e18774 Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Mon, 7 Mar 2022 16:47:32 +0000 Subject: [PATCH 07/45] Replaced SYCL_EXT_INTEL_BF16_CONVERSION.asciidoc with SYCL_EXT_ONEAPI_BF16_CONVERSION.asciidoc --- .../SYCL_EXT_ONEAPI_BF16_CONVERSION.asciidoc | 336 ++++++++++++++++++ 1 file changed, 336 insertions(+) create mode 100644 sycl/doc/extensions/experimental/SYCL_EXT_ONEAPI_BF16_CONVERSION.asciidoc diff --git a/sycl/doc/extensions/experimental/SYCL_EXT_ONEAPI_BF16_CONVERSION.asciidoc b/sycl/doc/extensions/experimental/SYCL_EXT_ONEAPI_BF16_CONVERSION.asciidoc new file mode 100644 index 0000000000000..bf0a799671ffa --- /dev/null +++ b/sycl/doc/extensions/experimental/SYCL_EXT_ONEAPI_BF16_CONVERSION.asciidoc @@ -0,0 +1,336 @@ += sycl_oneapi_bf16_conversion + +:source-highlighter: coderay +:coderay-linenums-mode: table + +// This section needs to be after the document title. +:doctype: book +:toc2: +:toc: left +:encoding: utf-8 +:lang: en + +:blank: pass:[ +] + +// Set the default source code type in this document to C++, +// for syntax highlighting purposes. This is needed because +// docbook uses c++ and html5 uses cpp. +:language: {basebackend@docbook:c++:cpp} + +// This is necessary for asciidoc, but not for asciidoctor +:cpp: C++ + +== Notice + +IMPORTANT: This specification is a draft. + +Copyright (c) 2021-2022 Intel Corporation. All rights reserved. + +NOTE: Khronos(R) is a registered trademark and SYCL(TM) and SPIR(TM) are +trademarks of The Khronos Group Inc. OpenCL(TM) is a trademark of Apple Inc. +used by permission by Khronos. + +== Dependencies + +This extension is written against the SYCL 2020 specification, Revision 4. + +== Status + +Draft + +This is a preview extension specification, intended to provide early access to +a feature for review and community feedback. When the feature matures, this +specification may be released as a formal extension. + +Because the interfaces defined by this specification are not final and are +subject to change they are not intended to be used by shipping software +products. + +== Version + +Revision: 4 + +== Introduction + +This extension adds functionality to convert value of single-precision +floating-point type(`float`) to `bfloat16` type and vice versa. The extension +doesn't add support for `bfloat16` type as such, instead it uses 16-bit integer +type(`uint16_t`) as a storage for `bfloat16` values. + +The purpose of conversion from float to bfloat16 is to reduce ammount of memory +required to store floating-point numbers. Computations are expected to be done with +32-bit floating-point values. + +This extension is an optional kernel feature as described in +https://www.khronos.org/registry/SYCL/specs/sycl-2020/html/sycl-2020.html#sec:optional-kernel-features[section 5.7] +of the SYCL 2020 spec. 
Therefore, attempting to submit a kernel using this +feature to a device that does not support it should cause a synchronous +`errc::kernel_not_supported` exception to be thrown from the kernel invocation +command (e.g. from `parallel_for`). + +== Feature test macro + +This extension provides a feature-test macro as described in the core SYCL +specification section 6.3.3 "Feature test macros". Therefore, an implementation +supporting this extension must predefine the macro +`SYCL_EXT_ONEAPI_BF16_CONVERSION` to one of the values defined in the table +below. Applications can test for the existence of this macro to determine if +the implementation supports this feature, or applications can test the macro’s + value to determine which of the extension’s APIs the implementation supports. + +[%header,cols="1,5"] +|=== +|Value |Description +|1 |Initial extension version. Base features are supported. +|=== + +== Extension to `enum class aspect` + +[source] +---- +namespace sycl { +enum class aspect { + ... + ext_oneapi_bf16_conversion +} +} +---- + +If a SYCL device has the `ext_oneapi_bf16_conversion` aspect, then it natively +supports conversion of values of `float` type to `bfloat16` and back. + +If the device doesn't have the aspect, objects of `bfloat16` class must not be +used in the device code. + +**NOTE**: The `ext_oneapi_bf16_conversion` aspect is not yet supported. The +`bfloat16` class is currently supported only on Xe HP GPU and Nvidia A100 GPU. + +== New `bfloat16` class + +The `bfloat16` class below provides the conversion functionality. Conversion +from `float` to `bfloat16` is done with round to nearest even(RTE) rounding +mode. + +[source] +---- +namespace sycl { +namespace ext { +namespace oneapi { +namespace experimental { + +class bfloat16 { + using storage_t = uint16_t; + storage_t value; + +public: + bfloat16() = default; + bfloat16(const bfloat16 &) = default; + ~bfloat16() = default; + + // Explicit conversion functions + static storage_t from_float(const float &a); + static float to_float(const storage_t &a); + + // Convert from float to bfloat16 + bfloat16(const float &a); + bfloat16 &operator=(const float &a); + + // Convert from bfloat16 to float + operator float() const; + + // Get bfloat16 as uint16. + operator storage_t() const; + + // Convert to bool type + explicit operator bool(); + + friend bfloat16 operator-(bfloat16 &bf) { /* ... */ } + + // OP is: prefix ++, -- + friend bfloat16 &operatorOP(bfloat16 &bf) { /* ... */ } + + // OP is: postfix ++, -- + friend bfloat16 operatorOP(bfloat16 &bf, int) { /* ... */ } + + // OP is: +=, -=, *=, /= + friend bfloat16 &operatorOP(bfloat16 &lhs, const bfloat16 &rhs) { /* ... */ } + + // OP is +, -, *, / + friend bfloat16 operatorOP(const bfloat16 &lhs, const bfloat16 &rhs) { /* ... */ } + template + friend bfloat16 operatorOP(const bfloat16 &lhs, const T &rhs) { /* ... */ } + template + friend bfloat16 operatorOP(const T &lhs, const bfloat16 &rhs) { /* ... */ } + + // OP is ==,!=, <, >, <=, >= + friend bool operatorOP(const bfloat16 &lhs, const bfloat16 &rhs) { /* ... */ } + template + friend bool operatorOP(const bfloat16 &lhs, const T &rhs) { /* ... */ } + template + friend bool operatorOP(const T &lhs, const bfloat16 &rhs) { /* ... */ } +}; + +} // namespace experimental +} // namespace oneapi +} // namespace ext +} // namespace sycl +---- + +Table 1. Member functions of `bfloat16` class. 
+|=== +| Member Function | Description + +| `static storage_t from_float(const float &a);` +| Explicitly convert from `float` to `bfloat16`. + +| `static float to_float(const storage_t &a);` +| Interpret `a` as `bfloat16` and explicitly convert it to `float`. + +| `bfloat16(const float& a);` +| Construct `bfloat16` from `float`. Converts `float` to `bfloat16`. + +| `bfloat16 &operator=(const float &a);` +| Replace the value with `a` converted to `bfloat16` + +| `operator float() const;` +| Return `bfloat16` value converted to `float`. + +| `operator storage_t() const;` +| Return `uint16_t` value, whose bits represent `bfloat16` value. + +| `explicit operator bool() { /* ... */ }` +| Convert `bfloat16` to `bool` type. Return `false` if the value equals to + zero, return `true` otherwise. + +| `friend bfloat16 operator-(bfloat16 &bf) { /* ... */ }` +| Construct new instance of `bfloat16` class with negated value of the `bf`. + +| `friend bfloat16 &operatorOP(bfloat16 &bf) { /* ... */ }` +| Perform an in-place `OP` prefix arithmetic operation on the `bf`, + assigning the result to the `bf` and return the `bf`. + + OP is: `++, --` + +| `friend bfloat16 operatorOP(bfloat16 &bf, int) { /* ... */ }` +| Perform an in-place `OP` postfix arithmetic operation on `bf`, assigning + the result to the `bf` and return a copy of `bf` before the operation is + performed. + + OP is: `++, --` + +| `friend bfloat16 operatorOP(const bfloat16 &lhs, const bfloat16 &rhs) { /* ... */ }` +| Perform an in-place `OP` arithmetic operation between the `lhs` and the `rhs` + and return the `lhs`. + + OP is: `+=, -=, *=, /=` + +| `friend type operatorOP(const bfloat16 &lhs, const bfloat16 &rhs) { /* ... */ }` +| Construct a new instance of the `bfloat16` class with the value of the new + `bfloat16` instance being the result of an OP arithmetic operation between + the `lhs` `bfloat16` and `rhs` `bfloat16` values. + + OP is `+, -, *, /` + +| `template + friend bfloat16 operatorOP(const bfloat16 &lhs, const T &rhs) { /* ... */ }` +| Construct a new instance of the `bfloat16` class with the value of the new + `bfloat16` instance being the result of an OP arithmetic operation between + the `lhs` `bfloat16` value and `rhs` of template type `T`. Type `T` must be + convertible to `float`. + + OP is `+, -, *, /` + +| `template + friend bfloat16 operatorOP(const T &lhs, const bfloat16 &rhs) { /* ... */ }` +| Construct a new instance of the `bfloat16` class with the value of the new + `bfloat16` instance being the result of an OP arithmetic operation between + the `lhs` of template type `T` and `rhs` `bfloat16` value. Type `T` must be + convertible to `float`. + + OP is `+, -, *, /` + +| `friend bool operatorOP(const bfloat16 &lhs, const bfloat16 &rhs) { /* ... */ }` +| Perform comparison operation OP between `lhs` `bfloat16` and `rhs` `bfloat16` + values and return the result as a boolean value. + +OP is `==, !=, <, >, <=, >=` + +| `template + friend bool operatorOP(const bfloat16 &lhs, const T &rhs) { /* ... */ }` +| Perform comparison operation OP between `lhs` `bfloat16` and `rhs` of + template type `T` and return the result as a boolean value. Type `T` must be + convertible to `float`. + +OP is `==, !=, <, >, <=, >=` + +| `template + friend bool operatorOP(const T &lhs, const bfloat16 &rhs) { /* ... */ }` +| Perform comparison operation OP between `lhs` of template type `T` and `rhs` + `bfloat16` value and return the result as a boolean value. Type `T` must be + convertible to `float`. 
+ +OP is `==, !=, <, >, <=, >=` +|=== + +== Example + +[source] +---- +#include +#include + +using sycl::ext::oneapi::experimental::bfloat16; + +bfloat16 operator+(const bfloat16 &lhs, const bfloat16 &rhs) { + return static_cast(lhs) + static_cast(rhs); +} + +float foo(float a, float b) { + // Convert from float to bfloat16. + bfloat16 A {a}; + bfloat16 B {b}; + + // Convert A and B from bfloat16 to float, do addition on floating-pointer + // numbers, then convert the result to bfloat16 and store it in C. + bfloat16 C = A + B; + + // Return the result converted from bfloat16 to float. + return C; +} + +int main (int argc, char *argv[]) { + float data[3] = {7.0, 8.1, 0.0}; + sycl::device dev; + sycl::queue deviceQueue{dev}; + sycl::buffer buf {data, sycl::range<1> {3}}; + + if (dev.has(sycl::aspect::ext_oneapi_bf16_conversion)) { + deviceQueue.submit ([&] (sycl::handler& cgh) { + auto numbers = buf.get_access (cgh); + cgh.single_task ([=] () { + numbers[2] = foo(numbers[0], numbers[1]); + }); + }); + } + return 0; +} +---- + +== Issues + +None. + +== Revision History + +[cols="5,15,15,70"] +[grid="rows"] +[options="header"] +|======================================== +|Rev|Date|Author|Changes +|1|2021-08-02|Alexey Sotkin |Initial public working draft +|2|2021-08-17|Alexey Sotkin |Add explicit conversion functions + + Add operator overloadings + + Apply code review suggestions +|3|2021-08-18|Alexey Sotkin |Remove `uint16_t` constructor +|4|2022-03-07|Jack Kirk |Switch from Intel vendor specific to oneapi +|======================================== From 8a29c4412c06b1246bdcd0fa5954b70957211e36 Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Tue, 15 Mar 2022 14:41:54 +0000 Subject: [PATCH 08/45] Renamed extension to cover all bfloat16 funct. Removed aspect reference: can be added once the ext_oneapi_bfloat16 aspect is merged. --- ....asciidoc => sycl_ext_oneapi_bfloat16.asciidoc} | 14 +++++++------- .../sycl/ext/oneapi/experimental/bfloat16.hpp | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) rename sycl/doc/extensions/experimental/{SYCL_EXT_ONEAPI_BF16_CONVERSION.asciidoc => sycl_ext_oneapi_bfloat16.asciidoc} (96%) diff --git a/sycl/doc/extensions/experimental/SYCL_EXT_ONEAPI_BF16_CONVERSION.asciidoc b/sycl/doc/extensions/experimental/sycl_ext_oneapi_bfloat16.asciidoc similarity index 96% rename from sycl/doc/extensions/experimental/SYCL_EXT_ONEAPI_BF16_CONVERSION.asciidoc rename to sycl/doc/extensions/experimental/sycl_ext_oneapi_bfloat16.asciidoc index bf0a799671ffa..175219e23c47f 100644 --- a/sycl/doc/extensions/experimental/SYCL_EXT_ONEAPI_BF16_CONVERSION.asciidoc +++ b/sycl/doc/extensions/experimental/sycl_ext_oneapi_bfloat16.asciidoc @@ -1,4 +1,4 @@ -= sycl_oneapi_bf16_conversion += sycl_ext_oneapi_bfloat16 :source-highlighter: coderay :coderay-linenums-mode: table @@ -73,7 +73,7 @@ command (e.g. from `parallel_for`). This extension provides a feature-test macro as described in the core SYCL specification section 6.3.3 "Feature test macros". Therefore, an implementation supporting this extension must predefine the macro -`SYCL_EXT_ONEAPI_BF16_CONVERSION` to one of the values defined in the table +`SYCL_EXT_ONEAPI_BFLOAT16` to one of the values defined in the table below. Applications can test for the existence of this macro to determine if the implementation supports this feature, or applications can test the macro’s value to determine which of the extension’s APIs the implementation supports. 
@@ -91,18 +91,18 @@ the implementation supports this feature, or applications can test the macro’s namespace sycl { enum class aspect { ... - ext_oneapi_bf16_conversion + ext_oneapi_bfloat16 } } ---- -If a SYCL device has the `ext_oneapi_bf16_conversion` aspect, then it natively +If a SYCL device has the `ext_oneapi_bfloat16` aspect, then it natively supports conversion of values of `float` type to `bfloat16` and back. If the device doesn't have the aspect, objects of `bfloat16` class must not be used in the device code. -**NOTE**: The `ext_oneapi_bf16_conversion` aspect is not yet supported. The +**NOTE**: The `ext_oneapi_bfloat16` aspect is not yet supported. The `bfloat16` class is currently supported only on Xe HP GPU and Nvidia A100 GPU. == New `bfloat16` class @@ -304,7 +304,7 @@ int main (int argc, char *argv[]) { sycl::queue deviceQueue{dev}; sycl::buffer buf {data, sycl::range<1> {3}}; - if (dev.has(sycl::aspect::ext_oneapi_bf16_conversion)) { + if (dev.has(sycl::aspect::ext_oneapi_bfloat16)) { deviceQueue.submit ([&] (sycl::handler& cgh) { auto numbers = buf.get_access (cgh); cgh.single_task ([=] () { @@ -332,5 +332,5 @@ None. Add operator overloadings + Apply code review suggestions |3|2021-08-18|Alexey Sotkin |Remove `uint16_t` constructor -|4|2022-03-07|Jack Kirk |Switch from Intel vendor specific to oneapi +|4|2022-03-07|Aidan Belton and Jack Kirk |Switch from Intel vendor specific to oneapi |======================================== diff --git a/sycl/include/sycl/ext/oneapi/experimental/bfloat16.hpp b/sycl/include/sycl/ext/oneapi/experimental/bfloat16.hpp index 3768c65aab6a3..1190c80631928 100644 --- a/sycl/include/sycl/ext/oneapi/experimental/bfloat16.hpp +++ b/sycl/include/sycl/ext/oneapi/experimental/bfloat16.hpp @@ -17,7 +17,7 @@ namespace ext { namespace oneapi { namespace experimental { -class [[sycl_detail::uses_aspects(ext_intel_bf16_conversion)]] bfloat16 { +class bfloat16 { using storage_t = uint16_t; storage_t value; From f1fba08b89c43795e68ca5e991520904fa556a8a Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Wed, 16 Mar 2022 11:08:25 +0000 Subject: [PATCH 09/45] Updated macro name --- sycl/include/CL/sycl/feature_test.hpp.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sycl/include/CL/sycl/feature_test.hpp.in b/sycl/include/CL/sycl/feature_test.hpp.in index 516464e1b9fe3..11a7afdaed8ed 100644 --- a/sycl/include/CL/sycl/feature_test.hpp.in +++ b/sycl/include/CL/sycl/feature_test.hpp.in @@ -55,7 +55,7 @@ namespace sycl { #define SYCL_EXT_ONEAPI_SUB_GROUP 1 #define SYCL_EXT_ONEAPI_PROPERTIES 1 #define SYCL_EXT_ONEAPI_NATIVE_MATH 1 -#define SYCL_EXT_ONEAPI_BF16_CONVERSION 1 +#define SYCL_EXT_ONEAPI_BFLOAT16 1 #define SYCL_EXT_INTEL_DATAFLOW_PIPES 1 #ifdef __clang__ #if __has_extension(sycl_extended_atomics) From 461ddb8206653bacae049e7c46b245bc1b2ac954 Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Wed, 16 Mar 2022 15:57:51 +0000 Subject: [PATCH 10/45] Removed old extension doc --- .../sycl_ext_intel_bf16_conversion.asciidoc | 335 ------------------ 1 file changed, 335 deletions(-) delete mode 100644 sycl/doc/extensions/experimental/sycl_ext_intel_bf16_conversion.asciidoc diff --git a/sycl/doc/extensions/experimental/sycl_ext_intel_bf16_conversion.asciidoc b/sycl/doc/extensions/experimental/sycl_ext_intel_bf16_conversion.asciidoc deleted file mode 100644 index 9b1018ced0b34..0000000000000 --- a/sycl/doc/extensions/experimental/sycl_ext_intel_bf16_conversion.asciidoc +++ /dev/null @@ -1,335 +0,0 @@ -= SYCL_INTEL_bf16_conversion - 
-:source-highlighter: coderay -:coderay-linenums-mode: table - -// This section needs to be after the document title. -:doctype: book -:toc2: -:toc: left -:encoding: utf-8 -:lang: en - -:blank: pass:[ +] - -// Set the default source code type in this document to C++, -// for syntax highlighting purposes. This is needed because -// docbook uses c++ and html5 uses cpp. -:language: {basebackend@docbook:c++:cpp} - -// This is necessary for asciidoc, but not for asciidoctor -:cpp: C++ - -== Notice - -IMPORTANT: This specification is a draft. - -Copyright (c) 2021 Intel Corporation. All rights reserved. - -NOTE: Khronos(R) is a registered trademark and SYCL(TM) and SPIR(TM) are -trademarks of The Khronos Group Inc. OpenCL(TM) is a trademark of Apple Inc. -used by permission by Khronos. - -== Dependencies - -This extension is written against the SYCL 2020 specification, Revision 3. - -== Status - -Draft - -This is a preview extension specification, intended to provide early access to -a feature for review and community feedback. When the feature matures, this -specification may be released as a formal extension. - -Because the interfaces defined by this specification are not final and are -subject to change they are not intended to be used by shipping software -products. - -== Version - -Revision: 3 - -== Introduction - -This extension adds functionality to convert value of single-precision -floating-point type(`float`) to `bfloat16` type and vice versa. The extension -doesn't add support for `bfloat16` type as such, instead it uses 16-bit integer -type(`uint16_t`) as a storage for `bfloat16` values. - -The purpose of conversion from float to bfloat16 is to reduce ammount of memory -required to store floating-point numbers. Computations are expected to be done with -32-bit floating-point values. - -This extension is an optional kernel feature as described in -https://www.khronos.org/registry/SYCL/specs/sycl-2020/html/sycl-2020.html#sec:optional-kernel-features[section 5.7] -of the SYCL 2020 spec. Therefore, attempting to submit a kernel using this -feature to a device that does not support it should cause a synchronous -`errc::kernel_not_supported` exception to be thrown from the kernel invocation -command (e.g. from `parallel_for`). - -== Feature test macro - -This extension provides a feature-test macro as described in the core SYCL -specification section 6.3.3 "Feature test macros". Therefore, an implementation -supporting this extension must predefine the macro -`SYCL_EXT_INTEL_BF16_CONVERSION` to one of the values defined in the table -below. Applications can test for the existence of this macro to determine if -the implementation supports this feature, or applications can test the macro’s - value to determine which of the extension’s APIs the implementation supports. - -[%header,cols="1,5"] -|=== -|Value |Description -|1 |Initial extension version. Base features are supported. -|=== - -== Extension to `enum class aspect` - -[source] ----- -namespace sycl { -enum class aspect { - ... - ext_intel_bf16_conversion -} -} ----- - -If a SYCL device has the `ext_intel_bf16_conversion` aspect, then it natively -supports conversion of values of `float` type to `bfloat16` and back. - -If the device doesn't have the aspect, objects of `bfloat16` class must not be -used in the device code. - -**NOTE**: The `ext_intel_bf16_conversion` aspect is not yet supported. The -`bfloat16` class is currently supported only on Xe HP GPU. 
- -== New `bfloat16` class - -The `bfloat16` class below provides the conversion functionality. Conversion -from `float` to `bfloat16` is done with round to nearest even(RTE) rounding -mode. - -[source] ----- -namespace sycl { -namespace ext { -namespace intel { -namespace experimental { - -class bfloat16 { - using storage_t = uint16_t; - storage_t value; - -public: - bfloat16() = default; - bfloat16(const bfloat16 &) = default; - ~bfloat16() = default; - - // Explicit conversion functions - static storage_t from_float(const float &a); - static float to_float(const storage_t &a); - - // Convert from float to bfloat16 - bfloat16(const float &a); - bfloat16 &operator=(const float &a); - - // Convert from bfloat16 to float - operator float() const; - - // Get bfloat16 as uint16. - operator storage_t() const; - - // Convert to bool type - explicit operator bool(); - - friend bfloat16 operator-(bfloat16 &bf) { /* ... */ } - - // OP is: prefix ++, -- - friend bfloat16 &operatorOP(bfloat16 &bf) { /* ... */ } - - // OP is: postfix ++, -- - friend bfloat16 operatorOP(bfloat16 &bf, int) { /* ... */ } - - // OP is: +=, -=, *=, /= - friend bfloat16 &operatorOP(bfloat16 &lhs, const bfloat16 &rhs) { /* ... */ } - - // OP is +, -, *, / - friend bfloat16 operatorOP(const bfloat16 &lhs, const bfloat16 &rhs) { /* ... */ } - template - friend bfloat16 operatorOP(const bfloat16 &lhs, const T &rhs) { /* ... */ } - template - friend bfloat16 operatorOP(const T &lhs, const bfloat16 &rhs) { /* ... */ } - - // OP is ==,!=, <, >, <=, >= - friend bool operatorOP(const bfloat16 &lhs, const bfloat16 &rhs) { /* ... */ } - template - friend bool operatorOP(const bfloat16 &lhs, const T &rhs) { /* ... */ } - template - friend bool operatorOP(const T &lhs, const bfloat16 &rhs) { /* ... */ } -}; - -} // namespace experimental -} // namespace intel -} // namespace ext -} // namespace sycl ----- - -Table 1. Member functions of `bfloat16` class. -|=== -| Member Function | Description - -| `static storage_t from_float(const float &a);` -| Explicitly convert from `float` to `bfloat16`. - -| `static float to_float(const storage_t &a);` -| Interpret `a` as `bfloat16` and explicitly convert it to `float`. - -| `bfloat16(const float& a);` -| Construct `bfloat16` from `float`. Converts `float` to `bfloat16`. - -| `bfloat16 &operator=(const float &a);` -| Replace the value with `a` converted to `bfloat16` - -| `operator float() const;` -| Return `bfloat16` value converted to `float`. - -| `operator storage_t() const;` -| Return `uint16_t` value, whose bits represent `bfloat16` value. - -| `explicit operator bool() { /* ... */ }` -| Convert `bfloat16` to `bool` type. Return `false` if the value equals to - zero, return `true` otherwise. - -| `friend bfloat16 operator-(bfloat16 &bf) { /* ... */ }` -| Construct new instance of `bfloat16` class with negated value of the `bf`. - -| `friend bfloat16 &operatorOP(bfloat16 &bf) { /* ... */ }` -| Perform an in-place `OP` prefix arithmetic operation on the `bf`, - assigning the result to the `bf` and return the `bf`. - - OP is: `++, --` - -| `friend bfloat16 operatorOP(bfloat16 &bf, int) { /* ... */ }` -| Perform an in-place `OP` postfix arithmetic operation on `bf`, assigning - the result to the `bf` and return a copy of `bf` before the operation is - performed. - - OP is: `++, --` - -| `friend bfloat16 operatorOP(const bfloat16 &lhs, const bfloat16 &rhs) { /* ... */ }` -| Perform an in-place `OP` arithmetic operation between the `lhs` and the `rhs` - and return the `lhs`. 
- - OP is: `+=, -=, *=, /=` - -| `friend type operatorOP(const bfloat16 &lhs, const bfloat16 &rhs) { /* ... */ }` -| Construct a new instance of the `bfloat16` class with the value of the new - `bfloat16` instance being the result of an OP arithmetic operation between - the `lhs` `bfloat16` and `rhs` `bfloat16` values. - - OP is `+, -, *, /` - -| `template - friend bfloat16 operatorOP(const bfloat16 &lhs, const T &rhs) { /* ... */ }` -| Construct a new instance of the `bfloat16` class with the value of the new - `bfloat16` instance being the result of an OP arithmetic operation between - the `lhs` `bfloat16` value and `rhs` of template type `T`. Type `T` must be - convertible to `float`. - - OP is `+, -, *, /` - -| `template - friend bfloat16 operatorOP(const T &lhs, const bfloat16 &rhs) { /* ... */ }` -| Construct a new instance of the `bfloat16` class with the value of the new - `bfloat16` instance being the result of an OP arithmetic operation between - the `lhs` of template type `T` and `rhs` `bfloat16` value. Type `T` must be - convertible to `float`. - - OP is `+, -, *, /` - -| `friend bool operatorOP(const bfloat16 &lhs, const bfloat16 &rhs) { /* ... */ }` -| Perform comparison operation OP between `lhs` `bfloat16` and `rhs` `bfloat16` - values and return the result as a boolean value. - -OP is `==, !=, <, >, <=, >=` - -| `template - friend bool operatorOP(const bfloat16 &lhs, const T &rhs) { /* ... */ }` -| Perform comparison operation OP between `lhs` `bfloat16` and `rhs` of - template type `T` and return the result as a boolean value. Type `T` must be - convertible to `float`. - -OP is `==, !=, <, >, <=, >=` - -| `template - friend bool operatorOP(const T &lhs, const bfloat16 &rhs) { /* ... */ }` -| Perform comparison operation OP between `lhs` of template type `T` and `rhs` - `bfloat16` value and return the result as a boolean value. Type `T` must be - convertible to `float`. - -OP is `==, !=, <, >, <=, >=` -|=== - -== Example - -[source] ----- -#include -#include - -using sycl::ext::intel::experimental::bfloat16; - -bfloat16 operator+(const bfloat16 &lhs, const bfloat16 &rhs) { - return static_cast(lhs) + static_cast(rhs); -} - -float foo(float a, float b) { - // Convert from float to bfloat16. - bfloat16 A {a}; - bfloat16 B {b}; - - // Convert A and B from bfloat16 to float, do addition on floating-pointer - // numbers, then convert the result to bfloat16 and store it in C. - bfloat16 C = A + B; - - // Return the result converted from bfloat16 to float. - return C; -} - -int main (int argc, char *argv[]) { - float data[3] = {7.0, 8.1, 0.0}; - sycl::device dev; - sycl::queue deviceQueue{dev}; - sycl::buffer buf {data, sycl::range<1> {3}}; - - if (dev.has(sycl::aspect::ext_intel_bf16_conversion)) { - deviceQueue.submit ([&] (sycl::handler& cgh) { - auto numbers = buf.get_access (cgh); - cgh.single_task ([=] () { - numbers[2] = foo(numbers[0], numbers[1]); - }); - }); - } - return 0; -} ----- - -== Issues - -None. 
- -== Revision History - -[cols="5,15,15,70"] -[grid="rows"] -[options="header"] -|======================================== -|Rev|Date|Author|Changes -|1|2021-08-02|Alexey Sotkin |Initial public working draft -|2|2021-08-17|Alexey Sotkin |Add explicit conversion functions + - Add operator overloadings + - Apply code review suggestions -|3|2021-08-18|Alexey Sotkin |Remove `uint16_t` constructor -|======================================== From e433fbcac8a3019510889a7f0a4db4c5f466393f Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Thu, 31 Mar 2022 12:02:37 +0100 Subject: [PATCH 11/45] typo --- .../extensions/experimental/sycl_ext_oneapi_bfloat16.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sycl/doc/extensions/experimental/sycl_ext_oneapi_bfloat16.asciidoc b/sycl/doc/extensions/experimental/sycl_ext_oneapi_bfloat16.asciidoc index 175219e23c47f..88b6c73b02514 100644 --- a/sycl/doc/extensions/experimental/sycl_ext_oneapi_bfloat16.asciidoc +++ b/sycl/doc/extensions/experimental/sycl_ext_oneapi_bfloat16.asciidoc @@ -57,7 +57,7 @@ floating-point type(`float`) to `bfloat16` type and vice versa. The extension doesn't add support for `bfloat16` type as such, instead it uses 16-bit integer type(`uint16_t`) as a storage for `bfloat16` values. -The purpose of conversion from float to bfloat16 is to reduce ammount of memory +The purpose of conversion from float to bfloat16 is to reduce the amount of memory required to store floating-point numbers. Computations are expected to be done with 32-bit floating-point values. From 48ee8ffa00bafc2b39f57b5589adafaf2e2d10a2 Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Fri, 1 Apr 2022 18:44:15 +0100 Subject: [PATCH 12/45] Initial bfloat16 function impl. Signed-off-by: jack.kirk --- .../sycl/ext/oneapi/bf16_storage_builtins.hpp | 87 ------------------ .../sycl/ext/oneapi/experimental/builtins.hpp | 91 +++++++++++++++++++ .../ext/oneapi/matrix/matrix-tensorcore.hpp | 7 +- 3 files changed, 96 insertions(+), 89 deletions(-) delete mode 100644 sycl/include/sycl/ext/oneapi/bf16_storage_builtins.hpp diff --git a/sycl/include/sycl/ext/oneapi/bf16_storage_builtins.hpp b/sycl/include/sycl/ext/oneapi/bf16_storage_builtins.hpp deleted file mode 100644 index ee5c58b67a440..0000000000000 --- a/sycl/include/sycl/ext/oneapi/bf16_storage_builtins.hpp +++ /dev/null @@ -1,87 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include -#include - -__SYCL_INLINE_NAMESPACE(cl) { -namespace sycl { -namespace ext { -namespace oneapi { - -namespace detail { - -template struct is_bf16_storage_type { - static constexpr int value = false; -}; - -template <> struct is_bf16_storage_type { - static constexpr int value = true; -}; - -template <> struct is_bf16_storage_type { - static constexpr int value = true; -}; - -template struct is_bf16_storage_type> { - static constexpr int value = true; -}; - -template struct is_bf16_storage_type> { - static constexpr int value = true; -}; - -} // namespace detail - -template -std::enable_if_t::value, T> fabs(T x) { -#ifdef __SYCL_DEVICE_ONLY__ - return __clc_fabs(x); -#else - (void)x; - throw runtime_error("bf16 is not supported on host device.", - PI_INVALID_DEVICE); -#endif -} -template -std::enable_if_t::value, T> fmin(T x, T y) { -#ifdef __SYCL_DEVICE_ONLY__ - return __clc_fmin(x, y); -#else - (void)x; - (void)y; - throw runtime_error("bf16 is not supported on host device.", - PI_INVALID_DEVICE); -#endif -} -template -std::enable_if_t::value, T> fmax(T x, T y) { -#ifdef __SYCL_DEVICE_ONLY__ - return 
__clc_fmax(x, y); -#else - (void)x; - (void)y; - throw runtime_error("bf16 is not supported on host device.", - PI_INVALID_DEVICE); -#endif -} -template -std::enable_if_t::value, T> fma(T x, T y, T z) { -#ifdef __SYCL_DEVICE_ONLY__ - return __clc_fma(x, y, z); -#else - (void)x; - (void)y; - (void)z; - throw runtime_error("bf16 is not supported on host device.", - PI_INVALID_DEVICE); -#endif -} - -} // namespace oneapi -} // namespace ext -} // namespace sycl -} // __SYCL_INLINE_NAMESPACE(cl) diff --git a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp index c8fa033d8c79e..4f0f39e3a939f 100644 --- a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp +++ b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp @@ -15,6 +15,7 @@ #include #include +#include // TODO Decide whether to mark functions with this attribute. #define __NOEXC /*noexcept*/ @@ -117,6 +118,96 @@ inline __SYCL_ALWAYS_INLINE } // namespace native +namespace detail { + +template struct is_bf16_storage_type { + static constexpr int value = false; +}; + +template <> struct is_bf16_storage_type { + static constexpr int value = true; +}; + +template <> struct is_bf16_storage_type { + static constexpr int value = true; +}; + +template struct is_bf16_storage_type> { + static constexpr int value = true; +}; + +template struct is_bf16_storage_type> { + static constexpr int value = true; +}; + +} // namespace detail + +template +std::enable_if_t::value, T> fabs(T x) { +#ifdef __SYCL_DEVICE_ONLY__ + return __clc_fabs(x); +#else + (void)x; + throw runtime_error("bf16 is not supported on host device.", + PI_INVALID_DEVICE); +#endif +} +template +std::enable_if_t::value, T> fabs(T x) { +#ifdef __SYCL_DEVICE_ONLY__ + return cl::sycl::ext::oneapi::experimental::bfloat16::from_bits(__clc_fabs(x.raw())); +#else + (void)x; + throw runtime_error("bf16 is not supported on host device.", + PI_INVALID_DEVICE); +#endif +} +template +std::enable_if_t::value, T> fmin(T x, T y) { +#ifdef __SYCL_DEVICE_ONLY__ + return __clc_fmin(x, y); +#else + (void)x; + (void)y; + throw runtime_error("bf16 is not supported on host device.", + PI_INVALID_DEVICE); +#endif +} +template +std::enable_if_t::value, T> fmin(T x, T y) { +#ifdef __SYCL_DEVICE_ONLY__ + return cl::sycl::ext::oneapi::experimental::bfloat16::from_bits(__clc_fmin(x.raw(), y.raw())); +#else + (void)x; + (void)y; + throw runtime_error("bf16 is not supported on host device.", + PI_INVALID_DEVICE); +#endif +} +template +std::enable_if_t::value, T> fmax(T x, T y) { +#ifdef __SYCL_DEVICE_ONLY__ + return __clc_fmax(x, y); +#else + (void)x; + (void)y; + throw runtime_error("bf16 is not supported on host device.", + PI_INVALID_DEVICE); +#endif +} +template +std::enable_if_t::value, T> fma(T x, T y, T z) { +#ifdef __SYCL_DEVICE_ONLY__ + return __clc_fma(x, y, z); +#else + (void)x; + (void)y; + (void)z; + throw runtime_error("bf16 is not supported on host device.", + PI_INVALID_DEVICE); +#endif +} + } // namespace experimental } // namespace oneapi } // namespace ext diff --git a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp index 5c6df9114b161..954c170353ef9 100644 --- a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp +++ b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp @@ -7,6 +7,7 @@ // ===--------------------------------------------------------------------=== // #pragma once +#include __SYCL_INLINE_NAMESPACE(cl) { namespace sycl { @@ -70,6 +71,8 
@@ __SYCL_JOINT_MATRIX_OVERLOAD(int32_t, accumulator, 32, 8, int32_t, 8) // m16n16k16 __SYCL_JOINT_MATRIX_OVERLOAD(uint16_t, a, 16, 16, int32_t, 4) __SYCL_JOINT_MATRIX_OVERLOAD(uint16_t, b, 16, 16, int32_t, 4) +__SYCL_JOINT_MATRIX_OVERLOAD(cl::sycl::ext::oneapi::experimental::bfloat16, a, 16, 16, int32_t, 4) +__SYCL_JOINT_MATRIX_OVERLOAD(cl::sycl::ext::oneapi::experimental::bfloat16, b, 16, 16, int32_t, 4) __SYCL_JOINT_MATRIX_OVERLOAD(half, a, 16, 16, int32_t, 8) __SYCL_JOINT_MATRIX_OVERLOAD(half, b, 16, 16, int32_t, 8) __SYCL_JOINT_MATRIX_OVERLOAD(float, accumulator, 16, 16, float, 8) @@ -124,7 +127,7 @@ struct joint_matrix_load_impl< void load(sycl::ext::oneapi::experimental::matrix::joint_matrix< T, Use, NumRows, NumCols, Layout, sycl::sub_group> &res, multi_ptr src, size_t stride) { - if constexpr (std::is_same::value) { + if constexpr (std::is_same::value || std::is_same::value) { int32_t *tileptr = reinterpret_cast(src.get()); if constexpr (NumRows == 16 && NumCols == 16) { if constexpr (Use == @@ -453,7 +456,7 @@ struct joint_matrix_mad_impl< get_layout_pair_id(), 0); } - } else if constexpr (std::is_same::value) { + } else if constexpr (std::is_same::value || std::is_same::value) { __mma_bf16_m16n16k16_mma_f32(D.data, A.data, B.data, C.data, get_layout_pair_id(), 0); } From 4a30a270121002698638b32a473cd761e52e2e5b Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Mon, 4 Apr 2022 11:54:41 +0100 Subject: [PATCH 13/45] Added other bfloat16 scalar cases --- .../sycl/ext/oneapi/experimental/builtins.hpp | 62 ++++++++++++++++--- 1 file changed, 54 insertions(+), 8 deletions(-) diff --git a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp index 4f0f39e3a939f..0ea45e9749a83 100644 --- a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp +++ b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp @@ -143,7 +143,8 @@ template struct is_bf16_storage_type> { } // namespace detail template -std::enable_if_t::value, T> fabs(T x) { +std::enable_if_t::value, T> +fabs(T x) { #ifdef __SYCL_DEVICE_ONLY__ return __clc_fabs(x); #else @@ -152,18 +153,24 @@ std::enable_if_t::value, T> fabs(T PI_INVALID_DEVICE); #endif } + template -std::enable_if_t::value, T> fabs(T x) { +std::enable_if_t< + std::is_same::value, T> +fabs(T x) { #ifdef __SYCL_DEVICE_ONLY__ - return cl::sycl::ext::oneapi::experimental::bfloat16::from_bits(__clc_fabs(x.raw())); + return cl::sycl::ext::oneapi::experimental::bfloat16::from_bits( + __clc_fabs(x.raw())); #else (void)x; throw runtime_error("bf16 is not supported on host device.", PI_INVALID_DEVICE); #endif } + template -std::enable_if_t::value, T> fmin(T x, T y) { +std::enable_if_t::value, T> +fmin(T x, T y) { #ifdef __SYCL_DEVICE_ONLY__ return __clc_fmin(x, y); #else @@ -173,10 +180,14 @@ std::enable_if_t::value, T> fmin(T PI_INVALID_DEVICE); #endif } + template -std::enable_if_t::value, T> fmin(T x, T y) { +std::enable_if_t< + std::is_same::value, T> +fmin(T x, T y) { #ifdef __SYCL_DEVICE_ONLY__ - return cl::sycl::ext::oneapi::experimental::bfloat16::from_bits(__clc_fmin(x.raw(), y.raw())); + return cl::sycl::ext::oneapi::experimental::bfloat16::from_bits( + __clc_fmin(x.raw(), y.raw())); #else (void)x; (void)y; @@ -184,8 +195,10 @@ std::enable_if_t: PI_INVALID_DEVICE); #endif } + template -std::enable_if_t::value, T> fmax(T x, T y) { +std::enable_if_t::value, T> +fmax(T x, T y) { #ifdef __SYCL_DEVICE_ONLY__ return __clc_fmax(x, y); #else @@ -195,8 +208,25 @@ std::enable_if_t::value, T> fmax(T 
PI_INVALID_DEVICE); #endif } + +template +std::enable_if_t< + std::is_same::value, T> +fmax(T x, T y) { +#ifdef __SYCL_DEVICE_ONLY__ + return cl::sycl::ext::oneapi::experimental::bfloat16::from_bits( + __clc_fmax(x.raw(), y.raw())); +#else + (void)x; + (void)y; + throw runtime_error("bf16 is not supported on host device.", + PI_INVALID_DEVICE); +#endif +} + template -std::enable_if_t::value, T> fma(T x, T y, T z) { +std::enable_if_t::value, T> +fma(T x, T y, T z) { #ifdef __SYCL_DEVICE_ONLY__ return __clc_fma(x, y, z); #else @@ -208,6 +238,22 @@ std::enable_if_t::value, T> fma(T #endif } +template +std::enable_if_t< + std::is_same::value, T> +fma(T x, T y, T z) { +#ifdef __SYCL_DEVICE_ONLY__ + return cl::sycl::ext::oneapi::experimental::bfloat16::from_bits( + __clc_fma(x.raw(), y.raw(), z.raw())); +#else + (void)x; + (void)y; + (void)z; + throw runtime_error("bf16 is not supported on host device.", + PI_INVALID_DEVICE); +#endif +} + } // namespace experimental } // namespace oneapi } // namespace ext From 5cb7b0966e8b2e9303a45aec47c9799e3f838c1c Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Mon, 4 Apr 2022 16:39:22 +0100 Subject: [PATCH 14/45] added bfloat16 device code test. --- .../sycl/ext/oneapi/experimental/builtins.hpp | 16 +- .../ext/oneapi/matrix/matrix-tensorcore.hpp | 18 +- .../matrix/matrix-nvptx-bfloat16-test.cpp | 200 ++++++++++++++++++ 3 files changed, 219 insertions(+), 15 deletions(-) create mode 100644 sycl/test/check_device_code/matrix/matrix-nvptx-bfloat16-test.cpp diff --git a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp index 0ea45e9749a83..105cfd8ac5e3b 100644 --- a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp +++ b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp @@ -156,10 +156,10 @@ fabs(T x) { template std::enable_if_t< - std::is_same::value, T> + std::is_same::value, T> fabs(T x) { #ifdef __SYCL_DEVICE_ONLY__ - return cl::sycl::ext::oneapi::experimental::bfloat16::from_bits( + return bfloat16::from_bits( __clc_fabs(x.raw())); #else (void)x; @@ -183,10 +183,10 @@ fmin(T x, T y) { template std::enable_if_t< - std::is_same::value, T> + std::is_same::value, T> fmin(T x, T y) { #ifdef __SYCL_DEVICE_ONLY__ - return cl::sycl::ext::oneapi::experimental::bfloat16::from_bits( + return bfloat16::from_bits( __clc_fmin(x.raw(), y.raw())); #else (void)x; @@ -211,10 +211,10 @@ fmax(T x, T y) { template std::enable_if_t< - std::is_same::value, T> + std::is_same::value, T> fmax(T x, T y) { #ifdef __SYCL_DEVICE_ONLY__ - return cl::sycl::ext::oneapi::experimental::bfloat16::from_bits( + return bfloat16::from_bits( __clc_fmax(x.raw(), y.raw())); #else (void)x; @@ -240,10 +240,10 @@ fma(T x, T y, T z) { template std::enable_if_t< - std::is_same::value, T> + std::is_same::value, T> fma(T x, T y, T z) { #ifdef __SYCL_DEVICE_ONLY__ - return cl::sycl::ext::oneapi::experimental::bfloat16::from_bits( + return bfloat16::from_bits( __clc_fma(x.raw(), y.raw(), z.raw())); #else (void)x; diff --git a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp index 954c170353ef9..ddcf7c02c2f55 100644 --- a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp +++ b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp @@ -40,9 +40,11 @@ __SYCL_JOINT_MATRIX_OVERLOAD(double, b, 4, 8, double, 1) __SYCL_JOINT_MATRIX_OVERLOAD(double, accumulator, 8, 8, double, 2) // m8n32k16 -// bf16 data format uses uint16_t data type +// bf16 data format uint16_t 
implementation is deprecated __SYCL_JOINT_MATRIX_OVERLOAD(uint16_t, a, 8, 16, int32_t, 2) __SYCL_JOINT_MATRIX_OVERLOAD(uint16_t, b, 16, 32, int32_t, 8) +__SYCL_JOINT_MATRIX_OVERLOAD(bfloat16, a, 8, 16, int32_t, 2) +__SYCL_JOINT_MATRIX_OVERLOAD(bfloat16, b, 16, 32, int32_t, 8) __SYCL_JOINT_MATRIX_OVERLOAD(half, a, 8, 16, int32_t, 8) __SYCL_JOINT_MATRIX_OVERLOAD(half, b, 16, 32, int32_t, 8) __SYCL_JOINT_MATRIX_OVERLOAD(float, accumulator, 8, 32, float, 8) @@ -57,6 +59,8 @@ __SYCL_JOINT_MATRIX_OVERLOAD(int32_t, accumulator, 8, 32, int32_t, 8) // m32n8k16 __SYCL_JOINT_MATRIX_OVERLOAD(uint16_t, a, 32, 16, int32_t, 8) __SYCL_JOINT_MATRIX_OVERLOAD(uint16_t, b, 16, 8, int32_t, 2) +__SYCL_JOINT_MATRIX_OVERLOAD(bfloat16, a, 32, 16, int32_t, 8) +__SYCL_JOINT_MATRIX_OVERLOAD(bfloat16, b, 16, 8, int32_t, 2) __SYCL_JOINT_MATRIX_OVERLOAD(half, a, 32, 16, int32_t, 8) __SYCL_JOINT_MATRIX_OVERLOAD(half, b, 16, 8, int32_t, 8) __SYCL_JOINT_MATRIX_OVERLOAD(float, accumulator, 32, 8, float, 8) @@ -71,8 +75,8 @@ __SYCL_JOINT_MATRIX_OVERLOAD(int32_t, accumulator, 32, 8, int32_t, 8) // m16n16k16 __SYCL_JOINT_MATRIX_OVERLOAD(uint16_t, a, 16, 16, int32_t, 4) __SYCL_JOINT_MATRIX_OVERLOAD(uint16_t, b, 16, 16, int32_t, 4) -__SYCL_JOINT_MATRIX_OVERLOAD(cl::sycl::ext::oneapi::experimental::bfloat16, a, 16, 16, int32_t, 4) -__SYCL_JOINT_MATRIX_OVERLOAD(cl::sycl::ext::oneapi::experimental::bfloat16, b, 16, 16, int32_t, 4) +__SYCL_JOINT_MATRIX_OVERLOAD(bfloat16, a, 16, 16, int32_t, 4) +__SYCL_JOINT_MATRIX_OVERLOAD(bfloat16, b, 16, 16, int32_t, 4) __SYCL_JOINT_MATRIX_OVERLOAD(half, a, 16, 16, int32_t, 8) __SYCL_JOINT_MATRIX_OVERLOAD(half, b, 16, 16, int32_t, 8) __SYCL_JOINT_MATRIX_OVERLOAD(float, accumulator, 16, 16, float, 8) @@ -127,7 +131,7 @@ struct joint_matrix_load_impl< void load(sycl::ext::oneapi::experimental::matrix::joint_matrix< T, Use, NumRows, NumCols, Layout, sycl::sub_group> &res, multi_ptr src, size_t stride) { - if constexpr (std::is_same::value || std::is_same::value) { + if constexpr (std::is_same::value || std::is_same::value) { int32_t *tileptr = reinterpret_cast(src.get()); if constexpr (NumRows == 16 && NumCols == 16) { if constexpr (Use == @@ -456,7 +460,7 @@ struct joint_matrix_mad_impl< get_layout_pair_id(), 0); } - } else if constexpr (std::is_same::value || std::is_same::value) { + } else if constexpr (std::is_same::value || std::is_same::value) { __mma_bf16_m16n16k16_mma_f32(D.data, A.data, B.data, C.data, get_layout_pair_id(), 0); } @@ -475,7 +479,7 @@ struct joint_matrix_mad_impl< __hmma_m8n32k16_mma_f16f16(D.data, A.data, B.data, C.data, get_layout_pair_id(), 0); } - } else if constexpr (std::is_same::value) { + } else if constexpr (std::is_same::value || std::is_same::value) { __mma_bf16_m8n32k16_mma_f32(D.data, A.data, B.data, C.data, get_layout_pair_id(), 0); } @@ -486,7 +490,7 @@ struct joint_matrix_mad_impl< } else if constexpr (std::is_same::value) { __imma_m32n8k16_mma_u8(D.data, A.data, B.data, C.data, get_layout_pair_id(), 0); - } else if constexpr (std::is_same::value) { + } else if constexpr (std::is_same::value || std::is_same::value) { __mma_bf16_m32n8k16_mma_f32(D.data, A.data, B.data, C.data, get_layout_pair_id(), 0); } else if constexpr (std::is_same::value) { diff --git a/sycl/test/check_device_code/matrix/matrix-nvptx-bfloat16-test.cpp b/sycl/test/check_device_code/matrix/matrix-nvptx-bfloat16-test.cpp new file mode 100644 index 0000000000000..fd17cc30bbfa7 --- /dev/null +++ b/sycl/test/check_device_code/matrix/matrix-nvptx-bfloat16-test.cpp @@ -0,0 +1,200 @@ +// REQUIRES: 
cuda + +// RUN: %clangxx -fsycl-device-only -fsycl-targets=nvptx64-nvidia-cuda -Xsycl-target-backend --cuda-gpu-arch=sm_80 -DSYCL_EXT_ONEAPI_MATRIX=3 -S -Xclang -emit-llvm %s -o -| FileCheck %s + +#include + +using namespace sycl; +using namespace sycl::ext::oneapi::experimental::matrix; +using sycl::ext::oneapi::experimental::bfloat16; + +constexpr int stride = 16; + +int main() { + + buffer bufA(nullptr, range<1>(1)); + buffer bufB(nullptr, range<1>(1)); + buffer bufC(nullptr, range<1>(1)); + buffer bufD(nullptr, range<1>(1)); + + queue q; + + q.submit([&](handler &cgh) { + auto accC = bufC.get_access(cgh); + auto accA = bufA.get_access(cgh); + auto accB = bufB.get_access(cgh); + auto accD = bufD.get_access(cgh); + + cgh.parallel_for( + nd_range<2>({1, 32}, {1, 32}), + [=](nd_item<2> item) [[sycl::reqd_work_group_size(1, 1, 32)]] { + sycl::sub_group sg = item.get_sub_group(); + + joint_matrix + sub_c; + + joint_matrix + sub_a; + + joint_matrix + sub_b; + + // CHECK: tail call { float, float, float, float, float, float, float, float } @llvm.nvvm.wmma.m16n16k16.load.c.row.stride.f32.p1f32(float addrspace(1)* %_arg_accC, i32 16) #{{.*}} + joint_matrix_load(sg, sub_c, accC.get_pointer(), stride); + // CHECK: tail call { i32, i32, i32, i32 } @llvm.nvvm.wmma.m16n16k16.load.a.row.stride.bf16.p0i32(i32* %call.ascast.i.i{{.*}}.i, i32 16) #{{.*}} + joint_matrix_load(sg, sub_a, accA.get_pointer(), stride); + // CHECK: tail call { i32, i32, i32, i32 } @llvm.nvvm.wmma.m16n16k16.load.b.row.stride.bf16.p0i32(i32* %call.ascast.i.i{{.*}}.i, i32 16) #{{.*}} + joint_matrix_load(sg, sub_b, accB.get_pointer(), stride); + // CHECK: tail call { float, float, float, float, float, float, float, float } @llvm.nvvm.wmma.m16n16k16.mma.row.row.bf16(i32 %11, i32 %12, i32 %13, i32 %14, i32 %17, i32 %18, i32 %19, i32 %20, float %1, float %2, float %3, float %4, float %5, float %6, float %7, float %8) #{{.*}} + sub_c = joint_matrix_mad(sg, sub_a, sub_b, sub_c); + // CHECK: tail call void @llvm.nvvm.wmma.m16n16k16.store.d.row.stride.f32.p1f32(float addrspace(1)* %_arg_accD, float %22, float %23, float %24, float %25, float %26, float %27, float %28, float %29, i32 16) #{{.*}} + joint_matrix_store(sg, sub_c, accD.get_pointer(), stride); + }); + + cgh.parallel_for( + nd_range<2>({1, 32}, {1, 32}), + [=](nd_item<2> item) [[sycl::reqd_work_group_size(1, 1, 32)]] { + sycl::sub_group sg = item.get_sub_group(); + + joint_matrix + sub_c; + + joint_matrix + sub_a; + + joint_matrix + sub_b; + + // CHECK: tail call { float, float, float, float, float, float, float, float } @llvm.nvvm.wmma.m16n16k16.load.c.col.stride.f32.p1f32(float addrspace(1)* %_arg_accC, i32 16) #{{.*}} + joint_matrix_load(sg, sub_c, accC.get_pointer(), stride); + // CHECK: tail call { i32, i32, i32, i32 } @llvm.nvvm.wmma.m16n16k16.load.a.col.stride.bf16.p0i32(i32* %call.ascast.i.i{{.*}}.i, i32 16) #{{.*}} + joint_matrix_load(sg, sub_a, accA.get_pointer(), stride); + // CHECK: tail call { i32, i32, i32, i32 } @llvm.nvvm.wmma.m16n16k16.load.b.col.stride.bf16.p0i32(i32* %call.ascast.i.i{{.*}}.i, i32 16) #{{.*}} + joint_matrix_load(sg, sub_b, accB.get_pointer(), stride); + // CHECK: tail call { float, float, float, float, float, float, float, float } @llvm.nvvm.wmma.m16n16k16.mma.col.col.bf16(i32 %11, i32 %12, i32 %13, i32 %14, i32 %17, i32 %18, i32 %19, i32 %20, float %1, float %2, float %3, float %4, float %5, float %6, float %7, float %8) #{{.*}} + sub_c = joint_matrix_mad(sg, sub_a, sub_b, sub_c); + // CHECK: tail call void 
@llvm.nvvm.wmma.m16n16k16.store.d.col.stride.f32.p1f32(float addrspace(1)* %_arg_accD, float %22, float %23, float %24, float %25, float %26, float %27, float %28, float %29, i32 16) #{{.*}} + joint_matrix_store(sg, sub_c, accD.get_pointer(), stride); + }); + + cgh.parallel_for( + nd_range<2>({1, 32}, {1, 32}), + [=](nd_item<2> item) [[sycl::reqd_work_group_size(1, 1, 32)]] { + sycl::sub_group sg = item.get_sub_group(); + + joint_matrix + sub_c; + + joint_matrix + sub_a; + + joint_matrix + sub_b; + + // CHECK: tail call { float, float, float, float, float, float, float, float } @llvm.nvvm.wmma.m32n8k16.load.c.row.stride.f32.p1f32(float addrspace(1)* %_arg_accC, i32 16) #{{.*}} + joint_matrix_load(sg, sub_c, accC.get_pointer(), stride); + // CHECK: tail call { i32, i32, i32, i32, i32, i32, i32, i32 } @llvm.nvvm.wmma.m32n8k16.load.a.row.stride.bf16.p0i32(i32* %call.ascast.i.i{{.*}}.i, i32 16) #{{.*}} + joint_matrix_load(sg, sub_a, accA.get_pointer(), stride); + // CHECK: tail call { i32, i32 } @llvm.nvvm.wmma.m32n8k16.load.b.row.stride.bf16.p0i32(i32* %call.ascast.i.i{{.*}}.i, i32 16) #{{.*}} + joint_matrix_load(sg, sub_b, accB.get_pointer(), stride); + // CHECK: tail call { float, float, float, float, float, float, float, float } @llvm.nvvm.wmma.m32n8k16.mma.row.row.bf16(i32 %11, i32 %12, i32 %13, i32 %14, i32 %15, i32 %16, i32 %17, i32 %18, i32 %21, i32 %22, float %1, float %2, float %3, float %4, float %5, float %6, float %7, float %8) #{{.*}} + sub_c = joint_matrix_mad(sg, sub_a, sub_b, sub_c); + // CHECK: tail call void @llvm.nvvm.wmma.m32n8k16.store.d.row.stride.f32.p1f32(float addrspace(1)* %_arg_accD, float %24, float %25, float %26, float %27, float %28, float %29, float %30, float %31, i32 16) #{{.*}} + joint_matrix_store(sg, sub_c, accD.get_pointer(), stride); + }); + + cgh.parallel_for( + nd_range<2>({1, 32}, {1, 32}), + [=](nd_item<2> item) [[sycl::reqd_work_group_size(1, 1, 32)]] { + sycl::sub_group sg = item.get_sub_group(); + + joint_matrix + sub_c; + + joint_matrix + sub_a; + + joint_matrix + sub_b; + + // CHECK: tail call { float, float, float, float, float, float, float, float } @llvm.nvvm.wmma.m32n8k16.load.c.col.stride.f32.p1f32(float addrspace(1)* %_arg_accC, i32 16) #{{.*}} + joint_matrix_load(sg, sub_c, accC.get_pointer(), stride); + // CHECK: tail call { i32, i32, i32, i32, i32, i32, i32, i32 } @llvm.nvvm.wmma.m32n8k16.load.a.col.stride.bf16.p0i32(i32* %call.ascast.i.i{{.*}}.i, i32 16) #{{.*}} + joint_matrix_load(sg, sub_a, accA.get_pointer(), stride); + // CHECK: tail call { i32, i32 } @llvm.nvvm.wmma.m32n8k16.load.b.col.stride.bf16.p0i32(i32* %call.ascast.i.i{{.*}}.i, i32 16) #{{.*}} + joint_matrix_load(sg, sub_b, accB.get_pointer(), stride); + // CHECK: tail call { float, float, float, float, float, float, float, float } @llvm.nvvm.wmma.m32n8k16.mma.col.col.bf16(i32 %11, i32 %12, i32 %13, i32 %14, i32 %15, i32 %16, i32 %17, i32 %18, i32 %21, i32 %22, float %1, float %2, float %3, float %4, float %5, float %6, float %7, float %8) #{{.*}} + sub_c = joint_matrix_mad(sg, sub_a, sub_b, sub_c); + // CHECK: tail call void @llvm.nvvm.wmma.m32n8k16.store.d.col.stride.f32.p1f32(float addrspace(1)* %_arg_accD, float %24, float %25, float %26, float %27, float %28, float %29, float %30, float %31, i32 16) #{{.*}} + joint_matrix_store(sg, sub_c, accD.get_pointer(), stride); + }); + + cgh.parallel_for( + nd_range<2>({1, 32}, {1, 32}), + [=](nd_item<2> item) [[sycl::reqd_work_group_size(1, 1, 32)]] { + sycl::sub_group sg = item.get_sub_group(); + + joint_matrix + sub_c; + + 
joint_matrix + sub_a; + + joint_matrix + sub_b; + + // CHECK: tail call { float, float, float, float, float, float, float, float } @llvm.nvvm.wmma.m8n32k16.load.c.row.stride.f32.p1f32(float addrspace(1)* %_arg_accC, i32 16) #{{.*}} + joint_matrix_load(sg, sub_c, accC.get_pointer(), stride); + // CHECK: tail call { i32, i32 } @llvm.nvvm.wmma.m8n32k16.load.a.row.stride.bf16.p0i32(i32* %call.ascast.i.i{{.*}}.i, i32 16) #{{.*}} + joint_matrix_load(sg, sub_a, accA.get_pointer(), stride); + // CHECK: tail call { i32, i32, i32, i32, i32, i32, i32, i32 } @llvm.nvvm.wmma.m8n32k16.load.b.row.stride.bf16.p0i32(i32* %call.ascast.i.i{{.*}}.i, i32 16) #{{.*}} + joint_matrix_load(sg, sub_b, accB.get_pointer(), stride); + // CHECK: tail call { float, float, float, float, float, float, float, float } @llvm.nvvm.wmma.m8n32k16.mma.row.row.bf16(i32 %11, i32 %12, i32 %15, i32 %16, i32 %17, i32 %18, i32 %19, i32 %20, i32 %21, i32 %22, float %1, float %2, float %3, float %4, float %5, float %6, float %7, float %8) #{{.*}} + sub_c = joint_matrix_mad(sg, sub_a, sub_b, sub_c); + // CHECK: tail call void @llvm.nvvm.wmma.m8n32k16.store.d.row.stride.f32.p1f32(float addrspace(1)* %_arg_accD, float %24, float %25, float %26, float %27, float %28, float %29, float %30, float %31, i32 16) #{{.*}} + joint_matrix_store(sg, sub_c, accD.get_pointer(), stride); + }); + + cgh.parallel_for( + nd_range<2>({1, 32}, {1, 32}), + [=](nd_item<2> item) [[sycl::reqd_work_group_size(1, 1, 32)]] { + sycl::sub_group sg = item.get_sub_group(); + + joint_matrix + sub_c; + + joint_matrix + sub_a; + + joint_matrix + sub_b; + + // CHECK: tail call { float, float, float, float, float, float, float, float } @llvm.nvvm.wmma.m8n32k16.load.c.col.stride.f32.p1f32(float addrspace(1)* %_arg_accC, i32 16) #{{.*}} + joint_matrix_load(sg, sub_c, accC.get_pointer(), stride); + // CHECK: tail call { i32, i32 } @llvm.nvvm.wmma.m8n32k16.load.a.col.stride.bf16.p0i32(i32* %call.ascast.i.i{{.*}}.i, i32 16) #{{.*}} + joint_matrix_load(sg, sub_a, accA.get_pointer(), stride); + // CHECK: tail call { i32, i32, i32, i32, i32, i32, i32, i32 } @llvm.nvvm.wmma.m8n32k16.load.b.col.stride.bf16.p0i32(i32* %call.ascast.i.i{{.*}}.i, i32 16) #{{.*}} + joint_matrix_load(sg, sub_b, accB.get_pointer(), stride); + // CHECK: tail call { float, float, float, float, float, float, float, float } @llvm.nvvm.wmma.m8n32k16.mma.col.col.bf16(i32 %11, i32 %12, i32 %15, i32 %16, i32 %17, i32 %18, i32 %19, i32 %20, i32 %21, i32 %22, float %1, float %2, float %3, float %4, float %5, float %6, float %7, float %8) #{{.*}} + sub_c = joint_matrix_mad(sg, sub_a, sub_b, sub_c); + // CHECK: tail call void @llvm.nvvm.wmma.m8n32k16.store.d.col.stride.f32.p1f32(float addrspace(1)* %_arg_accD, float %24, float %25, float %26, float %27, float %28, float %29, float %30, float %31, i32 16) #{{.*}} + joint_matrix_store(sg, sub_c, accD.get_pointer(), stride); + }); + }); + + return 0; +}; From 081008b4739f82271c982b70ded61af3beab32fa Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Tue, 5 Apr 2022 17:03:22 +0100 Subject: [PATCH 15/45] Clarified error msg --- .../sycl/ext/oneapi/experimental/builtins.hpp | 44 +++++++------------ 1 file changed, 16 insertions(+), 28 deletions(-) diff --git a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp index 105cfd8ac5e3b..509f649a7807e 100644 --- a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp +++ b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp @@ -149,21 +149,18 @@ fabs(T x) { return 
__clc_fabs(x); #else (void)x; - throw runtime_error("bf16 is not supported on host device.", + throw runtime_error("bfloat16 is not currently supported on the host device.", PI_INVALID_DEVICE); #endif } template -std::enable_if_t< - std::is_same::value, T> -fabs(T x) { +std::enable_if_t::value, T> fabs(T x) { #ifdef __SYCL_DEVICE_ONLY__ - return bfloat16::from_bits( - __clc_fabs(x.raw())); + return bfloat16::from_bits(__clc_fabs(x.raw())); #else (void)x; - throw runtime_error("bf16 is not supported on host device.", + throw runtime_error("bfloat16 is not currently supported on the host device.", PI_INVALID_DEVICE); #endif } @@ -176,22 +173,19 @@ fmin(T x, T y) { #else (void)x; (void)y; - throw runtime_error("bf16 is not supported on host device.", + throw runtime_error("bfloat16 is not currently supported on the host device.", PI_INVALID_DEVICE); #endif } template -std::enable_if_t< - std::is_same::value, T> -fmin(T x, T y) { +std::enable_if_t::value, T> fmin(T x, T y) { #ifdef __SYCL_DEVICE_ONLY__ - return bfloat16::from_bits( - __clc_fmin(x.raw(), y.raw())); + return bfloat16::from_bits(__clc_fmin(x.raw(), y.raw())); #else (void)x; (void)y; - throw runtime_error("bf16 is not supported on host device.", + throw runtime_error("bfloat16 is not currently supported on the host device.", PI_INVALID_DEVICE); #endif } @@ -204,22 +198,19 @@ fmax(T x, T y) { #else (void)x; (void)y; - throw runtime_error("bf16 is not supported on host device.", + throw runtime_error("bfloat16 is not currently supported on the host device.", PI_INVALID_DEVICE); #endif } template -std::enable_if_t< - std::is_same::value, T> -fmax(T x, T y) { +std::enable_if_t::value, T> fmax(T x, T y) { #ifdef __SYCL_DEVICE_ONLY__ - return bfloat16::from_bits( - __clc_fmax(x.raw(), y.raw())); + return bfloat16::from_bits(__clc_fmax(x.raw(), y.raw())); #else (void)x; (void)y; - throw runtime_error("bf16 is not supported on host device.", + throw runtime_error("bfloat16 is not currently supported on the host device.", PI_INVALID_DEVICE); #endif } @@ -233,23 +224,20 @@ fma(T x, T y, T z) { (void)x; (void)y; (void)z; - throw runtime_error("bf16 is not supported on host device.", + throw runtime_error("bfloat16 is not currently supported on the host device.", PI_INVALID_DEVICE); #endif } template -std::enable_if_t< - std::is_same::value, T> -fma(T x, T y, T z) { +std::enable_if_t::value, T> fma(T x, T y, T z) { #ifdef __SYCL_DEVICE_ONLY__ - return bfloat16::from_bits( - __clc_fma(x.raw(), y.raw(), z.raw())); + return bfloat16::from_bits(__clc_fma(x.raw(), y.raw(), z.raw())); #else (void)x; (void)y; (void)z; - throw runtime_error("bf16 is not supported on host device.", + throw runtime_error("bfloat16 is not currently supported on the host device.", PI_INVALID_DEVICE); #endif } From 25877c020cb8e702308c7d01054a3845d8ff0375 Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Tue, 5 Apr 2022 17:15:29 +0100 Subject: [PATCH 16/45] format --- .../sycl/ext/oneapi/matrix/matrix-tensorcore.hpp | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp index ddcf7c02c2f55..f0b00bfd6e115 100644 --- a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp +++ b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp @@ -131,7 +131,9 @@ struct joint_matrix_load_impl< void load(sycl::ext::oneapi::experimental::matrix::joint_matrix< T, Use, NumRows, NumCols, Layout, sycl::sub_group> &res, multi_ptr src, size_t 
stride) { - if constexpr (std::is_same::value || std::is_same::value) { + if constexpr (std::is_same::value || + std::is_same< + T, sycl::ext::oneapi::experimental::bfloat16>::value) { int32_t *tileptr = reinterpret_cast(src.get()); if constexpr (NumRows == 16 && NumCols == 16) { if constexpr (Use == @@ -460,7 +462,9 @@ struct joint_matrix_mad_impl< get_layout_pair_id(), 0); } - } else if constexpr (std::is_same::value || std::is_same::value) { + } else if constexpr (std::is_same::value || + std::is_same::value) { __mma_bf16_m16n16k16_mma_f32(D.data, A.data, B.data, C.data, get_layout_pair_id(), 0); } @@ -479,7 +483,9 @@ struct joint_matrix_mad_impl< __hmma_m8n32k16_mma_f16f16(D.data, A.data, B.data, C.data, get_layout_pair_id(), 0); } - } else if constexpr (std::is_same::value || std::is_same::value) { + } else if constexpr (std::is_same::value || + std::is_same::value) { __mma_bf16_m8n32k16_mma_f32(D.data, A.data, B.data, C.data, get_layout_pair_id(), 0); } @@ -490,7 +496,9 @@ struct joint_matrix_mad_impl< } else if constexpr (std::is_same::value) { __imma_m32n8k16_mma_u8(D.data, A.data, B.data, C.data, get_layout_pair_id(), 0); - } else if constexpr (std::is_same::value || std::is_same::value) { + } else if constexpr (std::is_same::value || + std::is_same::value) { __mma_bf16_m32n8k16_mma_f32(D.data, A.data, B.data, C.data, get_layout_pair_id(), 0); } else if constexpr (std::is_same::value) { From 4b38281cb5a2f65ce58cd5789280388faca24392 Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Tue, 5 Apr 2022 17:20:24 +0100 Subject: [PATCH 17/45] format --- sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp index f0b00bfd6e115..287fe5160489c 100644 --- a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp +++ b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp @@ -485,7 +485,7 @@ struct joint_matrix_mad_impl< } } else if constexpr (std::is_same::value || std::is_same::value) { + bfloat16>::value) { __mma_bf16_m8n32k16_mma_f32(D.data, A.data, B.data, C.data, get_layout_pair_id(), 0); } From 7ed380c6c962cd7c51b4f35171f59e08de2c905d Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Tue, 5 Apr 2022 17:51:32 +0100 Subject: [PATCH 18/45] removed deleted header from sycl.hpp --- sycl/include/CL/sycl.hpp | 1 - 1 file changed, 1 deletion(-) diff --git a/sycl/include/CL/sycl.hpp b/sycl/include/CL/sycl.hpp index 8ca7d28223cad..a7278b275aa9d 100644 --- a/sycl/include/CL/sycl.hpp +++ b/sycl/include/CL/sycl.hpp @@ -60,7 +60,6 @@ #if SYCL_EXT_ONEAPI_BACKEND_LEVEL_ZERO #include #endif -#include #include #include #include From a53ce3d18a105a6fa9a148ac409f3e3dd85d7fd5 Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Fri, 8 Apr 2022 11:49:07 +0100 Subject: [PATCH 19/45] example optimized impl using marray. 
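
Using sycl::marray for the fragment storage is what makes element-wise access to the fragments and the marray math overloads below possible: pairs of bfloat16 elements can still be processed as packed 32-bit words, with a scalar tail element when N is odd. A minimal, hypothetical usage sketch of the marray fabs overload added here, assuming the top-level SYCL header pulls in the experimental bfloat16 builtins and the kernel is compiled for a CUDA device (the function name, kernel name, and 8-element tile are illustrative only, not part of this patch):

#include <sycl/sycl.hpp>

using sycl::ext::oneapi::experimental::bfloat16;
namespace oneapi_exp = sycl::ext::oneapi::experimental;

// Take |x| of every element of a bfloat16 buffer, 8 elements per work-item.
void buffer_fabs(sycl::queue &q, sycl::buffer<bfloat16, 1> &buf) {
  q.submit([&](sycl::handler &cgh) {
    auto acc = buf.get_access<sycl::access::mode::read_write>(cgh);
    cgh.parallel_for<class bf16_fabs_kernel>(
        sycl::range<1>{buf.size() / 8}, [=](sycl::id<1> i) {
          size_t base = i[0] * 8;
          // Gather an 8-element tile into an marray, apply the vector
          // overload, and write the results back.
          sycl::marray<bfloat16, 8> v;
          for (size_t j = 0; j < 8; ++j)
            v[j] = acc[base + j];
          v = oneapi_exp::fabs(v); // marray overload introduced in this patch
          for (size_t j = 0; j < 8; ++j)
            acc[base + j] = v[j];
        });
  });
}
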
--- .../sycl/ext/oneapi/experimental/builtins.hpp | 22 +++++++++++++++ .../ext/oneapi/matrix/matrix-tensorcore.hpp | 28 +++++++++++++++---- 2 files changed, 45 insertions(+), 5 deletions(-) diff --git a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp index 509f649a7807e..8e64f2fb07e16 100644 --- a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp +++ b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp @@ -165,6 +165,28 @@ std::enable_if_t::value, T> fabs(T x) { #endif } +template +std::enable_if_t::value, sycl::marray> +fabs(sycl::marray x) { +#ifdef __SYCL_DEVICE_ONLY__ + + sycl::marray res; + uint32_t *x_storage = reinterpret_cast(&x); + uint32_t *res_storage = reinterpret_cast(&res); + for (int i = 0; i < N / 2; i++) + res_storage[i] = __clc_fabs(x_storage[i]); + + if (N % 2) { + res[N - 1] = bfloat16::from_bits(__clc_fabs(x[N - 1].raw())); + } + return res; +#else + (void)x; + throw runtime_error("bfloat16 is not currently supported on the host device.", + PI_INVALID_DEVICE); +#endif +} + template std::enable_if_t::value, T> fmin(T x, T y) { diff --git a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp index 287fe5160489c..c2f416912dadf 100644 --- a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp +++ b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp @@ -34,6 +34,23 @@ struct joint_matrix; frag_type data[frag_size]; \ }; +// example implementation +template +struct joint_matrix< + bfloat16, matrix_use::a, 16, 16, Layout, sycl::sub_group, + typename std::enable_if_t> { + sycl::marray data; +}; + +template +struct joint_matrix< + bfloat16, matrix_use::b, 16, 16, Layout, sycl::sub_group, + typename std::enable_if_t> { + sycl::marray data; +}; + // m8n8k4 double only __SYCL_JOINT_MATRIX_OVERLOAD(double, a, 8, 4, double, 1) __SYCL_JOINT_MATRIX_OVERLOAD(double, b, 4, 8, double, 1) @@ -75,8 +92,6 @@ __SYCL_JOINT_MATRIX_OVERLOAD(int32_t, accumulator, 32, 8, int32_t, 8) // m16n16k16 __SYCL_JOINT_MATRIX_OVERLOAD(uint16_t, a, 16, 16, int32_t, 4) __SYCL_JOINT_MATRIX_OVERLOAD(uint16_t, b, 16, 16, int32_t, 4) -__SYCL_JOINT_MATRIX_OVERLOAD(bfloat16, a, 16, 16, int32_t, 4) -__SYCL_JOINT_MATRIX_OVERLOAD(bfloat16, b, 16, 16, int32_t, 4) __SYCL_JOINT_MATRIX_OVERLOAD(half, a, 16, 16, int32_t, 8) __SYCL_JOINT_MATRIX_OVERLOAD(half, b, 16, 16, int32_t, 8) __SYCL_JOINT_MATRIX_OVERLOAD(float, accumulator, 16, 16, float, 8) @@ -135,14 +150,15 @@ struct joint_matrix_load_impl< std::is_same< T, sycl::ext::oneapi::experimental::bfloat16>::value) { int32_t *tileptr = reinterpret_cast(src.get()); + int32_t *destptr = reinterpret_cast(&res.data); if constexpr (NumRows == 16 && NumCols == 16) { if constexpr (Use == sycl::ext::oneapi::experimental::matrix::matrix_use::a) { - __mma_bf16_m16n16k16_ld_a(res.data, tileptr, stride, + __mma_bf16_m16n16k16_ld_a(destptr, tileptr, stride, get_layout_id()); } else if constexpr (Use == sycl::ext::oneapi::experimental::matrix:: matrix_use::b) { - __mma_bf16_m16n16k16_ld_b(res.data, tileptr, stride, + __mma_bf16_m16n16k16_ld_b(destptr, tileptr, stride, get_layout_id()); } } else if constexpr (NumRows == 8 && NumCols == 16) { @@ -465,7 +481,9 @@ struct joint_matrix_mad_impl< } else if constexpr (std::is_same::value || std::is_same::value) { - __mma_bf16_m16n16k16_mma_f32(D.data, A.data, B.data, C.data, + int32_t *destptrA = reinterpret_cast(&A.data); + int32_t *destptrB = reinterpret_cast(&B.data); + 
__mma_bf16_m16n16k16_mma_f32(D.data, destptrA, destptrB, C.data, get_layout_pair_id(), 0); } } else if constexpr (M == 8 && N == 32 && K == 16) { From b0badd296cced4b46c255d2c9d0e9b51f82822be Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Mon, 11 Apr 2022 17:11:58 +0100 Subject: [PATCH 20/45] Array impls of bfloat16 math fcts. Signed-off-by: jack.kirk --- .../sycl/ext/oneapi/experimental/builtins.hpp | 136 ++++++++++++++---- 1 file changed, 108 insertions(+), 28 deletions(-) diff --git a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp index 8e64f2fb07e16..f205c4638d613 100644 --- a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp +++ b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp @@ -145,102 +145,155 @@ template struct is_bf16_storage_type> { template std::enable_if_t::value, T> fabs(T x) { -#ifdef __SYCL_DEVICE_ONLY__ +#if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) return __clc_fabs(x); #else (void)x; throw runtime_error("bfloat16 is not currently supported on the host device.", PI_INVALID_DEVICE); -#endif +#endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } template -std::enable_if_t::value, T> fabs(T x) { -#ifdef __SYCL_DEVICE_ONLY__ +std::enable_if_t, T> fabs(T x) { +#if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) return bfloat16::from_bits(__clc_fabs(x.raw())); #else (void)x; throw runtime_error("bfloat16 is not currently supported on the host device.", PI_INVALID_DEVICE); -#endif +#endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } -template -std::enable_if_t::value, sycl::marray> +template +std::enable_if_t, sycl::marray> fabs(sycl::marray x) { -#ifdef __SYCL_DEVICE_ONLY__ - +#if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) sycl::marray res; - uint32_t *x_storage = reinterpret_cast(&x); - uint32_t *res_storage = reinterpret_cast(&res); - for (int i = 0; i < N / 2; i++) + auto x_storage = reinterpret_cast(&x); + auto res_storage = reinterpret_cast(&res); + + for (size_t i = 0; i < N / 2; i++) res_storage[i] = __clc_fabs(x_storage[i]); if (N % 2) { res[N - 1] = bfloat16::from_bits(__clc_fabs(x[N - 1].raw())); } + return res; #else (void)x; throw runtime_error("bfloat16 is not currently supported on the host device.", PI_INVALID_DEVICE); -#endif +#endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } template std::enable_if_t::value, T> fmin(T x, T y) { -#ifdef __SYCL_DEVICE_ONLY__ +#if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) return __clc_fmin(x, y); #else (void)x; (void)y; throw runtime_error("bfloat16 is not currently supported on the host device.", PI_INVALID_DEVICE); -#endif +#endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } template -std::enable_if_t::value, T> fmin(T x, T y) { -#ifdef __SYCL_DEVICE_ONLY__ +std::enable_if_t, T> fmin(T x, T y) { +#if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) return bfloat16::from_bits(__clc_fmin(x.raw(), y.raw())); #else (void)x; (void)y; throw runtime_error("bfloat16 is not currently supported on the host device.", PI_INVALID_DEVICE); -#endif +#endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) +} + +template +std::enable_if_t, sycl::marray> +fmin(sycl::marray x, sycl::marray y) { +#if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) + sycl::marray res; + auto x_storage = reinterpret_cast(&x); + auto y_storage = reinterpret_cast(&y); + auto res_storage = reinterpret_cast(&res); + + for (size_t i = 0; i < N / 2; i++) + res_storage[i] = __clc_fmin(x_storage[i], 
y_storage[i]); + + if (N % 2) { + res[N - 1] = + bfloat16::from_bits(__clc_fmin(x[N - 1].raw(), y[N - 1].raw())); + } + + return res; +#else + (void)x; + (void)y; + throw runtime_error("bfloat16 is not currently supported on the host device.", + PI_INVALID_DEVICE); +#endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } template std::enable_if_t::value, T> fmax(T x, T y) { -#ifdef __SYCL_DEVICE_ONLY__ +#if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) return __clc_fmax(x, y); #else (void)x; (void)y; throw runtime_error("bfloat16 is not currently supported on the host device.", PI_INVALID_DEVICE); -#endif +#endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } template -std::enable_if_t::value, T> fmax(T x, T y) { -#ifdef __SYCL_DEVICE_ONLY__ +std::enable_if_t, T> fmax(T x, T y) { +#if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) return bfloat16::from_bits(__clc_fmax(x.raw(), y.raw())); #else (void)x; (void)y; throw runtime_error("bfloat16 is not currently supported on the host device.", PI_INVALID_DEVICE); -#endif +#endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) +} + +template +std::enable_if_t, sycl::marray> +fmax(sycl::marray x, sycl::marray y) { +#if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) + sycl::marray res; + auto x_storage = reinterpret_cast(&x); + auto y_storage = reinterpret_cast(&y); + auto res_storage = reinterpret_cast(&res); + + for (size_t i = 0; i < N / 2; i++) + res_storage[i] = __clc_fmax(x_storage[i], y_storage[i]); + + if (N % 2) { + res[N - 1] = + bfloat16::from_bits(__clc_fmax(x[N - 1].raw(), y[N - 1].raw())); + } + + return res; +#else + (void)x; + (void)y; + throw runtime_error("bfloat16 is not currently supported on the host device.", + PI_INVALID_DEVICE); +#endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } template std::enable_if_t::value, T> fma(T x, T y, T z) { -#ifdef __SYCL_DEVICE_ONLY__ +#if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) return __clc_fma(x, y, z); #else (void)x; @@ -248,12 +301,12 @@ fma(T x, T y, T z) { (void)z; throw runtime_error("bfloat16 is not currently supported on the host device.", PI_INVALID_DEVICE); -#endif +#endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } template -std::enable_if_t::value, T> fma(T x, T y, T z) { -#ifdef __SYCL_DEVICE_ONLY__ +std::enable_if_t, T> fma(T x, T y, T z) { +#if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) return bfloat16::from_bits(__clc_fma(x.raw(), y.raw(), z.raw())); #else (void)x; @@ -261,7 +314,34 @@ std::enable_if_t::value, T> fma(T x, T y, T z) { (void)z; throw runtime_error("bfloat16 is not currently supported on the host device.", PI_INVALID_DEVICE); -#endif +#endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) +} + +template +std::enable_if_t, sycl::marray> +fma(sycl::marray x, sycl::marray y, sycl::marray z) { +#if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) + sycl::marray res; + auto x_storage = reinterpret_cast(&x); + auto y_storage = reinterpret_cast(&y); + auto z_storage = reinterpret_cast(&z); + auto res_storage = reinterpret_cast(&res); + + for (size_t i = 0; i < N / 2; i++) + res_storage[i] = __clc_fma(x_storage[i], y_storage[i], z_storage[i]); + + if (N % 2) { + res[N - 1] = bfloat16::from_bits( + __clc_fma(x[N - 1].raw(), y[N - 1].raw(), z[N - 1].raw())); + } + + return res; +#else + (void)x; + (void)y; + throw runtime_error("bfloat16 is not currently supported on the host device.", + PI_INVALID_DEVICE); +#endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } } // 
namespace experimental From a8f8041e685c8987502be522d75b2c80728ec250 Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Mon, 11 Apr 2022 17:17:14 +0100 Subject: [PATCH 21/45] matrix device tests use sycl::accessor. Signed-off-by: jack.kirk --- .../matrix/matrix-nvptx-bfloat16-test.cpp | 17 +++++++++----- .../matrix/matrix-nvptx-double-test.cpp | 22 +++++++++---------- .../matrix/matrix-nvptx-half-float-test.cpp | 15 ++++++++----- .../matrix/matrix-nvptx-half-half-test.cpp | 15 ++++++++----- .../matrix/matrix-nvptx-int8-test.cpp | 15 ++++++++----- .../matrix/matrix-nvptx-uint8-test.cpp | 15 ++++++++----- 6 files changed, 62 insertions(+), 37 deletions(-) diff --git a/sycl/test/check_device_code/matrix/matrix-nvptx-bfloat16-test.cpp b/sycl/test/check_device_code/matrix/matrix-nvptx-bfloat16-test.cpp index fd17cc30bbfa7..3f2858f9edcf1 100644 --- a/sycl/test/check_device_code/matrix/matrix-nvptx-bfloat16-test.cpp +++ b/sycl/test/check_device_code/matrix/matrix-nvptx-bfloat16-test.cpp @@ -2,7 +2,7 @@ // RUN: %clangxx -fsycl-device-only -fsycl-targets=nvptx64-nvidia-cuda -Xsycl-target-backend --cuda-gpu-arch=sm_80 -DSYCL_EXT_ONEAPI_MATRIX=3 -S -Xclang -emit-llvm %s -o -| FileCheck %s -#include +#include using namespace sycl; using namespace sycl::ext::oneapi::experimental::matrix; @@ -20,10 +20,17 @@ int main() { queue q; q.submit([&](handler &cgh) { - auto accC = bufC.get_access(cgh); - auto accA = bufA.get_access(cgh); - auto accB = bufB.get_access(cgh); - auto accD = bufD.get_access(cgh); + + sycl::accessor + accA(bufA, cgh); + sycl::accessor + accB(bufB, cgh); + sycl::accessor + accC(bufC, cgh); + sycl::accessor + accD(bufD, cgh); cgh.parallel_for( nd_range<2>({1, 32}, {1, 32}), diff --git a/sycl/test/check_device_code/matrix/matrix-nvptx-double-test.cpp b/sycl/test/check_device_code/matrix/matrix-nvptx-double-test.cpp index 7d9ba2966d866..ce151261e3fae 100644 --- a/sycl/test/check_device_code/matrix/matrix-nvptx-double-test.cpp +++ b/sycl/test/check_device_code/matrix/matrix-nvptx-double-test.cpp @@ -2,7 +2,7 @@ // RUN: %clangxx -fsycl-device-only -fsycl-targets=nvptx64-nvidia-cuda -Xsycl-target-backend --cuda-gpu-arch=sm_80 -DSYCL_EXT_ONEAPI_MATRIX=3 -S -Xclang -emit-llvm %s -o -| FileCheck %s -#include +#include using namespace sycl; using namespace sycl::ext::oneapi::experimental::matrix; @@ -30,10 +30,15 @@ int main() { queue q; q.submit([&](handler &cgh) { - auto accC = bufC.get_access(cgh); - auto accA = bufA.get_access(cgh); - auto accB = bufB.get_access(cgh); - auto accD = bufD.get_access(cgh); + + sycl::accessor + accA(bufA, cgh); + sycl::accessor + accB(bufB, cgh); + sycl::accessor + accC(bufC, cgh); + sycl::accessor + accD(bufD, cgh); cgh.parallel_for( nd_range<2>({1, 32}, {1, 32}), @@ -61,13 +66,6 @@ int main() { //CHECK: tail call void @llvm.nvvm.wmma.m8n8k4.store.d.row.stride.f64.p1f64(double addrspace(1)* %_arg_accD, double %6, double %7, i32 8) #{{.*}} joint_matrix_store(sg, sub_c, accD.get_pointer(), N); }); - }); - - q.submit([&](handler &cgh) { - auto accC = bufC.get_access(cgh); - auto accA = bufA.get_access(cgh); - auto accB = bufB.get_access(cgh); - auto accD = bufD.get_access(cgh); cgh.parallel_for( nd_range<2>({1, 32}, {1, 32}), diff --git a/sycl/test/check_device_code/matrix/matrix-nvptx-half-float-test.cpp b/sycl/test/check_device_code/matrix/matrix-nvptx-half-float-test.cpp index 538c04bb783e0..22d45fe5348ec 100644 --- a/sycl/test/check_device_code/matrix/matrix-nvptx-half-float-test.cpp +++ b/sycl/test/check_device_code/matrix/matrix-nvptx-half-float-test.cpp @@ 
-2,7 +2,7 @@ // RUN: %clangxx -fsycl-device-only -fsycl-targets=nvptx64-nvidia-cuda -Xsycl-target-backend --cuda-gpu-arch=sm_70 -DSYCL_EXT_ONEAPI_MATRIX=3 -S -Xclang -emit-llvm %s -o -| FileCheck %s -#include +#include using namespace sycl; using namespace sycl::ext::oneapi::experimental::matrix; @@ -19,10 +19,15 @@ int main() { queue q; q.submit([&](handler &cgh) { - auto accC = bufC.get_access(cgh); - auto accA = bufA.get_access(cgh); - auto accB = bufB.get_access(cgh); - auto accD = bufD.get_access(cgh); + + sycl::accessor + accA(bufA, cgh); + sycl::accessor + accB(bufB, cgh); + sycl::accessor + accC(bufC, cgh); + sycl::accessor + accD(bufD, cgh); cgh.parallel_for( nd_range<2>({1, 32}, {1, 32}), diff --git a/sycl/test/check_device_code/matrix/matrix-nvptx-half-half-test.cpp b/sycl/test/check_device_code/matrix/matrix-nvptx-half-half-test.cpp index 54d7c427d917f..cb74b76c0fc12 100644 --- a/sycl/test/check_device_code/matrix/matrix-nvptx-half-half-test.cpp +++ b/sycl/test/check_device_code/matrix/matrix-nvptx-half-half-test.cpp @@ -2,7 +2,7 @@ // RUN: %clangxx -fsycl-device-only -fsycl-targets=nvptx64-nvidia-cuda -Xsycl-target-backend --cuda-gpu-arch=sm_70 -DSYCL_EXT_ONEAPI_MATRIX=3 -S -Xclang -emit-llvm %s -o -| FileCheck %s -#include +#include using namespace sycl; using namespace sycl::ext::oneapi::experimental::matrix; @@ -19,10 +19,15 @@ int main() { queue q; q.submit([&](handler &cgh) { - auto accC = bufC.get_access(cgh); - auto accA = bufA.get_access(cgh); - auto accB = bufB.get_access(cgh); - auto accD = bufD.get_access(cgh); + + sycl::accessor + accA(bufA, cgh); + sycl::accessor + accB(bufB, cgh); + sycl::accessor + accC(bufC, cgh); + sycl::accessor + accD(bufD, cgh); cgh.parallel_for( nd_range<2>({1, 32}, {1, 32}), diff --git a/sycl/test/check_device_code/matrix/matrix-nvptx-int8-test.cpp b/sycl/test/check_device_code/matrix/matrix-nvptx-int8-test.cpp index 805c7fe02eff5..131bb63074a0e 100644 --- a/sycl/test/check_device_code/matrix/matrix-nvptx-int8-test.cpp +++ b/sycl/test/check_device_code/matrix/matrix-nvptx-int8-test.cpp @@ -2,7 +2,7 @@ // RUN: %clangxx -fsycl-device-only -fsycl-targets=nvptx64-nvidia-cuda -Xsycl-target-backend --cuda-gpu-arch=sm_72 -DSYCL_EXT_ONEAPI_MATRIX=3 -S -Xclang -emit-llvm %s -o -| FileCheck %s -#include +#include using namespace sycl; using namespace sycl::ext::oneapi::experimental::matrix; @@ -19,10 +19,15 @@ int main() { queue q; q.submit([&](handler &cgh) { - auto accC = bufC.get_access(cgh); - auto accA = bufA.get_access(cgh); - auto accB = bufB.get_access(cgh); - auto accD = bufD.get_access(cgh); + + sycl::accessor + accA(bufA, cgh); + sycl::accessor + accB(bufB, cgh); + sycl::accessor + accC(bufC, cgh); + sycl::accessor + accD(bufD, cgh); cgh.parallel_for( nd_range<2>({1, 32}, {1, 32}), diff --git a/sycl/test/check_device_code/matrix/matrix-nvptx-uint8-test.cpp b/sycl/test/check_device_code/matrix/matrix-nvptx-uint8-test.cpp index 2962810436716..b46f552f72e35 100644 --- a/sycl/test/check_device_code/matrix/matrix-nvptx-uint8-test.cpp +++ b/sycl/test/check_device_code/matrix/matrix-nvptx-uint8-test.cpp @@ -2,7 +2,7 @@ // RUN: %clangxx -fsycl-device-only -fsycl-targets=nvptx64-nvidia-cuda -Xsycl-target-backend --cuda-gpu-arch=sm_72 -DSYCL_EXT_ONEAPI_MATRIX=3 -S -Xclang -emit-llvm %s -o -| FileCheck %s -#include +#include using namespace sycl; using namespace sycl::ext::oneapi::experimental::matrix; @@ -19,10 +19,15 @@ int main() { queue q; q.submit([&](handler &cgh) { - auto accC = bufC.get_access(cgh); - auto accA = bufA.get_access(cgh); - 
auto accB = bufB.get_access(cgh); - auto accD = bufD.get_access(cgh); + + sycl::accessor + accA(bufA, cgh); + sycl::accessor + accB(bufB, cgh); + sycl::accessor + accC(bufC, cgh); + sycl::accessor + accD(bufD, cgh); cgh.parallel_for( nd_range<2>({1, 32}, {1, 32}), From fbd9a98db2def1c91cdf5394493b0bddeca26177 Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Tue, 12 Apr 2022 10:18:32 +0100 Subject: [PATCH 22/45] mixed float array impl for volta testing. --- .../ext/oneapi/matrix/matrix-tensorcore.hpp | 248 ++++++++++-------- 1 file changed, 142 insertions(+), 106 deletions(-) diff --git a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp index c2f416912dadf..fb75dd82aa754 100644 --- a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp +++ b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp @@ -34,22 +34,38 @@ struct joint_matrix; frag_type data[frag_size]; \ }; -// example implementation -template -struct joint_matrix< - bfloat16, matrix_use::a, 16, 16, Layout, sycl::sub_group, - typename std::enable_if_t> { - sycl::marray data; -}; +#define __SYCL_JOINT_MATRIX_OVERLOAD_ARR(type, use, M, N, size) \ + template \ + struct joint_matrix< \ + type, matrix_use::use, M, N, Layout, sycl::sub_group, \ + typename std::enable_if_t> { \ + sycl::marray data; \ + }; -template -struct joint_matrix< - bfloat16, matrix_use::b, 16, 16, Layout, sycl::sub_group, - typename std::enable_if_t> { - sycl::marray data; -}; +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(bfloat16, a, 16, 16, 8) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(bfloat16, b, 16, 16, 8) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(bfloat16, a, 8, 16, 4) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(bfloat16, b, 16, 32, 16) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(bfloat16, a, 32, 16, 16) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(bfloat16, b, 16, 8, 4) + +// TODO this case needs to be treated carefully for element wise ops: The number of fragments actually used depends on SM version +// m8n32k16 +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(half, a, 8, 16, 16) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(half, b, 16, 32, 16) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(half, accumulator, 8, 32, 8) +//__SYCL_JOINT_MATRIX_OVERLOAD_ARR(float, accumulator, 8, 32, 8) + +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(half, a, 32, 16, 16) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(half, b, 16, 8, 16) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(half, accumulator, 32, 8, 8) + +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(half, a, 16, 16, 16) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(half, b, 16, 16, 16) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(half, accumulator, 16, 16, 8) + +#undef __SYCL_JOINT_MATRIX_OVERLOAD_ARR // m8n8k4 double only __SYCL_JOINT_MATRIX_OVERLOAD(double, a, 8, 4, double, 1) @@ -60,12 +76,12 @@ __SYCL_JOINT_MATRIX_OVERLOAD(double, accumulator, 8, 8, double, 2) // bf16 data format uint16_t implementation is deprecated __SYCL_JOINT_MATRIX_OVERLOAD(uint16_t, a, 8, 16, int32_t, 2) __SYCL_JOINT_MATRIX_OVERLOAD(uint16_t, b, 16, 32, int32_t, 8) -__SYCL_JOINT_MATRIX_OVERLOAD(bfloat16, a, 8, 16, int32_t, 2) -__SYCL_JOINT_MATRIX_OVERLOAD(bfloat16, b, 16, 32, int32_t, 8) -__SYCL_JOINT_MATRIX_OVERLOAD(half, a, 8, 16, int32_t, 8) -__SYCL_JOINT_MATRIX_OVERLOAD(half, b, 16, 32, int32_t, 8) +//__SYCL_JOINT_MATRIX_OVERLOAD(bfloat16, a, 8, 16, int32_t, 2) +//__SYCL_JOINT_MATRIX_OVERLOAD(bfloat16, b, 16, 32, int32_t, 8) +//__SYCL_JOINT_MATRIX_OVERLOAD(half, a, 8, 16, int32_t, 8) +//__SYCL_JOINT_MATRIX_OVERLOAD(half, b, 16, 32, int32_t, 8) __SYCL_JOINT_MATRIX_OVERLOAD(float, accumulator, 8, 32, float, 8) 
-__SYCL_JOINT_MATRIX_OVERLOAD(half, accumulator, 8, 32, int32_t, 4) +//__SYCL_JOINT_MATRIX_OVERLOAD(half, accumulator, 8, 32, int32_t, 4) __SYCL_JOINT_MATRIX_OVERLOAD(int8_t, a, 8, 16, int32_t, 1) __SYCL_JOINT_MATRIX_OVERLOAD(int8_t, b, 16, 32, int32_t, 4) @@ -76,12 +92,12 @@ __SYCL_JOINT_MATRIX_OVERLOAD(int32_t, accumulator, 8, 32, int32_t, 8) // m32n8k16 __SYCL_JOINT_MATRIX_OVERLOAD(uint16_t, a, 32, 16, int32_t, 8) __SYCL_JOINT_MATRIX_OVERLOAD(uint16_t, b, 16, 8, int32_t, 2) -__SYCL_JOINT_MATRIX_OVERLOAD(bfloat16, a, 32, 16, int32_t, 8) -__SYCL_JOINT_MATRIX_OVERLOAD(bfloat16, b, 16, 8, int32_t, 2) -__SYCL_JOINT_MATRIX_OVERLOAD(half, a, 32, 16, int32_t, 8) -__SYCL_JOINT_MATRIX_OVERLOAD(half, b, 16, 8, int32_t, 8) +//__SYCL_JOINT_MATRIX_OVERLOAD(bfloat16, a, 32, 16, int32_t, 8) +//__SYCL_JOINT_MATRIX_OVERLOAD(bfloat16, b, 16, 8, int32_t, 2) +//__SYCL_JOINT_MATRIX_OVERLOAD(half, a, 32, 16, int32_t, 8) +//__SYCL_JOINT_MATRIX_OVERLOAD(half, b, 16, 8, int32_t, 8) __SYCL_JOINT_MATRIX_OVERLOAD(float, accumulator, 32, 8, float, 8) -__SYCL_JOINT_MATRIX_OVERLOAD(half, accumulator, 32, 8, int32_t, 4) +//__SYCL_JOINT_MATRIX_OVERLOAD(half, accumulator, 32, 8, int32_t, 4) __SYCL_JOINT_MATRIX_OVERLOAD(int8_t, a, 32, 16, int32_t, 4) __SYCL_JOINT_MATRIX_OVERLOAD(int8_t, b, 16, 8, int32_t, 1) @@ -92,10 +108,10 @@ __SYCL_JOINT_MATRIX_OVERLOAD(int32_t, accumulator, 32, 8, int32_t, 8) // m16n16k16 __SYCL_JOINT_MATRIX_OVERLOAD(uint16_t, a, 16, 16, int32_t, 4) __SYCL_JOINT_MATRIX_OVERLOAD(uint16_t, b, 16, 16, int32_t, 4) -__SYCL_JOINT_MATRIX_OVERLOAD(half, a, 16, 16, int32_t, 8) -__SYCL_JOINT_MATRIX_OVERLOAD(half, b, 16, 16, int32_t, 8) +//__SYCL_JOINT_MATRIX_OVERLOAD(half, a, 16, 16, int32_t, 8) +//__SYCL_JOINT_MATRIX_OVERLOAD(half, b, 16, 16, int32_t, 8) __SYCL_JOINT_MATRIX_OVERLOAD(float, accumulator, 16, 16, float, 8) -__SYCL_JOINT_MATRIX_OVERLOAD(half, accumulator, 16, 16, int32_t, 4) +//__SYCL_JOINT_MATRIX_OVERLOAD(half, accumulator, 16, 16, int32_t, 4) __SYCL_JOINT_MATRIX_OVERLOAD(int8_t, a, 16, 16, int32_t, 2) __SYCL_JOINT_MATRIX_OVERLOAD(int8_t, b, 16, 16, int32_t, 2) @@ -146,11 +162,11 @@ struct joint_matrix_load_impl< void load(sycl::ext::oneapi::experimental::matrix::joint_matrix< T, Use, NumRows, NumCols, Layout, sycl::sub_group> &res, multi_ptr src, size_t stride) { - if constexpr (std::is_same::value || - std::is_same< - T, sycl::ext::oneapi::experimental::bfloat16>::value) { - int32_t *tileptr = reinterpret_cast(src.get()); - int32_t *destptr = reinterpret_cast(&res.data); + if constexpr (std::is_same_v || + std::is_same_v< + T, sycl::ext::oneapi::experimental::bfloat16>) { + auto tileptr = reinterpret_cast(src.get()); + auto destptr = reinterpret_cast(&res.data); if constexpr (NumRows == 16 && NumCols == 16) { if constexpr (Use == sycl::ext::oneapi::experimental::matrix::matrix_use::a) { @@ -162,20 +178,20 @@ struct joint_matrix_load_impl< get_layout_id()); } } else if constexpr (NumRows == 8 && NumCols == 16) { - __mma_bf16_m8n32k16_ld_a(res.data, tileptr, stride, + __mma_bf16_m8n32k16_ld_a(destptr, tileptr, stride, get_layout_id()); } else if constexpr (NumRows == 16 && NumCols == 32) { - __mma_bf16_m8n32k16_ld_b(res.data, tileptr, stride, + __mma_bf16_m8n32k16_ld_b(destptr, tileptr, stride, get_layout_id()); } else if constexpr (NumRows == 32 && NumCols == 16) { - __mma_bf16_m32n8k16_ld_a(res.data, tileptr, stride, + __mma_bf16_m32n8k16_ld_a(destptr, tileptr, stride, get_layout_id()); } else if constexpr (NumRows == 16 && NumCols == 8) { - __mma_bf16_m32n8k16_ld_b(res.data, tileptr, stride, 
+ __mma_bf16_m32n8k16_ld_b(destptr, tileptr, stride, get_layout_id()); } - } else if constexpr (std::is_same::value) { - int32_t *tileptr = reinterpret_cast(src.get()); + } else if constexpr (std::is_same_v) { + auto tileptr = reinterpret_cast(src.get()); if constexpr (NumRows == 16 && NumCols == 16) { if constexpr (Use == sycl::ext::oneapi::experimental::matrix::matrix_use::a) { @@ -199,8 +215,8 @@ struct joint_matrix_load_impl< __imma_m32n8k16_ld_b_u8(res.data, tileptr, stride, get_layout_id()); } - } else if constexpr (std::is_same::value) { - int32_t *tileptr = reinterpret_cast(src.get()); + } else if constexpr (std::is_same_v) { + auto tileptr = reinterpret_cast(src.get()); if constexpr (NumRows == 16 && NumCols == 16) { if constexpr (Use == sycl::ext::oneapi::experimental::matrix::matrix_use::a) { @@ -224,43 +240,44 @@ struct joint_matrix_load_impl< __imma_m32n8k16_ld_b_s8(res.data, tileptr, stride, get_layout_id()); } - } else if constexpr (std::is_same::value) { - int32_t *tileptr = reinterpret_cast(src.get()); + } else if constexpr (std::is_same_v) { + auto tileptr = reinterpret_cast(src.get()); + auto dstptr = reinterpret_cast(&res.data); if constexpr (NumRows == 16 && NumCols == 16) { if constexpr (Use == sycl::ext::oneapi::experimental::matrix::matrix_use::a) { - __hmma_m16n16k16_ld_a(res.data, tileptr, stride, + __hmma_m16n16k16_ld_a(dstptr, tileptr, stride, get_layout_id()); } else if constexpr (Use == sycl::ext::oneapi::experimental::matrix:: matrix_use::b) { - __hmma_m16n16k16_ld_b(res.data, tileptr, stride, + __hmma_m16n16k16_ld_b(dstptr, tileptr, stride, get_layout_id()); } else if constexpr (Use == sycl::ext::oneapi::experimental::matrix:: matrix_use::accumulator) { - __hmma_m16n16k16_ld_c_f16(res.data, tileptr, stride, + __hmma_m16n16k16_ld_c_f16(dstptr, tileptr, stride, get_layout_id()); } } else if constexpr (NumRows == 8 && NumCols == 16) { - __hmma_m8n32k16_ld_a(res.data, tileptr, stride, + __hmma_m8n32k16_ld_a(dstptr, tileptr, stride, get_layout_id()); } else if constexpr (NumRows == 16 && NumCols == 32) { - __hmma_m8n32k16_ld_b(res.data, tileptr, stride, + __hmma_m8n32k16_ld_b(dstptr, tileptr, stride, get_layout_id()); } else if constexpr (NumRows == 32 && NumCols == 16) { - __hmma_m32n8k16_ld_a(res.data, tileptr, stride, + __hmma_m32n8k16_ld_a(dstptr, tileptr, stride, get_layout_id()); } else if constexpr (NumRows == 16 && NumCols == 8) { - __hmma_m32n8k16_ld_b(res.data, tileptr, stride, + __hmma_m32n8k16_ld_b(dstptr, tileptr, stride, get_layout_id()); } else if constexpr (NumRows == 32 && NumCols == 8) { - __hmma_m32n8k16_ld_c_f16(res.data, tileptr, stride, + __hmma_m32n8k16_ld_c_f16(dstptr, tileptr, stride, get_layout_id()); } else if constexpr (NumRows == 8 && NumCols == 32) { - __hmma_m8n32k16_ld_c_f16(res.data, tileptr, stride, + __hmma_m8n32k16_ld_c_f16(dstptr, tileptr, stride, get_layout_id()); } - } else if constexpr (std::is_same::value) { + } else if constexpr (std::is_same_v) { if constexpr (NumRows == 16 && NumCols == 16) { __imma_m16n16k16_ld_c(res.data, src.get(), stride, get_layout_id()); @@ -271,7 +288,7 @@ struct joint_matrix_load_impl< __imma_m32n8k16_ld_c(res.data, src.get(), stride, get_layout_id()); } - } else if constexpr (std::is_same::value) { + } else if constexpr (std::is_same_v) { if constexpr (NumRows == 16 && NumCols == 16) { __hmma_m16n16k16_ld_c_f32(res.data, src.get(), stride, get_layout_id()); @@ -282,7 +299,7 @@ struct joint_matrix_load_impl< __hmma_m32n8k16_ld_c_f32(res.data, src.get(), stride, get_layout_id()); } - } else if 
constexpr (std::is_same::value) { + } else if constexpr (std::is_same_v) { if constexpr (Use == sycl::ext::oneapi::experimental::matrix::matrix_use::a) { __dmma_m8n8k4_ld_a(res.data, src.get(), stride, @@ -326,42 +343,45 @@ struct joint_matrix_store_impl< NumRows, NumCols, Layout, sycl::sub_group> &src, multi_ptr dst, size_t stride) { if (NumRows == 16 && NumCols == 16) { - if constexpr (std::is_same::value) { + if constexpr (std::is_same_v) { __hmma_m16n16k16_st_c_f32(dst.get(), src.data, stride, get_layout_id()); - } else if constexpr (std::is_same::value) { + } else if constexpr (std::is_same_v) { __imma_m16n16k16_st_c_i32(dst.get(), src.data, stride, get_layout_id()); - } else if constexpr (std::is_same::value) { - int32_t *tileptr = reinterpret_cast(dst.get()); - __hmma_m16n16k16_st_c_f16(tileptr, src.data, stride, + } else if constexpr (std::is_same_v) { + auto tileptr = reinterpret_cast(dst.get()); + auto srcptr = reinterpret_cast(&src.data); + __hmma_m16n16k16_st_c_f16(tileptr, srcptr, stride, get_layout_id()); } } else if (NumRows == 8 && NumCols == 32) { - if constexpr (std::is_same::value) { + if constexpr (std::is_same_v) { __hmma_m8n32k16_st_c_f32(dst.get(), src.data, stride, get_layout_id()); - } else if constexpr (std::is_same::value) { + } else if constexpr (std::is_same_v) { __imma_m8n32k16_st_c_i32(dst.get(), src.data, stride, get_layout_id()); - } else if constexpr (std::is_same::value) { - int32_t *tileptr = reinterpret_cast(dst.get()); - __hmma_m8n32k16_st_c_f16(tileptr, src.data, stride, + } else if constexpr (std::is_same_v) { + auto tileptr = reinterpret_cast(dst.get()); + auto srcptr = reinterpret_cast(&src.data); + __hmma_m8n32k16_st_c_f16(tileptr, srcptr, stride, get_layout_id()); } } else if (NumRows == 32 && NumCols == 8) { - if constexpr (std::is_same::value) { + if constexpr (std::is_same_v) { __hmma_m32n8k16_st_c_f32(dst.get(), src.data, stride, get_layout_id()); - } else if constexpr (std::is_same::value) { + } else if constexpr (std::is_same_v) { __imma_m32n8k16_st_c_i32(dst.get(), src.data, stride, get_layout_id()); - } else if constexpr (std::is_same::value) { - int32_t *tileptr = reinterpret_cast(dst.get()); - __hmma_m32n8k16_st_c_f16(tileptr, src.data, stride, + } else if constexpr (std::is_same_v) { + auto tileptr = reinterpret_cast(dst.get()); + auto srcptr = reinterpret_cast(&src.data); + __hmma_m32n8k16_st_c_f16(tileptr, srcptr, stride, get_layout_id()); } - } else if constexpr (std::is_same::value) { + } else if constexpr (std::is_same_v) { __dmma_m8n8k4_st_c_f64(dst.get(), src.data, stride, get_layout_id()); } @@ -462,73 +482,89 @@ struct joint_matrix_mad_impl< N, LayoutC, sycl::sub_group> D; if constexpr (M == 16 && N == 16 && K == 16) { - if constexpr (std::is_same::value) { + if constexpr (std::is_same_v) { __imma_m16n16k16_mma_s8(D.data, A.data, B.data, C.data, get_layout_pair_id(), 0); - } else if constexpr (std::is_same::value) { + } else if constexpr (std::is_same_v) { __imma_m16n16k16_mma_u8(D.data, A.data, B.data, C.data, get_layout_pair_id(), 0); - } else if constexpr (std::is_same::value) { + } else if constexpr (std::is_same_v) { + auto ptrA = reinterpret_cast(&A.data); + auto ptrB = reinterpret_cast(&B.data); if constexpr (std::is_same::value) { - __hmma_m16n16k16_mma_f32f32(D.data, A.data, B.data, C.data, + __hmma_m16n16k16_mma_f32f32(D.data, ptrA, ptrB, C.data, get_layout_pair_id(), 0); - } else if constexpr (std::is_same::value) { - __hmma_m16n16k16_mma_f16f16(D.data, A.data, B.data, C.data, + } else if constexpr (std::is_same_v) 
{ + auto ptrC = reinterpret_cast(&C.data); + auto ptrD = reinterpret_cast(&D.data); + __hmma_m16n16k16_mma_f16f16(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } - } else if constexpr (std::is_same::value || - std::is_same::value) { - int32_t *destptrA = reinterpret_cast(&A.data); - int32_t *destptrB = reinterpret_cast(&B.data); - __mma_bf16_m16n16k16_mma_f32(D.data, destptrA, destptrB, C.data, + } else if constexpr (std::is_same_v || + std::is_same_v) { + auto ptrA = reinterpret_cast(&A.data); + auto ptrB = reinterpret_cast(&B.data); + __mma_bf16_m16n16k16_mma_f32(D.data, ptrA, ptrB, C.data, get_layout_pair_id(), 0); } } else if constexpr (M == 8 && N == 32 && K == 16) { - if constexpr (std::is_same::value) { + if constexpr (std::is_same_v) { __imma_m8n32k16_mma_s8(D.data, A.data, B.data, C.data, get_layout_pair_id(), 0); - } else if constexpr (std::is_same::value) { + } else if constexpr (std::is_same_v) { __imma_m8n32k16_mma_u8(D.data, A.data, B.data, C.data, get_layout_pair_id(), 0); - } else if constexpr (std::is_same::value) { - if constexpr (std::is_same::value) { - __hmma_m8n32k16_mma_f32f32(D.data, A.data, B.data, C.data, + } else if constexpr (std::is_same_v) { + auto ptrA = reinterpret_cast(&A.data); + auto ptrB = reinterpret_cast(&B.data); + if constexpr (std::is_same_v) { + __hmma_m8n32k16_mma_f32f32(D.data, ptrA, ptrB, C.data, get_layout_pair_id(), 0); - } else if constexpr (std::is_same::value) { - __hmma_m8n32k16_mma_f16f16(D.data, A.data, B.data, C.data, + } else if constexpr (std::is_same_v) { + auto ptrC = reinterpret_cast(&C.data); + auto ptrD = reinterpret_cast(&D.data); + __hmma_m8n32k16_mma_f16f16(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } - } else if constexpr (std::is_same::value || - std::is_same::value) { - __mma_bf16_m8n32k16_mma_f32(D.data, A.data, B.data, C.data, + } else if constexpr (std::is_same_v || + std::is_same_v) { + auto ptrA = reinterpret_cast(&A.data); + auto ptrB = reinterpret_cast(&B.data); + __mma_bf16_m8n32k16_mma_f32(D.data, ptrA, ptrB, C.data, get_layout_pair_id(), 0); } } else if constexpr (M == 32 && N == 8 && K == 16) { - if constexpr (std::is_same::value) { + if constexpr (std::is_same_v) { __imma_m32n8k16_mma_s8(D.data, A.data, B.data, C.data, get_layout_pair_id(), 0); - } else if constexpr (std::is_same::value) { + } else if constexpr (std::is_same_v) { __imma_m32n8k16_mma_u8(D.data, A.data, B.data, C.data, get_layout_pair_id(), 0); - } else if constexpr (std::is_same::value || - std::is_same::value) { - __mma_bf16_m32n8k16_mma_f32(D.data, A.data, B.data, C.data, + } else if constexpr (std::is_same_v || + std::is_same_v) { + auto ptrA = reinterpret_cast(&A.data); + auto ptrB = reinterpret_cast(&B.data); + __mma_bf16_m32n8k16_mma_f32(D.data, ptrA, ptrB, C.data, get_layout_pair_id(), 0); - } else if constexpr (std::is_same::value) { - if constexpr (std::is_same::value) { - __hmma_m32n8k16_mma_f32f32(D.data, A.data, B.data, C.data, + } else if constexpr (std::is_same_v) { + auto ptrA = reinterpret_cast(&A.data); + auto ptrB = reinterpret_cast(&B.data); + if constexpr (std::is_same_v) { + __hmma_m32n8k16_mma_f32f32(D.data, ptrA, ptrB, C.data, get_layout_pair_id(), 0); - } else if constexpr (std::is_same::value) { - __hmma_m32n8k16_mma_f16f16(D.data, A.data, B.data, C.data, + } else if constexpr (std::is_same_v) { + auto ptrC = reinterpret_cast(&C.data); + auto ptrD = reinterpret_cast(&D.data); + __hmma_m32n8k16_mma_f16f16(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } } - } else if constexpr (std::is_same::value) { + } else 
if constexpr (std::is_same_v) { __dmma_m8n8k4_mma_f64(D.data, A.data, B.data, C.data, get_layout_pair_id(), 0); } From d12b6c37c8bcfc1be764036289477f8740c7e2bd Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Tue, 12 Apr 2022 15:34:44 +0100 Subject: [PATCH 23/45] fragments now marray for wi_data use --- .../ext/oneapi/matrix/matrix-tensorcore.hpp | 299 ++++++++++-------- 1 file changed, 160 insertions(+), 139 deletions(-) diff --git a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp index fb75dd82aa754..6b1705b026538 100644 --- a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp +++ b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp @@ -25,99 +25,80 @@ template struct joint_matrix; -#define __SYCL_JOINT_MATRIX_OVERLOAD(type, use, M, N, frag_type, frag_size) \ - template \ - struct joint_matrix< \ - type, matrix_use::use, M, N, Layout, sycl::sub_group, \ - typename std::enable_if_t> { \ - frag_type data[frag_size]; \ - }; - -#define __SYCL_JOINT_MATRIX_OVERLOAD_ARR(type, use, M, N, size) \ +#define __SYCL_JOINT_MATRIX_OVERLOAD_ARR(type, use, M, N, size) \ template \ struct joint_matrix< \ type, matrix_use::use, M, N, Layout, sycl::sub_group, \ typename std::enable_if_t> { \ - sycl::marray data; \ + sycl::marray data; \ }; -__SYCL_JOINT_MATRIX_OVERLOAD_ARR(bfloat16, a, 16, 16, 8) -__SYCL_JOINT_MATRIX_OVERLOAD_ARR(bfloat16, b, 16, 16, 8) +// m8n32k16 __SYCL_JOINT_MATRIX_OVERLOAD_ARR(bfloat16, a, 8, 16, 4) __SYCL_JOINT_MATRIX_OVERLOAD_ARR(bfloat16, b, 16, 32, 16) -__SYCL_JOINT_MATRIX_OVERLOAD_ARR(bfloat16, a, 32, 16, 16) -__SYCL_JOINT_MATRIX_OVERLOAD_ARR(bfloat16, b, 16, 8, 4) - -// TODO this case needs to be treated carefully for element wise ops: The number of fragments actually used depends on SM version -// m8n32k16 __SYCL_JOINT_MATRIX_OVERLOAD_ARR(half, a, 8, 16, 16) __SYCL_JOINT_MATRIX_OVERLOAD_ARR(half, b, 16, 32, 16) __SYCL_JOINT_MATRIX_OVERLOAD_ARR(half, accumulator, 8, 32, 8) -//__SYCL_JOINT_MATRIX_OVERLOAD_ARR(float, accumulator, 8, 32, 8) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(float, accumulator, 8, 32, 8) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(int8_t, a, 8, 16, 4) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(int8_t, b, 16, 32, 16) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(uint8_t, a, 8, 16, 4) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(uint8_t, b, 16, 32, 16) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(int32_t, accumulator, 8, 32, 8) +// m32n8k16 +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(bfloat16, a, 32, 16, 16) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(bfloat16, b, 16, 8, 4) __SYCL_JOINT_MATRIX_OVERLOAD_ARR(half, a, 32, 16, 16) __SYCL_JOINT_MATRIX_OVERLOAD_ARR(half, b, 16, 8, 16) __SYCL_JOINT_MATRIX_OVERLOAD_ARR(half, accumulator, 32, 8, 8) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(float, accumulator, 32, 8, 8) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(int8_t, a, 32, 16, 16) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(int8_t, b, 16, 8, 4) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(uint8_t, a, 32, 16, 16) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(uint8_t, b, 16, 8, 4) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(int32_t, accumulator, 32, 8, 8) +// m16n16k16 +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(bfloat16, a, 16, 16, 8) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(bfloat16, b, 16, 16, 8) __SYCL_JOINT_MATRIX_OVERLOAD_ARR(half, a, 16, 16, 16) __SYCL_JOINT_MATRIX_OVERLOAD_ARR(half, b, 16, 16, 16) __SYCL_JOINT_MATRIX_OVERLOAD_ARR(half, accumulator, 16, 16, 8) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(float, accumulator, 16, 16, 8) + +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(int8_t, a, 16, 16, 8) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(int8_t, b, 16, 16, 
8) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(uint8_t, a, 16, 16, 8) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(uint8_t, b, 16, 16, 8) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(int32_t, accumulator, 16, 16, 8) +// m8n8k4 double only +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(double, a, 8, 4, 1) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(double, b, 4, 8, 1) +__SYCL_JOINT_MATRIX_OVERLOAD_ARR(double, accumulator, 8, 8, 2) #undef __SYCL_JOINT_MATRIX_OVERLOAD_ARR -// m8n8k4 double only -__SYCL_JOINT_MATRIX_OVERLOAD(double, a, 8, 4, double, 1) -__SYCL_JOINT_MATRIX_OVERLOAD(double, b, 4, 8, double, 1) -__SYCL_JOINT_MATRIX_OVERLOAD(double, accumulator, 8, 8, double, 2) +#define __SYCL_JOINT_MATRIX_OVERLOAD(type, use, M, N, frag_type, frag_size) \ + template \ + struct joint_matrix< \ + type, matrix_use::use, M, N, Layout, sycl::sub_group, \ + typename std::enable_if_t> { \ + frag_type data[frag_size]; \ + }; -// m8n32k16 // bf16 data format uint16_t implementation is deprecated +// m8n32k16 __SYCL_JOINT_MATRIX_OVERLOAD(uint16_t, a, 8, 16, int32_t, 2) __SYCL_JOINT_MATRIX_OVERLOAD(uint16_t, b, 16, 32, int32_t, 8) -//__SYCL_JOINT_MATRIX_OVERLOAD(bfloat16, a, 8, 16, int32_t, 2) -//__SYCL_JOINT_MATRIX_OVERLOAD(bfloat16, b, 16, 32, int32_t, 8) -//__SYCL_JOINT_MATRIX_OVERLOAD(half, a, 8, 16, int32_t, 8) -//__SYCL_JOINT_MATRIX_OVERLOAD(half, b, 16, 32, int32_t, 8) -__SYCL_JOINT_MATRIX_OVERLOAD(float, accumulator, 8, 32, float, 8) -//__SYCL_JOINT_MATRIX_OVERLOAD(half, accumulator, 8, 32, int32_t, 4) - -__SYCL_JOINT_MATRIX_OVERLOAD(int8_t, a, 8, 16, int32_t, 1) -__SYCL_JOINT_MATRIX_OVERLOAD(int8_t, b, 16, 32, int32_t, 4) -__SYCL_JOINT_MATRIX_OVERLOAD(uint8_t, a, 8, 16, int32_t, 1) -__SYCL_JOINT_MATRIX_OVERLOAD(uint8_t, b, 16, 32, int32_t, 4) -__SYCL_JOINT_MATRIX_OVERLOAD(int32_t, accumulator, 8, 32, int32_t, 8) - // m32n8k16 __SYCL_JOINT_MATRIX_OVERLOAD(uint16_t, a, 32, 16, int32_t, 8) __SYCL_JOINT_MATRIX_OVERLOAD(uint16_t, b, 16, 8, int32_t, 2) -//__SYCL_JOINT_MATRIX_OVERLOAD(bfloat16, a, 32, 16, int32_t, 8) -//__SYCL_JOINT_MATRIX_OVERLOAD(bfloat16, b, 16, 8, int32_t, 2) -//__SYCL_JOINT_MATRIX_OVERLOAD(half, a, 32, 16, int32_t, 8) -//__SYCL_JOINT_MATRIX_OVERLOAD(half, b, 16, 8, int32_t, 8) -__SYCL_JOINT_MATRIX_OVERLOAD(float, accumulator, 32, 8, float, 8) -//__SYCL_JOINT_MATRIX_OVERLOAD(half, accumulator, 32, 8, int32_t, 4) - -__SYCL_JOINT_MATRIX_OVERLOAD(int8_t, a, 32, 16, int32_t, 4) -__SYCL_JOINT_MATRIX_OVERLOAD(int8_t, b, 16, 8, int32_t, 1) -__SYCL_JOINT_MATRIX_OVERLOAD(uint8_t, a, 32, 16, int32_t, 4) -__SYCL_JOINT_MATRIX_OVERLOAD(uint8_t, b, 16, 8, int32_t, 1) -__SYCL_JOINT_MATRIX_OVERLOAD(int32_t, accumulator, 32, 8, int32_t, 8) - // m16n16k16 __SYCL_JOINT_MATRIX_OVERLOAD(uint16_t, a, 16, 16, int32_t, 4) __SYCL_JOINT_MATRIX_OVERLOAD(uint16_t, b, 16, 16, int32_t, 4) -//__SYCL_JOINT_MATRIX_OVERLOAD(half, a, 16, 16, int32_t, 8) -//__SYCL_JOINT_MATRIX_OVERLOAD(half, b, 16, 16, int32_t, 8) -__SYCL_JOINT_MATRIX_OVERLOAD(float, accumulator, 16, 16, float, 8) -//__SYCL_JOINT_MATRIX_OVERLOAD(half, accumulator, 16, 16, int32_t, 4) - -__SYCL_JOINT_MATRIX_OVERLOAD(int8_t, a, 16, 16, int32_t, 2) -__SYCL_JOINT_MATRIX_OVERLOAD(int8_t, b, 16, 16, int32_t, 2) -__SYCL_JOINT_MATRIX_OVERLOAD(uint8_t, a, 16, 16, int32_t, 2) -__SYCL_JOINT_MATRIX_OVERLOAD(uint8_t, b, 16, 16, int32_t, 2) -__SYCL_JOINT_MATRIX_OVERLOAD(int32_t, accumulator, 16, 16, int32_t, 8) #undef __SYCL_JOINT_MATRIX_OVERLOAD } // namespace experimental::matrix @@ -163,8 +144,8 @@ struct joint_matrix_load_impl< T, Use, NumRows, NumCols, Layout, sycl::sub_group> &res, multi_ptr src, size_t stride) { 
if constexpr (std::is_same_v || - std::is_same_v< - T, sycl::ext::oneapi::experimental::bfloat16>) { + std::is_same_v) { auto tileptr = reinterpret_cast(src.get()); auto destptr = reinterpret_cast(&res.data); if constexpr (NumRows == 16 && NumCols == 16) { @@ -192,52 +173,54 @@ struct joint_matrix_load_impl< } } else if constexpr (std::is_same_v) { auto tileptr = reinterpret_cast(src.get()); + auto destptr = reinterpret_cast(&res.data); if constexpr (NumRows == 16 && NumCols == 16) { if constexpr (Use == sycl::ext::oneapi::experimental::matrix::matrix_use::a) { - __imma_m16n16k16_ld_a_u8(res.data, tileptr, stride, + __imma_m16n16k16_ld_a_u8(destptr, tileptr, stride, get_layout_id()); } else if constexpr (Use == sycl::ext::oneapi::experimental::matrix:: matrix_use::b) { - __imma_m16n16k16_ld_b_u8(res.data, tileptr, stride, + __imma_m16n16k16_ld_b_u8(destptr, tileptr, stride, get_layout_id()); } } else if constexpr (NumRows == 8 && NumCols == 16) { - __imma_m8n32k16_ld_a_u8(res.data, tileptr, stride, + __imma_m8n32k16_ld_a_u8(destptr, tileptr, stride, get_layout_id()); } else if constexpr (NumRows == 16 && NumCols == 32) { - __imma_m8n32k16_ld_b_u8(res.data, tileptr, stride, + __imma_m8n32k16_ld_b_u8(destptr, tileptr, stride, get_layout_id()); } else if constexpr (NumRows == 32 && NumCols == 16) { - __imma_m32n8k16_ld_a_u8(res.data, tileptr, stride, + __imma_m32n8k16_ld_a_u8(destptr, tileptr, stride, get_layout_id()); } else if constexpr (NumRows == 16 && NumCols == 8) { - __imma_m32n8k16_ld_b_u8(res.data, tileptr, stride, + __imma_m32n8k16_ld_b_u8(destptr, tileptr, stride, get_layout_id()); } } else if constexpr (std::is_same_v) { auto tileptr = reinterpret_cast(src.get()); + auto destptr = reinterpret_cast(&res.data); if constexpr (NumRows == 16 && NumCols == 16) { if constexpr (Use == sycl::ext::oneapi::experimental::matrix::matrix_use::a) { - __imma_m16n16k16_ld_a_s8(res.data, tileptr, stride, + __imma_m16n16k16_ld_a_s8(destptr, tileptr, stride, get_layout_id()); } else if constexpr (Use == sycl::ext::oneapi::experimental::matrix:: matrix_use::b) { - __imma_m16n16k16_ld_b_s8(res.data, tileptr, stride, + __imma_m16n16k16_ld_b_s8(destptr, tileptr, stride, get_layout_id()); } } else if constexpr (NumRows == 8 && NumCols == 16) { - __imma_m8n32k16_ld_a_s8(res.data, tileptr, stride, + __imma_m8n32k16_ld_a_s8(destptr, tileptr, stride, get_layout_id()); } else if constexpr (NumRows == 16 && NumCols == 32) { - __imma_m8n32k16_ld_b_s8(res.data, tileptr, stride, + __imma_m8n32k16_ld_b_s8(destptr, tileptr, stride, get_layout_id()); } else if constexpr (NumRows == 32 && NumCols == 16) { - __imma_m32n8k16_ld_a_s8(res.data, tileptr, stride, + __imma_m32n8k16_ld_a_s8(destptr, tileptr, stride, get_layout_id()); } else if constexpr (NumRows == 16 && NumCols == 8) { - __imma_m32n8k16_ld_b_s8(res.data, tileptr, stride, + __imma_m32n8k16_ld_b_s8(destptr, tileptr, stride, get_layout_id()); } } else if constexpr (std::is_same_v) { @@ -258,17 +241,13 @@ struct joint_matrix_load_impl< get_layout_id()); } } else if constexpr (NumRows == 8 && NumCols == 16) { - __hmma_m8n32k16_ld_a(dstptr, tileptr, stride, - get_layout_id()); + __hmma_m8n32k16_ld_a(dstptr, tileptr, stride, get_layout_id()); } else if constexpr (NumRows == 16 && NumCols == 32) { - __hmma_m8n32k16_ld_b(dstptr, tileptr, stride, - get_layout_id()); + __hmma_m8n32k16_ld_b(dstptr, tileptr, stride, get_layout_id()); } else if constexpr (NumRows == 32 && NumCols == 16) { - __hmma_m32n8k16_ld_a(dstptr, tileptr, stride, - get_layout_id()); + 
__hmma_m32n8k16_ld_a(dstptr, tileptr, stride, get_layout_id()); } else if constexpr (NumRows == 16 && NumCols == 8) { - __hmma_m32n8k16_ld_b(dstptr, tileptr, stride, - get_layout_id()); + __hmma_m32n8k16_ld_b(dstptr, tileptr, stride, get_layout_id()); } else if constexpr (NumRows == 32 && NumCols == 8) { __hmma_m32n8k16_ld_c_f16(dstptr, tileptr, stride, get_layout_id()); @@ -278,40 +257,40 @@ struct joint_matrix_load_impl< } } else if constexpr (std::is_same_v) { + auto destptr = reinterpret_cast(&res.data); if constexpr (NumRows == 16 && NumCols == 16) { - __imma_m16n16k16_ld_c(res.data, src.get(), stride, + __imma_m16n16k16_ld_c(destptr, src.get(), stride, get_layout_id()); } else if constexpr (NumRows == 8 && NumCols == 32) { - __imma_m8n32k16_ld_c(res.data, src.get(), stride, + __imma_m8n32k16_ld_c(destptr, src.get(), stride, get_layout_id()); } else if constexpr (NumRows == 32 && NumCols == 8) { - __imma_m32n8k16_ld_c(res.data, src.get(), stride, + __imma_m32n8k16_ld_c(destptr, src.get(), stride, get_layout_id()); } } else if constexpr (std::is_same_v) { + auto dstptr = reinterpret_cast(&res.data); if constexpr (NumRows == 16 && NumCols == 16) { - __hmma_m16n16k16_ld_c_f32(res.data, src.get(), stride, + __hmma_m16n16k16_ld_c_f32(dstptr, src.get(), stride, get_layout_id()); } else if constexpr (NumRows == 8 && NumCols == 32) { - __hmma_m8n32k16_ld_c_f32(res.data, src.get(), stride, + __hmma_m8n32k16_ld_c_f32(dstptr, src.get(), stride, get_layout_id()); } else if constexpr (NumRows == 32 && NumCols == 8) { - __hmma_m32n8k16_ld_c_f32(res.data, src.get(), stride, + __hmma_m32n8k16_ld_c_f32(dstptr, src.get(), stride, get_layout_id()); } } else if constexpr (std::is_same_v) { + auto dstptr = reinterpret_cast(&res.data); if constexpr (Use == sycl::ext::oneapi::experimental::matrix::matrix_use::a) { - __dmma_m8n8k4_ld_a(res.data, src.get(), stride, - get_layout_id()); + __dmma_m8n8k4_ld_a(dstptr, src.get(), stride, get_layout_id()); } else if constexpr (Use == sycl::ext::oneapi::experimental::matrix:: matrix_use::b) { - __dmma_m8n8k4_ld_b(res.data, src.get(), stride, - get_layout_id()); + __dmma_m8n8k4_ld_b(dstptr, src.get(), stride, get_layout_id()); } else if constexpr (Use == sycl::ext::oneapi::experimental::matrix:: matrix_use::accumulator) { - __dmma_m8n8k4_ld_c(res.data, src.get(), stride, - get_layout_id()); + __dmma_m8n8k4_ld_c(dstptr, src.get(), stride, get_layout_id()); } } } @@ -343,11 +322,14 @@ struct joint_matrix_store_impl< NumRows, NumCols, Layout, sycl::sub_group> &src, multi_ptr dst, size_t stride) { if (NumRows == 16 && NumCols == 16) { + if constexpr (std::is_same_v) { - __hmma_m16n16k16_st_c_f32(dst.get(), src.data, stride, + auto srcptr = reinterpret_cast(&src.data); + __hmma_m16n16k16_st_c_f32(dst.get(), srcptr, stride, get_layout_id()); } else if constexpr (std::is_same_v) { - __imma_m16n16k16_st_c_i32(dst.get(), src.data, stride, + auto srcptr = reinterpret_cast(&src.data); + __imma_m16n16k16_st_c_i32(dst.get(), srcptr, stride, get_layout_id()); } else if constexpr (std::is_same_v) { auto tileptr = reinterpret_cast(dst.get()); @@ -357,32 +339,37 @@ struct joint_matrix_store_impl< } } else if (NumRows == 8 && NumCols == 32) { if constexpr (std::is_same_v) { - __hmma_m8n32k16_st_c_f32(dst.get(), src.data, stride, + auto srcptr = reinterpret_cast(&src.data); + __hmma_m8n32k16_st_c_f32(dst.get(), srcptr, stride, get_layout_id()); } else if constexpr (std::is_same_v) { - __imma_m8n32k16_st_c_i32(dst.get(), src.data, stride, + auto srcptr = reinterpret_cast(&src.data); + 
__imma_m8n32k16_st_c_i32(dst.get(), srcptr, stride, get_layout_id()); } else if constexpr (std::is_same_v) { auto tileptr = reinterpret_cast(dst.get()); - auto srcptr = reinterpret_cast(&src.data); + auto srcptr = reinterpret_cast(&src.data); __hmma_m8n32k16_st_c_f16(tileptr, srcptr, stride, get_layout_id()); } } else if (NumRows == 32 && NumCols == 8) { if constexpr (std::is_same_v) { - __hmma_m32n8k16_st_c_f32(dst.get(), src.data, stride, + auto srcptr = reinterpret_cast(&src.data); + __hmma_m32n8k16_st_c_f32(dst.get(), srcptr, stride, get_layout_id()); } else if constexpr (std::is_same_v) { - __imma_m32n8k16_st_c_i32(dst.get(), src.data, stride, + auto srcptr = reinterpret_cast(&src.data); + __imma_m32n8k16_st_c_i32(dst.get(), srcptr, stride, get_layout_id()); } else if constexpr (std::is_same_v) { auto tileptr = reinterpret_cast(dst.get()); - auto srcptr = reinterpret_cast(&src.data); + auto srcptr = reinterpret_cast(&src.data); __hmma_m32n8k16_st_c_f16(tileptr, srcptr, stride, get_layout_id()); } } else if constexpr (std::is_same_v) { - __dmma_m8n8k4_st_c_f64(dst.get(), src.data, stride, + auto srcptr = reinterpret_cast(&src.data); + __dmma_m8n8k4_st_c_f64(dst.get(), srcptr, stride, get_layout_id()); } } @@ -482,90 +469,124 @@ struct joint_matrix_mad_impl< N, LayoutC, sycl::sub_group> D; if constexpr (M == 16 && N == 16 && K == 16) { - if constexpr (std::is_same_v) { - __imma_m16n16k16_mma_s8(D.data, A.data, B.data, C.data, - get_layout_pair_id(), 0); - } else if constexpr (std::is_same_v) { - __imma_m16n16k16_mma_u8(D.data, A.data, B.data, C.data, - get_layout_pair_id(), 0); + if constexpr (std::is_same_v) { + auto ptrA = reinterpret_cast(&A.data); + auto ptrB = reinterpret_cast(&B.data); + auto ptrC = reinterpret_cast(&C.data); + auto ptrD = reinterpret_cast(&D.data); + if constexpr (std::is_same_v) { + __imma_m16n16k16_mma_s8(ptrD, ptrA, ptrB, ptrC, + get_layout_pair_id(), 0); + } else if constexpr (std::is_same_v) { + __imma_m16n16k16_mma_u8(ptrD, ptrA, ptrB, ptrC, + get_layout_pair_id(), 0); + } } else if constexpr (std::is_same_v) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); if constexpr (std::is_same::value) { - __hmma_m16n16k16_mma_f32f32(D.data, ptrA, ptrB, C.data, + auto ptrC = reinterpret_cast(&C.data); + auto ptrD = reinterpret_cast(&D.data); + __hmma_m16n16k16_mma_f32f32(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } else if constexpr (std::is_same_v) { - auto ptrC = reinterpret_cast(&C.data); - auto ptrD = reinterpret_cast(&D.data); + auto ptrC = reinterpret_cast(&C.data); + auto ptrD = reinterpret_cast(&D.data); __hmma_m16n16k16_mma_f16f16(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } } else if constexpr (std::is_same_v || - std::is_same_v) { + std::is_same_v< + T1, sycl::ext::oneapi::experimental::bfloat16>) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); - __mma_bf16_m16n16k16_mma_f32(D.data, ptrA, ptrB, C.data, + auto ptrC = reinterpret_cast(&C.data); + auto ptrD = reinterpret_cast(&D.data); + __mma_bf16_m16n16k16_mma_f32(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } } else if constexpr (M == 8 && N == 32 && K == 16) { - if constexpr (std::is_same_v) { - __imma_m8n32k16_mma_s8(D.data, A.data, B.data, C.data, - get_layout_pair_id(), 0); - } else if constexpr (std::is_same_v) { - __imma_m8n32k16_mma_u8(D.data, A.data, B.data, C.data, - get_layout_pair_id(), 0); + if constexpr (std::is_same_v) { + auto ptrA = reinterpret_cast(&A.data); + auto ptrB = reinterpret_cast(&B.data); + auto ptrC 
= reinterpret_cast(&C.data); + auto ptrD = reinterpret_cast(&D.data); + if constexpr (std::is_same_v) { + __imma_m8n32k16_mma_s8(ptrD, ptrA, ptrB, ptrC, + get_layout_pair_id(), 0); + } else if constexpr (std::is_same_v) { + __imma_m8n32k16_mma_u8(ptrD, ptrA, ptrB, ptrC, + get_layout_pair_id(), 0); + } } else if constexpr (std::is_same_v) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); if constexpr (std::is_same_v) { - __hmma_m8n32k16_mma_f32f32(D.data, ptrA, ptrB, C.data, + auto ptrC = reinterpret_cast(&C.data); + auto ptrD = reinterpret_cast(&D.data); + __hmma_m8n32k16_mma_f32f32(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } else if constexpr (std::is_same_v) { - auto ptrC = reinterpret_cast(&C.data); - auto ptrD = reinterpret_cast(&D.data); + auto ptrC = reinterpret_cast(&C.data); + auto ptrD = reinterpret_cast(&D.data); __hmma_m8n32k16_mma_f16f16(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } } else if constexpr (std::is_same_v || - std::is_same_v) { + std::is_same_v< + T1, sycl::ext::oneapi::experimental::bfloat16>) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); - __mma_bf16_m8n32k16_mma_f32(D.data, ptrA, ptrB, C.data, + auto ptrC = reinterpret_cast(&C.data); + auto ptrD = reinterpret_cast(&D.data); + __mma_bf16_m8n32k16_mma_f32(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } } else if constexpr (M == 32 && N == 8 && K == 16) { - if constexpr (std::is_same_v) { - __imma_m32n8k16_mma_s8(D.data, A.data, B.data, C.data, - get_layout_pair_id(), 0); - } else if constexpr (std::is_same_v) { - __imma_m32n8k16_mma_u8(D.data, A.data, B.data, C.data, - get_layout_pair_id(), 0); + if constexpr (std::is_same_v) { + auto ptrA = reinterpret_cast(&A.data); + auto ptrB = reinterpret_cast(&B.data); + auto ptrC = reinterpret_cast(&C.data); + auto ptrD = reinterpret_cast(&D.data); + if constexpr (std::is_same_v) { + __imma_m32n8k16_mma_s8(ptrD, ptrA, ptrB, ptrC, + get_layout_pair_id(), 0); + } else if constexpr (std::is_same_v) { + __imma_m32n8k16_mma_u8(ptrD, ptrA, ptrB, ptrC, + get_layout_pair_id(), 0); + } } else if constexpr (std::is_same_v || - std::is_same_v) { + std::is_same_v< + T1, sycl::ext::oneapi::experimental::bfloat16>) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); - __mma_bf16_m32n8k16_mma_f32(D.data, ptrA, ptrB, C.data, + auto ptrC = reinterpret_cast(&C.data); + auto ptrD = reinterpret_cast(&D.data); + __mma_bf16_m32n8k16_mma_f32(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } else if constexpr (std::is_same_v) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); if constexpr (std::is_same_v) { - __hmma_m32n8k16_mma_f32f32(D.data, ptrA, ptrB, C.data, + auto ptrC = reinterpret_cast(&C.data); + auto ptrD = reinterpret_cast(&D.data); + __hmma_m32n8k16_mma_f32f32(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } else if constexpr (std::is_same_v) { auto ptrC = reinterpret_cast(&C.data); - auto ptrD = reinterpret_cast(&D.data); + auto ptrD = reinterpret_cast(&D.data); __hmma_m32n8k16_mma_f16f16(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } } } else if constexpr (std::is_same_v) { - __dmma_m8n8k4_mma_f64(D.data, A.data, B.data, C.data, + auto ptrA = reinterpret_cast(&A.data); + auto ptrB = reinterpret_cast(&B.data); + auto ptrC = reinterpret_cast(&C.data); + auto ptrD = reinterpret_cast(&D.data); + __dmma_m8n8k4_mma_f64(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } return D; From 003ac9e532a68e1af97b1b107c595a7aea252dbf Mon Sep 17 
00:00:00 2001 From: "jack.kirk" Date: Tue, 12 Apr 2022 16:14:11 +0100 Subject: [PATCH 24/45] format --- .../matrix/matrix-nvptx-bfloat16-test.cpp | 6 ++++-- .../matrix/matrix-nvptx-double-test.cpp | 12 ++++++++---- .../matrix/matrix-nvptx-half-float-test.cpp | 12 ++++++++---- .../matrix/matrix-nvptx-half-half-test.cpp | 12 ++++++++---- .../matrix/matrix-nvptx-int8-test.cpp | 12 ++++++++---- .../matrix/matrix-nvptx-uint8-test.cpp | 12 ++++++++---- 6 files changed, 44 insertions(+), 22 deletions(-) diff --git a/sycl/test/check_device_code/matrix/matrix-nvptx-bfloat16-test.cpp b/sycl/test/check_device_code/matrix/matrix-nvptx-bfloat16-test.cpp index 3f2858f9edcf1..0c6b83986a928 100644 --- a/sycl/test/check_device_code/matrix/matrix-nvptx-bfloat16-test.cpp +++ b/sycl/test/check_device_code/matrix/matrix-nvptx-bfloat16-test.cpp @@ -27,9 +27,11 @@ int main() { sycl::accessor accB(bufB, cgh); - sycl::accessor + sycl::accessor accC(bufC, cgh); - sycl::accessor + sycl::accessor accD(bufD, cgh); cgh.parallel_for( diff --git a/sycl/test/check_device_code/matrix/matrix-nvptx-double-test.cpp b/sycl/test/check_device_code/matrix/matrix-nvptx-double-test.cpp index ce151261e3fae..06780dcf51989 100644 --- a/sycl/test/check_device_code/matrix/matrix-nvptx-double-test.cpp +++ b/sycl/test/check_device_code/matrix/matrix-nvptx-double-test.cpp @@ -31,13 +31,17 @@ int main() { q.submit([&](handler &cgh) { - sycl::accessor + sycl::accessor accA(bufA, cgh); - sycl::accessor + sycl::accessor accB(bufB, cgh); - sycl::accessor + sycl::accessor accC(bufC, cgh); - sycl::accessor + sycl::accessor accD(bufD, cgh); cgh.parallel_for( diff --git a/sycl/test/check_device_code/matrix/matrix-nvptx-half-float-test.cpp b/sycl/test/check_device_code/matrix/matrix-nvptx-half-float-test.cpp index 22d45fe5348ec..2d646837ee649 100644 --- a/sycl/test/check_device_code/matrix/matrix-nvptx-half-float-test.cpp +++ b/sycl/test/check_device_code/matrix/matrix-nvptx-half-float-test.cpp @@ -20,13 +20,17 @@ int main() { q.submit([&](handler &cgh) { - sycl::accessor + sycl::accessor accA(bufA, cgh); - sycl::accessor + sycl::accessor accB(bufB, cgh); - sycl::accessor + sycl::accessor accC(bufC, cgh); - sycl::accessor + sycl::accessor accD(bufD, cgh); cgh.parallel_for( diff --git a/sycl/test/check_device_code/matrix/matrix-nvptx-half-half-test.cpp b/sycl/test/check_device_code/matrix/matrix-nvptx-half-half-test.cpp index cb74b76c0fc12..dcafe64d2ca2b 100644 --- a/sycl/test/check_device_code/matrix/matrix-nvptx-half-half-test.cpp +++ b/sycl/test/check_device_code/matrix/matrix-nvptx-half-half-test.cpp @@ -20,13 +20,17 @@ int main() { q.submit([&](handler &cgh) { - sycl::accessor + sycl::accessor accA(bufA, cgh); - sycl::accessor + sycl::accessor accB(bufB, cgh); - sycl::accessor + sycl::accessor accC(bufC, cgh); - sycl::accessor + sycl::accessor accD(bufD, cgh); cgh.parallel_for( diff --git a/sycl/test/check_device_code/matrix/matrix-nvptx-int8-test.cpp b/sycl/test/check_device_code/matrix/matrix-nvptx-int8-test.cpp index 131bb63074a0e..0f329e8af6f19 100644 --- a/sycl/test/check_device_code/matrix/matrix-nvptx-int8-test.cpp +++ b/sycl/test/check_device_code/matrix/matrix-nvptx-int8-test.cpp @@ -20,13 +20,17 @@ int main() { q.submit([&](handler &cgh) { - sycl::accessor + sycl::accessor accA(bufA, cgh); - sycl::accessor + sycl::accessor accB(bufB, cgh); - sycl::accessor + sycl::accessor accC(bufC, cgh); - sycl::accessor + sycl::accessor accD(bufD, cgh); cgh.parallel_for( diff --git a/sycl/test/check_device_code/matrix/matrix-nvptx-uint8-test.cpp 
b/sycl/test/check_device_code/matrix/matrix-nvptx-uint8-test.cpp index b46f552f72e35..72823f018700b 100644 --- a/sycl/test/check_device_code/matrix/matrix-nvptx-uint8-test.cpp +++ b/sycl/test/check_device_code/matrix/matrix-nvptx-uint8-test.cpp @@ -20,13 +20,17 @@ int main() { q.submit([&](handler &cgh) { - sycl::accessor + sycl::accessor accA(bufA, cgh); - sycl::accessor + sycl::accessor accB(bufB, cgh); - sycl::accessor + sycl::accessor accC(bufC, cgh); - sycl::accessor + sycl::accessor accD(bufD, cgh); cgh.parallel_for( From 3c622499e06abee68da491e4c80791e525616e8a Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Tue, 12 Apr 2022 16:20:38 +0100 Subject: [PATCH 25/45] format --- .../test/check_device_code/matrix/matrix-nvptx-bfloat16-test.cpp | 1 - sycl/test/check_device_code/matrix/matrix-nvptx-double-test.cpp | 1 - .../check_device_code/matrix/matrix-nvptx-half-float-test.cpp | 1 - .../check_device_code/matrix/matrix-nvptx-half-half-test.cpp | 1 - sycl/test/check_device_code/matrix/matrix-nvptx-int8-test.cpp | 1 - sycl/test/check_device_code/matrix/matrix-nvptx-uint8-test.cpp | 1 - 6 files changed, 6 deletions(-) diff --git a/sycl/test/check_device_code/matrix/matrix-nvptx-bfloat16-test.cpp b/sycl/test/check_device_code/matrix/matrix-nvptx-bfloat16-test.cpp index 0c6b83986a928..724093bf13e77 100644 --- a/sycl/test/check_device_code/matrix/matrix-nvptx-bfloat16-test.cpp +++ b/sycl/test/check_device_code/matrix/matrix-nvptx-bfloat16-test.cpp @@ -20,7 +20,6 @@ int main() { queue q; q.submit([&](handler &cgh) { - sycl::accessor accA(bufA, cgh); diff --git a/sycl/test/check_device_code/matrix/matrix-nvptx-double-test.cpp b/sycl/test/check_device_code/matrix/matrix-nvptx-double-test.cpp index 06780dcf51989..f5a3fdfbca6af 100644 --- a/sycl/test/check_device_code/matrix/matrix-nvptx-double-test.cpp +++ b/sycl/test/check_device_code/matrix/matrix-nvptx-double-test.cpp @@ -30,7 +30,6 @@ int main() { queue q; q.submit([&](handler &cgh) { - sycl::accessor accA(bufA, cgh); diff --git a/sycl/test/check_device_code/matrix/matrix-nvptx-half-float-test.cpp b/sycl/test/check_device_code/matrix/matrix-nvptx-half-float-test.cpp index 2d646837ee649..d53e6ad37b2ca 100644 --- a/sycl/test/check_device_code/matrix/matrix-nvptx-half-float-test.cpp +++ b/sycl/test/check_device_code/matrix/matrix-nvptx-half-float-test.cpp @@ -19,7 +19,6 @@ int main() { queue q; q.submit([&](handler &cgh) { - sycl::accessor accA(bufA, cgh); diff --git a/sycl/test/check_device_code/matrix/matrix-nvptx-half-half-test.cpp b/sycl/test/check_device_code/matrix/matrix-nvptx-half-half-test.cpp index dcafe64d2ca2b..ec425cb228ed8 100644 --- a/sycl/test/check_device_code/matrix/matrix-nvptx-half-half-test.cpp +++ b/sycl/test/check_device_code/matrix/matrix-nvptx-half-half-test.cpp @@ -19,7 +19,6 @@ int main() { queue q; q.submit([&](handler &cgh) { - sycl::accessor accA(bufA, cgh); diff --git a/sycl/test/check_device_code/matrix/matrix-nvptx-int8-test.cpp b/sycl/test/check_device_code/matrix/matrix-nvptx-int8-test.cpp index 0f329e8af6f19..28166db16b3e3 100644 --- a/sycl/test/check_device_code/matrix/matrix-nvptx-int8-test.cpp +++ b/sycl/test/check_device_code/matrix/matrix-nvptx-int8-test.cpp @@ -19,7 +19,6 @@ int main() { queue q; q.submit([&](handler &cgh) { - sycl::accessor accA(bufA, cgh); diff --git a/sycl/test/check_device_code/matrix/matrix-nvptx-uint8-test.cpp b/sycl/test/check_device_code/matrix/matrix-nvptx-uint8-test.cpp index 72823f018700b..aee19c4fc0ce8 100644 --- a/sycl/test/check_device_code/matrix/matrix-nvptx-uint8-test.cpp 
+++ b/sycl/test/check_device_code/matrix/matrix-nvptx-uint8-test.cpp @@ -19,7 +19,6 @@ int main() { queue q; q.submit([&](handler &cgh) { - sycl::accessor accA(bufA, cgh); From b18a0c70cec789466f545c504aa15456e33e7460 Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Tue, 12 Apr 2022 16:56:21 +0100 Subject: [PATCH 26/45] if constexpr optimisation --- sycl/include/sycl/ext/oneapi/experimental/builtins.hpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp index f205c4638d613..c713072b34403 100644 --- a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp +++ b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp @@ -176,7 +176,7 @@ fabs(sycl::marray x) { for (size_t i = 0; i < N / 2; i++) res_storage[i] = __clc_fabs(x_storage[i]); - if (N % 2) { + if constexpr (N % 2) { res[N - 1] = bfloat16::from_bits(__clc_fabs(x[N - 1].raw())); } @@ -225,7 +225,7 @@ fmin(sycl::marray x, sycl::marray y) { for (size_t i = 0; i < N / 2; i++) res_storage[i] = __clc_fmin(x_storage[i], y_storage[i]); - if (N % 2) { + if constexpr (N % 2) { res[N - 1] = bfloat16::from_bits(__clc_fmin(x[N - 1].raw(), y[N - 1].raw())); } @@ -276,7 +276,7 @@ fmax(sycl::marray x, sycl::marray y) { for (size_t i = 0; i < N / 2; i++) res_storage[i] = __clc_fmax(x_storage[i], y_storage[i]); - if (N % 2) { + if constexpr (N % 2) { res[N - 1] = bfloat16::from_bits(__clc_fmax(x[N - 1].raw(), y[N - 1].raw())); } @@ -330,7 +330,7 @@ fma(sycl::marray x, sycl::marray y, sycl::marray z) { for (size_t i = 0; i < N / 2; i++) res_storage[i] = __clc_fma(x_storage[i], y_storage[i], z_storage[i]); - if (N % 2) { + if constexpr (N % 2) { res[N - 1] = bfloat16::from_bits( __clc_fma(x[N - 1].raw(), y[N - 1].raw(), z[N - 1].raw())); } From 342a83a730b3483cde905d70860577813239223f Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Wed, 13 Apr 2022 10:35:44 +0100 Subject: [PATCH 27/45] commit for demonstrative purposes examples of get_wi_data impls --- sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp index 6b1705b026538..cf451f274c5d5 100644 --- a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp +++ b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp @@ -32,6 +32,10 @@ struct joint_matrix; typename std::enable_if_t> { \ sycl::marray data; \ + size_t num_elements = size; \ + inline __SYCL_ALWAYS_INLINE sycl::marray &get_wi_data() { \ + return data; \ + } \ }; // m8n32k16 From 863450a8751be8f71832bc66f0409edaa81a5a37 Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Thu, 14 Apr 2022 16:26:59 +0100 Subject: [PATCH 28/45] std C++17 to sycl::detail C++17 --- .../sycl/ext/oneapi/experimental/builtins.hpp | 30 ++---- .../ext/oneapi/matrix/matrix-tensorcore.hpp | 97 +++++++++---------- 2 files changed, 54 insertions(+), 73 deletions(-) diff --git a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp index c713072b34403..461e9b883c969 100644 --- a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp +++ b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp @@ -27,10 +27,7 @@ #endif __SYCL_INLINE_NAMESPACE(cl) { -namespace sycl { -namespace ext { -namespace oneapi { -namespace experimental { +namespace sycl::ext::oneapi::experimental { // Provides functionality to print 
data from kernels in a C way: // - On non-host devices this function is directly mapped to printf from @@ -155,7 +152,7 @@ fabs(T x) { } template -std::enable_if_t, T> fabs(T x) { +std::enable_if_t, T> fabs(T x) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) return bfloat16::from_bits(__clc_fabs(x.raw())); #else @@ -166,7 +163,7 @@ std::enable_if_t, T> fabs(T x) { } template -std::enable_if_t, sycl::marray> +std::enable_if_t, sycl::marray> fabs(sycl::marray x) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) sycl::marray res; @@ -179,7 +176,6 @@ fabs(sycl::marray x) { if constexpr (N % 2) { res[N - 1] = bfloat16::from_bits(__clc_fabs(x[N - 1].raw())); } - return res; #else (void)x; @@ -202,7 +198,7 @@ fmin(T x, T y) { } template -std::enable_if_t, T> fmin(T x, T y) { +std::enable_if_t, T> fmin(T x, T y) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) return bfloat16::from_bits(__clc_fmin(x.raw(), y.raw())); #else @@ -214,7 +210,7 @@ std::enable_if_t, T> fmin(T x, T y) { } template -std::enable_if_t, sycl::marray> +std::enable_if_t, sycl::marray> fmin(sycl::marray x, sycl::marray y) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) sycl::marray res; @@ -253,7 +249,7 @@ fmax(T x, T y) { } template -std::enable_if_t, T> fmax(T x, T y) { +std::enable_if_t, T> fmax(T x, T y) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) return bfloat16::from_bits(__clc_fmax(x.raw(), y.raw())); #else @@ -265,7 +261,7 @@ std::enable_if_t, T> fmax(T x, T y) { } template -std::enable_if_t, sycl::marray> +std::enable_if_t, sycl::marray> fmax(sycl::marray x, sycl::marray y) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) sycl::marray res; @@ -280,7 +276,6 @@ fmax(sycl::marray x, sycl::marray y) { res[N - 1] = bfloat16::from_bits(__clc_fmax(x[N - 1].raw(), y[N - 1].raw())); } - return res; #else (void)x; @@ -305,7 +300,7 @@ fma(T x, T y, T z) { } template -std::enable_if_t, T> fma(T x, T y, T z) { +std::enable_if_t, T> fma(T x, T y, T z) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) return bfloat16::from_bits(__clc_fma(x.raw(), y.raw(), z.raw())); #else @@ -318,7 +313,7 @@ std::enable_if_t, T> fma(T x, T y, T z) { } template -std::enable_if_t, sycl::marray> +std::enable_if_t, sycl::marray> fma(sycl::marray x, sycl::marray y, sycl::marray z) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) sycl::marray res; @@ -334,7 +329,6 @@ fma(sycl::marray x, sycl::marray y, sycl::marray z) { res[N - 1] = bfloat16::from_bits( __clc_fma(x[N - 1].raw(), y[N - 1].raw(), z[N - 1].raw())); } - return res; #else (void)x; @@ -344,11 +338,7 @@ fma(sycl::marray x, sycl::marray y, sycl::marray z) { #endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } -} // namespace experimental -} // namespace oneapi -} // namespace ext - -} // namespace sycl +} // namespace sycl::ext::oneapi::experimental } // __SYCL_INLINE_NAMESPACE(cl) #undef __SYCL_CONSTANT_AS diff --git a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp index cf451f274c5d5..26dea5d86dccf 100644 --- a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp +++ b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp @@ -10,9 +10,7 @@ #include __SYCL_INLINE_NAMESPACE(cl) { -namespace sycl { -namespace ext { -namespace oneapi { +namespace sycl::ext::oneapi { namespace experimental::matrix { enum class matrix_use { a, b, accumulator }; @@ -32,10 +30,6 @@ struct joint_matrix; typename std::enable_if_t> { \ sycl::marray data; \ 
- size_t num_elements = size; \ - inline __SYCL_ALWAYS_INLINE sycl::marray &get_wi_data() { \ - return data; \ - } \ }; // m8n32k16 @@ -147,8 +141,8 @@ struct joint_matrix_load_impl< void load(sycl::ext::oneapi::experimental::matrix::joint_matrix< T, Use, NumRows, NumCols, Layout, sycl::sub_group> &res, multi_ptr src, size_t stride) { - if constexpr (std::is_same_v || - std::is_same_v || + sycl::detail::is_same_v) { auto tileptr = reinterpret_cast(src.get()); auto destptr = reinterpret_cast(&res.data); @@ -175,7 +169,7 @@ struct joint_matrix_load_impl< __mma_bf16_m32n8k16_ld_b(destptr, tileptr, stride, get_layout_id()); } - } else if constexpr (std::is_same_v) { + } else if constexpr (sycl::detail::is_same_v) { auto tileptr = reinterpret_cast(src.get()); auto destptr = reinterpret_cast(&res.data); if constexpr (NumRows == 16 && NumCols == 16) { @@ -201,7 +195,7 @@ struct joint_matrix_load_impl< __imma_m32n8k16_ld_b_u8(destptr, tileptr, stride, get_layout_id()); } - } else if constexpr (std::is_same_v) { + } else if constexpr (sycl::detail::is_same_v) { auto tileptr = reinterpret_cast(src.get()); auto destptr = reinterpret_cast(&res.data); if constexpr (NumRows == 16 && NumCols == 16) { @@ -227,7 +221,7 @@ struct joint_matrix_load_impl< __imma_m32n8k16_ld_b_s8(destptr, tileptr, stride, get_layout_id()); } - } else if constexpr (std::is_same_v) { + } else if constexpr (sycl::detail::is_same_v) { auto tileptr = reinterpret_cast(src.get()); auto dstptr = reinterpret_cast(&res.data); if constexpr (NumRows == 16 && NumCols == 16) { @@ -260,7 +254,7 @@ struct joint_matrix_load_impl< get_layout_id()); } - } else if constexpr (std::is_same_v) { + } else if constexpr (sycl::detail::is_same_v) { auto destptr = reinterpret_cast(&res.data); if constexpr (NumRows == 16 && NumCols == 16) { __imma_m16n16k16_ld_c(destptr, src.get(), stride, @@ -272,7 +266,7 @@ struct joint_matrix_load_impl< __imma_m32n8k16_ld_c(destptr, src.get(), stride, get_layout_id()); } - } else if constexpr (std::is_same_v) { + } else if constexpr (sycl::detail::is_same_v) { auto dstptr = reinterpret_cast(&res.data); if constexpr (NumRows == 16 && NumCols == 16) { __hmma_m16n16k16_ld_c_f32(dstptr, src.get(), stride, @@ -284,7 +278,7 @@ struct joint_matrix_load_impl< __hmma_m32n8k16_ld_c_f32(dstptr, src.get(), stride, get_layout_id()); } - } else if constexpr (std::is_same_v) { + } else if constexpr (sycl::detail::is_same_v) { auto dstptr = reinterpret_cast(&res.data); if constexpr (Use == sycl::ext::oneapi::experimental::matrix::matrix_use::a) { @@ -326,52 +320,51 @@ struct joint_matrix_store_impl< NumRows, NumCols, Layout, sycl::sub_group> &src, multi_ptr dst, size_t stride) { if (NumRows == 16 && NumCols == 16) { - - if constexpr (std::is_same_v) { + if constexpr (sycl::detail::is_same_v) { auto srcptr = reinterpret_cast(&src.data); __hmma_m16n16k16_st_c_f32(dst.get(), srcptr, stride, get_layout_id()); - } else if constexpr (std::is_same_v) { + } else if constexpr (sycl::detail::is_same_v) { auto srcptr = reinterpret_cast(&src.data); __imma_m16n16k16_st_c_i32(dst.get(), srcptr, stride, get_layout_id()); - } else if constexpr (std::is_same_v) { + } else if constexpr (sycl::detail::is_same_v) { auto tileptr = reinterpret_cast(dst.get()); auto srcptr = reinterpret_cast(&src.data); __hmma_m16n16k16_st_c_f16(tileptr, srcptr, stride, get_layout_id()); } } else if (NumRows == 8 && NumCols == 32) { - if constexpr (std::is_same_v) { + if constexpr (sycl::detail::is_same_v) { auto srcptr = reinterpret_cast(&src.data); 
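        // The WMMA store builtins take a raw pointer to the work-item's
        // fragment registers, so the address of the joint_matrix storage is
        // reinterpreted to the element pointer type the builtin expects
        // (float * here, int32_t * for the integer accumulators below).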
__hmma_m8n32k16_st_c_f32(dst.get(), srcptr, stride, get_layout_id()); - } else if constexpr (std::is_same_v) { + } else if constexpr (sycl::detail::is_same_v) { auto srcptr = reinterpret_cast(&src.data); __imma_m8n32k16_st_c_i32(dst.get(), srcptr, stride, get_layout_id()); - } else if constexpr (std::is_same_v) { + } else if constexpr (sycl::detail::is_same_v) { auto tileptr = reinterpret_cast(dst.get()); auto srcptr = reinterpret_cast(&src.data); __hmma_m8n32k16_st_c_f16(tileptr, srcptr, stride, get_layout_id()); } } else if (NumRows == 32 && NumCols == 8) { - if constexpr (std::is_same_v) { + if constexpr (sycl::detail::is_same_v) { auto srcptr = reinterpret_cast(&src.data); __hmma_m32n8k16_st_c_f32(dst.get(), srcptr, stride, get_layout_id()); - } else if constexpr (std::is_same_v) { + } else if constexpr (sycl::detail::is_same_v) { auto srcptr = reinterpret_cast(&src.data); __imma_m32n8k16_st_c_i32(dst.get(), srcptr, stride, get_layout_id()); - } else if constexpr (std::is_same_v) { + } else if constexpr (sycl::detail::is_same_v) { auto tileptr = reinterpret_cast(dst.get()); auto srcptr = reinterpret_cast(&src.data); __hmma_m32n8k16_st_c_f16(tileptr, srcptr, stride, get_layout_id()); } - } else if constexpr (std::is_same_v) { + } else if constexpr (sycl::detail::is_same_v) { auto srcptr = reinterpret_cast(&src.data); __dmma_m8n8k4_st_c_f64(dst.get(), srcptr, stride, get_layout_id()); @@ -473,19 +466,19 @@ struct joint_matrix_mad_impl< N, LayoutC, sycl::sub_group> D; if constexpr (M == 16 && N == 16 && K == 16) { - if constexpr (std::is_same_v) { + if constexpr (sycl::detail::is_same_v) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); auto ptrC = reinterpret_cast(&C.data); auto ptrD = reinterpret_cast(&D.data); - if constexpr (std::is_same_v) { + if constexpr (sycl::detail::is_same_v) { __imma_m16n16k16_mma_s8(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); - } else if constexpr (std::is_same_v) { + } else if constexpr (sycl::detail::is_same_v) { __imma_m16n16k16_mma_u8(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } - } else if constexpr (std::is_same_v) { + } else if constexpr (sycl::detail::is_same_v) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); if constexpr (std::is_same::value) { @@ -494,15 +487,15 @@ struct joint_matrix_mad_impl< __hmma_m16n16k16_mma_f32f32(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); - } else if constexpr (std::is_same_v) { + } else if constexpr (sycl::detail::is_same_v) { auto ptrC = reinterpret_cast(&C.data); auto ptrD = reinterpret_cast(&D.data); __hmma_m16n16k16_mma_f16f16(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } - } else if constexpr (std::is_same_v || - std::is_same_v< + } else if constexpr (sycl::detail::is_same_v || + sycl::detail::is_same_v< T1, sycl::ext::oneapi::experimental::bfloat16>) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); @@ -512,34 +505,34 @@ struct joint_matrix_mad_impl< get_layout_pair_id(), 0); } } else if constexpr (M == 8 && N == 32 && K == 16) { - if constexpr (std::is_same_v) { + if constexpr (sycl::detail::is_same_v) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); auto ptrC = reinterpret_cast(&C.data); auto ptrD = reinterpret_cast(&D.data); - if constexpr (std::is_same_v) { + if constexpr (sycl::detail::is_same_v) { __imma_m8n32k16_mma_s8(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); - } else if constexpr (std::is_same_v) { + } else if constexpr (sycl::detail::is_same_v) { 
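          // Unsigned 8-bit inputs take the _u8 variant of the integer MMA
          // builtin; the signed path above uses _s8. Both accumulate into
          // 32-bit integer fragments.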
__imma_m8n32k16_mma_u8(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } - } else if constexpr (std::is_same_v) { + } else if constexpr (sycl::detail::is_same_v) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); - if constexpr (std::is_same_v) { + if constexpr (sycl::detail::is_same_v) { auto ptrC = reinterpret_cast(&C.data); auto ptrD = reinterpret_cast(&D.data); __hmma_m8n32k16_mma_f32f32(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); - } else if constexpr (std::is_same_v) { + } else if constexpr (sycl::detail::is_same_v) { auto ptrC = reinterpret_cast(&C.data); auto ptrD = reinterpret_cast(&D.data); __hmma_m8n32k16_mma_f16f16(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } - } else if constexpr (std::is_same_v || - std::is_same_v< + } else if constexpr (sycl::detail::is_same_v || + sycl::detail::is_same_v< T1, sycl::ext::oneapi::experimental::bfloat16>) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); @@ -549,20 +542,20 @@ struct joint_matrix_mad_impl< get_layout_pair_id(), 0); } } else if constexpr (M == 32 && N == 8 && K == 16) { - if constexpr (std::is_same_v) { + if constexpr (sycl::detail::is_same_v) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); auto ptrC = reinterpret_cast(&C.data); auto ptrD = reinterpret_cast(&D.data); - if constexpr (std::is_same_v) { + if constexpr (sycl::detail::is_same_v) { __imma_m32n8k16_mma_s8(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); - } else if constexpr (std::is_same_v) { + } else if constexpr (sycl::detail::is_same_v) { __imma_m32n8k16_mma_u8(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } - } else if constexpr (std::is_same_v || - std::is_same_v< + } else if constexpr (sycl::detail::is_same_v || + sycl::detail::is_same_v< T1, sycl::ext::oneapi::experimental::bfloat16>) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); @@ -570,22 +563,22 @@ struct joint_matrix_mad_impl< auto ptrD = reinterpret_cast(&D.data); __mma_bf16_m32n8k16_mma_f32(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); - } else if constexpr (std::is_same_v) { + } else if constexpr (sycl::detail::is_same_v) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); - if constexpr (std::is_same_v) { + if constexpr (sycl::detail::is_same_v) { auto ptrC = reinterpret_cast(&C.data); auto ptrD = reinterpret_cast(&D.data); __hmma_m32n8k16_mma_f32f32(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); - } else if constexpr (std::is_same_v) { + } else if constexpr (sycl::detail::is_same_v) { auto ptrC = reinterpret_cast(&C.data); auto ptrD = reinterpret_cast(&D.data); __hmma_m32n8k16_mma_f16f16(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } } - } else if constexpr (std::is_same_v) { + } else if constexpr (sycl::detail::is_same_v) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); auto ptrC = reinterpret_cast(&C.data); @@ -668,7 +661,5 @@ joint_matrix_mad( } } // namespace experimental::matrix -} // namespace oneapi -} // namespace ext -} // namespace sycl +} // namespace sycl::ext::oneapi } // __SYCL_INLINE_NAMESPACE(cl) From 9106ddcf7596d088901b1e38dc3fba2107f203ed Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Thu, 14 Apr 2022 16:39:42 +0100 Subject: [PATCH 29/45] format --- sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp 
b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp index 26dea5d86dccf..2b0ff2970812d 100644 --- a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp +++ b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp @@ -142,8 +142,8 @@ struct joint_matrix_load_impl< T, Use, NumRows, NumCols, Layout, sycl::sub_group> &res, multi_ptr src, size_t stride) { if constexpr (sycl::detail::is_same_v || - sycl::detail::is_same_v) { + sycl::detail::is_same_v< + T, sycl::ext::oneapi::experimental::bfloat16>) { auto tileptr = reinterpret_cast(src.get()); auto destptr = reinterpret_cast(&res.data); if constexpr (NumRows == 16 && NumCols == 16) { From 1393371a704fab6beb343bc7a6f87713f88a643f Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Mon, 9 May 2022 15:46:14 +0100 Subject: [PATCH 30/45] Switched back to c++14. Signed-off-by: jack.kirk --- .../sycl/ext/oneapi/experimental/builtins.hpp | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp index 461e9b883c969..82ae19a306fb7 100644 --- a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp +++ b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp @@ -152,7 +152,7 @@ fabs(T x) { } template -std::enable_if_t, T> fabs(T x) { +std::enable_if_t::value, T> fabs(T x) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) return bfloat16::from_bits(__clc_fabs(x.raw())); #else @@ -163,7 +163,7 @@ std::enable_if_t, T> fabs(T x) { } template -std::enable_if_t, sycl::marray> +std::enable_if_t::value, sycl::marray> fabs(sycl::marray x) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) sycl::marray res; @@ -198,7 +198,7 @@ fmin(T x, T y) { } template -std::enable_if_t, T> fmin(T x, T y) { +std::enable_if_t::value, T> fmin(T x, T y) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) return bfloat16::from_bits(__clc_fmin(x.raw(), y.raw())); #else @@ -210,7 +210,7 @@ std::enable_if_t, T> fmin(T x, T y) { } template -std::enable_if_t, sycl::marray> +std::enable_if_t::value, sycl::marray> fmin(sycl::marray x, sycl::marray y) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) sycl::marray res; @@ -249,7 +249,7 @@ fmax(T x, T y) { } template -std::enable_if_t, T> fmax(T x, T y) { +std::enable_if_t::value, T> fmax(T x, T y) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) return bfloat16::from_bits(__clc_fmax(x.raw(), y.raw())); #else @@ -261,7 +261,7 @@ std::enable_if_t, T> fmax(T x, T y) { } template -std::enable_if_t, sycl::marray> +std::enable_if_t::value, sycl::marray> fmax(sycl::marray x, sycl::marray y) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) sycl::marray res; @@ -300,7 +300,7 @@ fma(T x, T y, T z) { } template -std::enable_if_t, T> fma(T x, T y, T z) { +std::enable_if_t::value, T> fma(T x, T y, T z) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) return bfloat16::from_bits(__clc_fma(x.raw(), y.raw(), z.raw())); #else @@ -313,7 +313,7 @@ std::enable_if_t, T> fma(T x, T y, T z) { } template -std::enable_if_t, sycl::marray> +std::enable_if_t::value, sycl::marray> fma(sycl::marray x, sycl::marray y, sycl::marray z) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) sycl::marray res; From 43c2bc56648d0a8c45cb348b024415e098903c60 Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Tue, 10 May 2022 11:22:01 +0100 Subject: [PATCH 31/45] Switched backed to std c++14 Signed-off-by: jack.kirk --- .../ext/oneapi/matrix/matrix-tensorcore.hpp | 92 
+++++++++---------- 1 file changed, 46 insertions(+), 46 deletions(-) diff --git a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp index 2b0ff2970812d..5f4fa7379d560 100644 --- a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp +++ b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp @@ -141,9 +141,9 @@ struct joint_matrix_load_impl< void load(sycl::ext::oneapi::experimental::matrix::joint_matrix< T, Use, NumRows, NumCols, Layout, sycl::sub_group> &res, multi_ptr src, size_t stride) { - if constexpr (sycl::detail::is_same_v || - sycl::detail::is_same_v< - T, sycl::ext::oneapi::experimental::bfloat16>) { + if constexpr (std::is_same::value || + std::is_same< + T, sycl::ext::oneapi::experimental::bfloat16>::value) { auto tileptr = reinterpret_cast(src.get()); auto destptr = reinterpret_cast(&res.data); if constexpr (NumRows == 16 && NumCols == 16) { @@ -169,7 +169,7 @@ struct joint_matrix_load_impl< __mma_bf16_m32n8k16_ld_b(destptr, tileptr, stride, get_layout_id()); } - } else if constexpr (sycl::detail::is_same_v) { + } else if constexpr (std::is_same::value) { auto tileptr = reinterpret_cast(src.get()); auto destptr = reinterpret_cast(&res.data); if constexpr (NumRows == 16 && NumCols == 16) { @@ -195,7 +195,7 @@ struct joint_matrix_load_impl< __imma_m32n8k16_ld_b_u8(destptr, tileptr, stride, get_layout_id()); } - } else if constexpr (sycl::detail::is_same_v) { + } else if constexpr (std::is_same::value) { auto tileptr = reinterpret_cast(src.get()); auto destptr = reinterpret_cast(&res.data); if constexpr (NumRows == 16 && NumCols == 16) { @@ -221,7 +221,7 @@ struct joint_matrix_load_impl< __imma_m32n8k16_ld_b_s8(destptr, tileptr, stride, get_layout_id()); } - } else if constexpr (sycl::detail::is_same_v) { + } else if constexpr (std::is_same::value) { auto tileptr = reinterpret_cast(src.get()); auto dstptr = reinterpret_cast(&res.data); if constexpr (NumRows == 16 && NumCols == 16) { @@ -254,7 +254,7 @@ struct joint_matrix_load_impl< get_layout_id()); } - } else if constexpr (sycl::detail::is_same_v) { + } else if constexpr (std::is_same::value) { auto destptr = reinterpret_cast(&res.data); if constexpr (NumRows == 16 && NumCols == 16) { __imma_m16n16k16_ld_c(destptr, src.get(), stride, @@ -266,7 +266,7 @@ struct joint_matrix_load_impl< __imma_m32n8k16_ld_c(destptr, src.get(), stride, get_layout_id()); } - } else if constexpr (sycl::detail::is_same_v) { + } else if constexpr (std::is_same::value) { auto dstptr = reinterpret_cast(&res.data); if constexpr (NumRows == 16 && NumCols == 16) { __hmma_m16n16k16_ld_c_f32(dstptr, src.get(), stride, @@ -278,7 +278,7 @@ struct joint_matrix_load_impl< __hmma_m32n8k16_ld_c_f32(dstptr, src.get(), stride, get_layout_id()); } - } else if constexpr (sycl::detail::is_same_v) { + } else if constexpr (std::is_same::value) { auto dstptr = reinterpret_cast(&res.data); if constexpr (Use == sycl::ext::oneapi::experimental::matrix::matrix_use::a) { @@ -320,51 +320,51 @@ struct joint_matrix_store_impl< NumRows, NumCols, Layout, sycl::sub_group> &src, multi_ptr dst, size_t stride) { if (NumRows == 16 && NumCols == 16) { - if constexpr (sycl::detail::is_same_v) { + if constexpr (std::is_same::value) { auto srcptr = reinterpret_cast(&src.data); __hmma_m16n16k16_st_c_f32(dst.get(), srcptr, stride, get_layout_id()); - } else if constexpr (sycl::detail::is_same_v) { + } else if constexpr (std::is_same::value) { auto srcptr = reinterpret_cast(&src.data); 
__imma_m16n16k16_st_c_i32(dst.get(), srcptr, stride, get_layout_id()); - } else if constexpr (sycl::detail::is_same_v) { + } else if constexpr (std::is_same::value) { auto tileptr = reinterpret_cast(dst.get()); auto srcptr = reinterpret_cast(&src.data); __hmma_m16n16k16_st_c_f16(tileptr, srcptr, stride, get_layout_id()); } } else if (NumRows == 8 && NumCols == 32) { - if constexpr (sycl::detail::is_same_v) { + if constexpr (std::is_same::value) { auto srcptr = reinterpret_cast(&src.data); __hmma_m8n32k16_st_c_f32(dst.get(), srcptr, stride, get_layout_id()); - } else if constexpr (sycl::detail::is_same_v) { + } else if constexpr (std::is_same::value) { auto srcptr = reinterpret_cast(&src.data); __imma_m8n32k16_st_c_i32(dst.get(), srcptr, stride, get_layout_id()); - } else if constexpr (sycl::detail::is_same_v) { + } else if constexpr (std::is_same::value) { auto tileptr = reinterpret_cast(dst.get()); auto srcptr = reinterpret_cast(&src.data); __hmma_m8n32k16_st_c_f16(tileptr, srcptr, stride, get_layout_id()); } } else if (NumRows == 32 && NumCols == 8) { - if constexpr (sycl::detail::is_same_v) { + if constexpr (std::is_same::value) { auto srcptr = reinterpret_cast(&src.data); __hmma_m32n8k16_st_c_f32(dst.get(), srcptr, stride, get_layout_id()); - } else if constexpr (sycl::detail::is_same_v) { + } else if constexpr (std::is_same::value) { auto srcptr = reinterpret_cast(&src.data); __imma_m32n8k16_st_c_i32(dst.get(), srcptr, stride, get_layout_id()); - } else if constexpr (sycl::detail::is_same_v) { + } else if constexpr (std::is_same::value) { auto tileptr = reinterpret_cast(dst.get()); auto srcptr = reinterpret_cast(&src.data); __hmma_m32n8k16_st_c_f16(tileptr, srcptr, stride, get_layout_id()); } - } else if constexpr (sycl::detail::is_same_v) { + } else if constexpr (std::is_same::value) { auto srcptr = reinterpret_cast(&src.data); __dmma_m8n8k4_st_c_f64(dst.get(), srcptr, stride, get_layout_id()); @@ -466,19 +466,19 @@ struct joint_matrix_mad_impl< N, LayoutC, sycl::sub_group> D; if constexpr (M == 16 && N == 16 && K == 16) { - if constexpr (sycl::detail::is_same_v) { + if constexpr (std::is_same::value) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); auto ptrC = reinterpret_cast(&C.data); auto ptrD = reinterpret_cast(&D.data); - if constexpr (sycl::detail::is_same_v) { + if constexpr (std::is_same::value) { __imma_m16n16k16_mma_s8(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); - } else if constexpr (sycl::detail::is_same_v) { + } else if constexpr (std::is_same::value) { __imma_m16n16k16_mma_u8(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } - } else if constexpr (sycl::detail::is_same_v) { + } else if constexpr (std::is_same::value) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); if constexpr (std::is_same::value) { @@ -487,16 +487,16 @@ struct joint_matrix_mad_impl< __hmma_m16n16k16_mma_f32f32(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); - } else if constexpr (sycl::detail::is_same_v) { + } else if constexpr (std::is_same::value) { auto ptrC = reinterpret_cast(&C.data); auto ptrD = reinterpret_cast(&D.data); __hmma_m16n16k16_mma_f16f16(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } - } else if constexpr (sycl::detail::is_same_v || - sycl::detail::is_same_v< - T1, sycl::ext::oneapi::experimental::bfloat16>) { + } else if constexpr (std::is_same::value || + std::is_same< + T1, sycl::ext::oneapi::experimental::bfloat16>::value) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); 
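        // The deprecated uint16_t storage format and the bfloat16 class carry
        // the same 16-bit payload, so both dispatch to the __mma_bf16_*
        // builtins; C and D are float fragments on this path.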
auto ptrC = reinterpret_cast(&C.data); @@ -505,35 +505,35 @@ struct joint_matrix_mad_impl< get_layout_pair_id(), 0); } } else if constexpr (M == 8 && N == 32 && K == 16) { - if constexpr (sycl::detail::is_same_v) { + if constexpr (std::is_same::value) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); auto ptrC = reinterpret_cast(&C.data); auto ptrD = reinterpret_cast(&D.data); - if constexpr (sycl::detail::is_same_v) { + if constexpr (std::is_same::value) { __imma_m8n32k16_mma_s8(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); - } else if constexpr (sycl::detail::is_same_v) { + } else if constexpr (std::is_same::value) { __imma_m8n32k16_mma_u8(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } - } else if constexpr (sycl::detail::is_same_v) { + } else if constexpr (std::is_same::value) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); - if constexpr (sycl::detail::is_same_v) { + if constexpr (std::is_same::value) { auto ptrC = reinterpret_cast(&C.data); auto ptrD = reinterpret_cast(&D.data); __hmma_m8n32k16_mma_f32f32(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); - } else if constexpr (sycl::detail::is_same_v) { + } else if constexpr (std::is_same::value) { auto ptrC = reinterpret_cast(&C.data); auto ptrD = reinterpret_cast(&D.data); __hmma_m8n32k16_mma_f16f16(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } - } else if constexpr (sycl::detail::is_same_v || - sycl::detail::is_same_v< - T1, sycl::ext::oneapi::experimental::bfloat16>) { + } else if constexpr (std::is_same::value || + std::is_same< + T1, sycl::ext::oneapi::experimental::bfloat16>::value) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); auto ptrC = reinterpret_cast(&C.data); @@ -542,43 +542,43 @@ struct joint_matrix_mad_impl< get_layout_pair_id(), 0); } } else if constexpr (M == 32 && N == 8 && K == 16) { - if constexpr (sycl::detail::is_same_v) { + if constexpr (std::is_same::value) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); auto ptrC = reinterpret_cast(&C.data); auto ptrD = reinterpret_cast(&D.data); - if constexpr (sycl::detail::is_same_v) { + if constexpr (std::is_same::value) { __imma_m32n8k16_mma_s8(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); - } else if constexpr (sycl::detail::is_same_v) { + } else if constexpr (std::is_same::value) { __imma_m32n8k16_mma_u8(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } - } else if constexpr (sycl::detail::is_same_v || - sycl::detail::is_same_v< - T1, sycl::ext::oneapi::experimental::bfloat16>) { + } else if constexpr (std::is_same::value || + std::is_same< + T1, sycl::ext::oneapi::experimental::bfloat16>::value) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); auto ptrC = reinterpret_cast(&C.data); auto ptrD = reinterpret_cast(&D.data); __mma_bf16_m32n8k16_mma_f32(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); - } else if constexpr (sycl::detail::is_same_v) { + } else if constexpr (std::is_same::value) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); - if constexpr (sycl::detail::is_same_v) { + if constexpr (std::is_same::value) { auto ptrC = reinterpret_cast(&C.data); auto ptrD = reinterpret_cast(&D.data); __hmma_m32n8k16_mma_f32f32(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); - } else if constexpr (sycl::detail::is_same_v) { + } else if constexpr (std::is_same::value) { auto ptrC = reinterpret_cast(&C.data); auto ptrD = reinterpret_cast(&D.data); 
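          // The trailing type pair on the __hmma builtins names the D and C
          // fragment element types, so this half-accumulate path uses
          // _mma_f16f16 while the float path above uses _mma_f32f32.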
__hmma_m32n8k16_mma_f16f16(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } } - } else if constexpr (sycl::detail::is_same_v) { + } else if constexpr (std::is_same::value) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); auto ptrC = reinterpret_cast(&C.data); From 4081add9641dab326688bb27df94978e82ff9edf Mon Sep 17 00:00:00 2001 From: "jack.kirk" Date: Tue, 10 May 2022 11:28:39 +0100 Subject: [PATCH 32/45] format Signed-off-by: jack.kirk --- .../sycl/ext/oneapi/matrix/matrix-tensorcore.hpp | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp index 5f4fa7379d560..ce8bbf8f3fbbd 100644 --- a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp +++ b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp @@ -495,8 +495,8 @@ struct joint_matrix_mad_impl< 0); } } else if constexpr (std::is_same::value || - std::is_same< - T1, sycl::ext::oneapi::experimental::bfloat16>::value) { + std::is_same::value) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); auto ptrC = reinterpret_cast(&C.data); @@ -532,8 +532,8 @@ struct joint_matrix_mad_impl< get_layout_pair_id(), 0); } } else if constexpr (std::is_same::value || - std::is_same< - T1, sycl::ext::oneapi::experimental::bfloat16>::value) { + std::is_same::value) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); auto ptrC = reinterpret_cast(&C.data); @@ -555,8 +555,8 @@ struct joint_matrix_mad_impl< get_layout_pair_id(), 0); } } else if constexpr (std::is_same::value || - std::is_same< - T1, sycl::ext::oneapi::experimental::bfloat16>::value) { + std::is_same::value) { auto ptrA = reinterpret_cast(&A.data); auto ptrB = reinterpret_cast(&B.data); auto ptrC = reinterpret_cast(&C.data); From 48177ccf049f1c17e6e365a8f693848b5dc34d1c Mon Sep 17 00:00:00 2001 From: JackAKirk Date: Fri, 27 May 2022 15:20:59 +0100 Subject: [PATCH 33/45] Impls of joint_matrix_fill, wi_data, get_wi_data(). 
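Each joint_matrix specialization now keeps its per-work-item fragment in a
sycl::marray member (wi_marray) and exposes it through get_wi_data(), which
returns a small wi_data view with length() and operator[]. joint_matrix_fill
broadcasts a scalar into the fragment on device. A rough usage sketch (the
sub-group handle sg and the scalar bias are assumed, not part of this patch):

    using namespace sycl::ext::oneapi::experimental::matrix;
    joint_matrix<int32_t, matrix_use::accumulator, 16, 16,
                 matrix_layout::row_major>
        acc;
    joint_matrix_fill(sg, acc, 0);           // broadcast 0 into wi_marray
    auto wi = acc.get_wi_data();             // per-work-item view of the fragment
    for (size_t i = 0; i < wi.length(); ++i) // element-wise post-processing
      wi[i] += bias;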
Signed-off-by: JackAKirk --- .../ext/oneapi/matrix/matrix-tensorcore.hpp | 161 +++++++++++------- 1 file changed, 96 insertions(+), 65 deletions(-) diff --git a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp index ce8bbf8f3fbbd..fea554043e9ed 100644 --- a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp +++ b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp @@ -17,6 +17,16 @@ enum class matrix_use { a, b, accumulator }; enum class matrix_layout { row_major, col_major, packed_a, packed_b }; +template class wi_data { + marray &data; + +public: + wi_data(marray &wi_data) : data(wi_data){}; + size_t length() { return data.size(); }; + + type &operator[](size_t i) { return data[i]; }; +}; + template > { \ - sycl::marray data; \ + marray wi_marray; \ + inline __SYCL_ALWAYS_INLINE wi_data get_wi_data() { \ + return wi_data(wi_marray); \ + }; \ }; // m8n32k16 @@ -84,7 +97,7 @@ __SYCL_JOINT_MATRIX_OVERLOAD_ARR(double, accumulator, 8, 8, 2) type, matrix_use::use, M, N, Layout, sycl::sub_group, \ typename std::enable_if_t> { \ - frag_type data[frag_size]; \ + frag_type wi_marray[frag_size]; \ }; // bf16 data format uint16_t implementation is deprecated @@ -99,6 +112,24 @@ __SYCL_JOINT_MATRIX_OVERLOAD(uint16_t, a, 16, 16, int32_t, 4) __SYCL_JOINT_MATRIX_OVERLOAD(uint16_t, b, 16, 16, int32_t, 4) #undef __SYCL_JOINT_MATRIX_OVERLOAD + +template +inline __SYCL_ALWAYS_INLINE void +joint_matrix_fill(Group sg, + joint_matrix &res, + const T2 v) { + // We kept the unused "sg" in joint_matrix_fill to match the other DPC++ + // functions + (void)sg; +#if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) + res.wi_marray = v; +#else + (void)res; + (void)v; +#endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) +} + } // namespace experimental::matrix namespace detail { @@ -145,7 +176,7 @@ struct joint_matrix_load_impl< std::is_same< T, sycl::ext::oneapi::experimental::bfloat16>::value) { auto tileptr = reinterpret_cast(src.get()); - auto destptr = reinterpret_cast(&res.data); + auto destptr = reinterpret_cast(&res.wi_marray); if constexpr (NumRows == 16 && NumCols == 16) { if constexpr (Use == sycl::ext::oneapi::experimental::matrix::matrix_use::a) { @@ -171,7 +202,7 @@ struct joint_matrix_load_impl< } } else if constexpr (std::is_same::value) { auto tileptr = reinterpret_cast(src.get()); - auto destptr = reinterpret_cast(&res.data); + auto destptr = reinterpret_cast(&res.wi_marray); if constexpr (NumRows == 16 && NumCols == 16) { if constexpr (Use == sycl::ext::oneapi::experimental::matrix::matrix_use::a) { @@ -197,7 +228,7 @@ struct joint_matrix_load_impl< } } else if constexpr (std::is_same::value) { auto tileptr = reinterpret_cast(src.get()); - auto destptr = reinterpret_cast(&res.data); + auto destptr = reinterpret_cast(&res.wi_marray); if constexpr (NumRows == 16 && NumCols == 16) { if constexpr (Use == sycl::ext::oneapi::experimental::matrix::matrix_use::a) { @@ -223,7 +254,7 @@ struct joint_matrix_load_impl< } } else if constexpr (std::is_same::value) { auto tileptr = reinterpret_cast(src.get()); - auto dstptr = reinterpret_cast(&res.data); + auto dstptr = reinterpret_cast(&res.wi_marray); if constexpr (NumRows == 16 && NumCols == 16) { if constexpr (Use == sycl::ext::oneapi::experimental::matrix::matrix_use::a) { @@ -255,7 +286,7 @@ struct joint_matrix_load_impl< } } else if constexpr (std::is_same::value) { - auto destptr = reinterpret_cast(&res.data); + auto destptr = reinterpret_cast(&res.wi_marray); if 
constexpr (NumRows == 16 && NumCols == 16) { __imma_m16n16k16_ld_c(destptr, src.get(), stride, get_layout_id()); @@ -267,7 +298,7 @@ struct joint_matrix_load_impl< get_layout_id()); } } else if constexpr (std::is_same::value) { - auto dstptr = reinterpret_cast(&res.data); + auto dstptr = reinterpret_cast(&res.wi_marray); if constexpr (NumRows == 16 && NumCols == 16) { __hmma_m16n16k16_ld_c_f32(dstptr, src.get(), stride, get_layout_id()); @@ -279,7 +310,7 @@ struct joint_matrix_load_impl< get_layout_id()); } } else if constexpr (std::is_same::value) { - auto dstptr = reinterpret_cast(&res.data); + auto dstptr = reinterpret_cast(&res.wi_marray); if constexpr (Use == sycl::ext::oneapi::experimental::matrix::matrix_use::a) { __dmma_m8n8k4_ld_a(dstptr, src.get(), stride, get_layout_id()); @@ -321,51 +352,51 @@ struct joint_matrix_store_impl< multi_ptr dst, size_t stride) { if (NumRows == 16 && NumCols == 16) { if constexpr (std::is_same::value) { - auto srcptr = reinterpret_cast(&src.data); + auto srcptr = reinterpret_cast(&src.wi_marray); __hmma_m16n16k16_st_c_f32(dst.get(), srcptr, stride, get_layout_id()); } else if constexpr (std::is_same::value) { - auto srcptr = reinterpret_cast(&src.data); + auto srcptr = reinterpret_cast(&src.wi_marray); __imma_m16n16k16_st_c_i32(dst.get(), srcptr, stride, get_layout_id()); } else if constexpr (std::is_same::value) { auto tileptr = reinterpret_cast(dst.get()); - auto srcptr = reinterpret_cast(&src.data); + auto srcptr = reinterpret_cast(&src.wi_marray); __hmma_m16n16k16_st_c_f16(tileptr, srcptr, stride, get_layout_id()); } } else if (NumRows == 8 && NumCols == 32) { if constexpr (std::is_same::value) { - auto srcptr = reinterpret_cast(&src.data); + auto srcptr = reinterpret_cast(&src.wi_marray); __hmma_m8n32k16_st_c_f32(dst.get(), srcptr, stride, get_layout_id()); } else if constexpr (std::is_same::value) { - auto srcptr = reinterpret_cast(&src.data); + auto srcptr = reinterpret_cast(&src.wi_marray); __imma_m8n32k16_st_c_i32(dst.get(), srcptr, stride, get_layout_id()); } else if constexpr (std::is_same::value) { auto tileptr = reinterpret_cast(dst.get()); - auto srcptr = reinterpret_cast(&src.data); + auto srcptr = reinterpret_cast(&src.wi_marray); __hmma_m8n32k16_st_c_f16(tileptr, srcptr, stride, get_layout_id()); } } else if (NumRows == 32 && NumCols == 8) { if constexpr (std::is_same::value) { - auto srcptr = reinterpret_cast(&src.data); + auto srcptr = reinterpret_cast(&src.wi_marray); __hmma_m32n8k16_st_c_f32(dst.get(), srcptr, stride, get_layout_id()); } else if constexpr (std::is_same::value) { - auto srcptr = reinterpret_cast(&src.data); + auto srcptr = reinterpret_cast(&src.wi_marray); __imma_m32n8k16_st_c_i32(dst.get(), srcptr, stride, get_layout_id()); } else if constexpr (std::is_same::value) { auto tileptr = reinterpret_cast(dst.get()); - auto srcptr = reinterpret_cast(&src.data); + auto srcptr = reinterpret_cast(&src.wi_marray); __hmma_m32n8k16_st_c_f16(tileptr, srcptr, stride, get_layout_id()); } } else if constexpr (std::is_same::value) { - auto srcptr = reinterpret_cast(&src.data); + auto srcptr = reinterpret_cast(&src.wi_marray); __dmma_m8n8k4_st_c_f64(dst.get(), srcptr, stride, get_layout_id()); } @@ -467,10 +498,10 @@ struct joint_matrix_mad_impl< D; if constexpr (M == 16 && N == 16 && K == 16) { if constexpr (std::is_same::value) { - auto ptrA = reinterpret_cast(&A.data); - auto ptrB = reinterpret_cast(&B.data); - auto ptrC = reinterpret_cast(&C.data); - auto ptrD = reinterpret_cast(&D.data); + auto ptrA = 
reinterpret_cast(&A.wi_marray); + auto ptrB = reinterpret_cast(&B.wi_marray); + auto ptrC = reinterpret_cast(&C.wi_marray); + auto ptrD = reinterpret_cast(&D.wi_marray); if constexpr (std::is_same::value) { __imma_m16n16k16_mma_s8(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); @@ -479,17 +510,17 @@ struct joint_matrix_mad_impl< get_layout_pair_id(), 0); } } else if constexpr (std::is_same::value) { - auto ptrA = reinterpret_cast(&A.data); - auto ptrB = reinterpret_cast(&B.data); + auto ptrA = reinterpret_cast(&A.wi_marray); + auto ptrB = reinterpret_cast(&B.wi_marray); if constexpr (std::is_same::value) { - auto ptrC = reinterpret_cast(&C.data); - auto ptrD = reinterpret_cast(&D.data); + auto ptrC = reinterpret_cast(&C.wi_marray); + auto ptrD = reinterpret_cast(&D.wi_marray); __hmma_m16n16k16_mma_f32f32(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } else if constexpr (std::is_same::value) { - auto ptrC = reinterpret_cast(&C.data); - auto ptrD = reinterpret_cast(&D.data); + auto ptrC = reinterpret_cast(&C.wi_marray); + auto ptrD = reinterpret_cast(&D.wi_marray); __hmma_m16n16k16_mma_f16f16(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); @@ -497,19 +528,19 @@ struct joint_matrix_mad_impl< } else if constexpr (std::is_same::value || std::is_same::value) { - auto ptrA = reinterpret_cast(&A.data); - auto ptrB = reinterpret_cast(&B.data); - auto ptrC = reinterpret_cast(&C.data); - auto ptrD = reinterpret_cast(&D.data); + auto ptrA = reinterpret_cast(&A.wi_marray); + auto ptrB = reinterpret_cast(&B.wi_marray); + auto ptrC = reinterpret_cast(&C.wi_marray); + auto ptrD = reinterpret_cast(&D.wi_marray); __mma_bf16_m16n16k16_mma_f32(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } } else if constexpr (M == 8 && N == 32 && K == 16) { if constexpr (std::is_same::value) { - auto ptrA = reinterpret_cast(&A.data); - auto ptrB = reinterpret_cast(&B.data); - auto ptrC = reinterpret_cast(&C.data); - auto ptrD = reinterpret_cast(&D.data); + auto ptrA = reinterpret_cast(&A.wi_marray); + auto ptrB = reinterpret_cast(&B.wi_marray); + auto ptrC = reinterpret_cast(&C.wi_marray); + auto ptrD = reinterpret_cast(&D.wi_marray); if constexpr (std::is_same::value) { __imma_m8n32k16_mma_s8(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); @@ -518,35 +549,35 @@ struct joint_matrix_mad_impl< get_layout_pair_id(), 0); } } else if constexpr (std::is_same::value) { - auto ptrA = reinterpret_cast(&A.data); - auto ptrB = reinterpret_cast(&B.data); + auto ptrA = reinterpret_cast(&A.wi_marray); + auto ptrB = reinterpret_cast(&B.wi_marray); if constexpr (std::is_same::value) { - auto ptrC = reinterpret_cast(&C.data); - auto ptrD = reinterpret_cast(&D.data); + auto ptrC = reinterpret_cast(&C.wi_marray); + auto ptrD = reinterpret_cast(&D.wi_marray); __hmma_m8n32k16_mma_f32f32(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } else if constexpr (std::is_same::value) { - auto ptrC = reinterpret_cast(&C.data); - auto ptrD = reinterpret_cast(&D.data); + auto ptrC = reinterpret_cast(&C.wi_marray); + auto ptrD = reinterpret_cast(&D.wi_marray); __hmma_m8n32k16_mma_f16f16(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } } else if constexpr (std::is_same::value || std::is_same::value) { - auto ptrA = reinterpret_cast(&A.data); - auto ptrB = reinterpret_cast(&B.data); - auto ptrC = reinterpret_cast(&C.data); - auto ptrD = reinterpret_cast(&D.data); + auto ptrA = reinterpret_cast(&A.wi_marray); + auto ptrB = reinterpret_cast(&B.wi_marray); + auto ptrC = reinterpret_cast(&C.wi_marray); + auto ptrD = 
reinterpret_cast(&D.wi_marray); __mma_bf16_m8n32k16_mma_f32(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } } else if constexpr (M == 32 && N == 8 && K == 16) { if constexpr (std::is_same::value) { - auto ptrA = reinterpret_cast(&A.data); - auto ptrB = reinterpret_cast(&B.data); - auto ptrC = reinterpret_cast(&C.data); - auto ptrD = reinterpret_cast(&D.data); + auto ptrA = reinterpret_cast(&A.wi_marray); + auto ptrB = reinterpret_cast(&B.wi_marray); + auto ptrC = reinterpret_cast(&C.wi_marray); + auto ptrD = reinterpret_cast(&D.wi_marray); if constexpr (std::is_same::value) { __imma_m32n8k16_mma_s8(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); @@ -557,32 +588,32 @@ struct joint_matrix_mad_impl< } else if constexpr (std::is_same::value || std::is_same::value) { - auto ptrA = reinterpret_cast(&A.data); - auto ptrB = reinterpret_cast(&B.data); - auto ptrC = reinterpret_cast(&C.data); - auto ptrD = reinterpret_cast(&D.data); + auto ptrA = reinterpret_cast(&A.wi_marray); + auto ptrB = reinterpret_cast(&B.wi_marray); + auto ptrC = reinterpret_cast(&C.wi_marray); + auto ptrD = reinterpret_cast(&D.wi_marray); __mma_bf16_m32n8k16_mma_f32(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } else if constexpr (std::is_same::value) { - auto ptrA = reinterpret_cast(&A.data); - auto ptrB = reinterpret_cast(&B.data); + auto ptrA = reinterpret_cast(&A.wi_marray); + auto ptrB = reinterpret_cast(&B.wi_marray); if constexpr (std::is_same::value) { - auto ptrC = reinterpret_cast(&C.data); - auto ptrD = reinterpret_cast(&D.data); + auto ptrC = reinterpret_cast(&C.wi_marray); + auto ptrD = reinterpret_cast(&D.wi_marray); __hmma_m32n8k16_mma_f32f32(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } else if constexpr (std::is_same::value) { - auto ptrC = reinterpret_cast(&C.data); - auto ptrD = reinterpret_cast(&D.data); + auto ptrC = reinterpret_cast(&C.wi_marray); + auto ptrD = reinterpret_cast(&D.wi_marray); __hmma_m32n8k16_mma_f16f16(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } } } else if constexpr (std::is_same::value) { - auto ptrA = reinterpret_cast(&A.data); - auto ptrB = reinterpret_cast(&B.data); - auto ptrC = reinterpret_cast(&C.data); - auto ptrD = reinterpret_cast(&D.data); + auto ptrA = reinterpret_cast(&A.wi_marray); + auto ptrB = reinterpret_cast(&B.wi_marray); + auto ptrC = reinterpret_cast(&C.wi_marray); + auto ptrD = reinterpret_cast(&D.wi_marray); __dmma_m8n8k4_mma_f64(ptrD, ptrA, ptrB, ptrC, get_layout_pair_id(), 0); } From 18c71dfccafb10783341704a4eb903c46c4e7873 Mon Sep 17 00:00:00 2001 From: JackAKirk Date: Tue, 31 May 2022 10:43:15 +0100 Subject: [PATCH 34/45] Added runtime errors for host. 
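The host paths added here follow the device-only guard pattern used throughout this header: the accessor body is compiled only for the CUDA device, and a host-side call throws. A minimal sketch of the guarded class, with hypothetical template parameter names T and N standing in for the real ones (this copy of the hunk does not preserve the template argument lists, so treat the parameter spelling as an assumption; the error wording and the PI_INVALID_DEVICE code are taken from the hunk itself):

    template <typename T, size_t N> class wi_data {
      sycl::marray<T, N> &data;

    public:
      wi_data(sycl::marray<T, N> &m) : data(m) {}

      size_t length() {
    #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__)
        return data.size();
    #else
        throw runtime_error("joint matrix is not supported on host device.",
                            PI_INVALID_DEVICE);
    #endif
      }

      T &operator[](size_t i) {
    #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__)
        return data[i];
    #else
        throw runtime_error("joint matrix is not supported on host device.",
                            PI_INVALID_DEVICE);
    #endif
      }
    };
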
Signed-off-by: JackAKirk --- .../ext/oneapi/matrix/matrix-tensorcore.hpp | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp index fea554043e9ed..a6aadff89d551 100644 --- a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp +++ b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp @@ -22,9 +22,23 @@ template class wi_data { public: wi_data(marray &wi_data) : data(wi_data){}; - size_t length() { return data.size(); }; + size_t length() { +#if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) + return data.size(); +#else + throw runtime_error("joint matrix is not supported on host device.", + PI_INVALID_DEVICE); +#endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) + }; - type &operator[](size_t i) { return data[i]; }; + type &operator[](size_t i) { +#if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) + return data[i]; +#else + throw runtime_error("joint matrix is not supported on host device.", + PI_INVALID_DEVICE); +#endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) + }; }; template Date: Thu, 2 Jun 2022 18:16:19 +0100 Subject: [PATCH 35/45] Removed storage type impls. Signed-off-by: JackAKirk --- .../sycl/ext/oneapi/experimental/builtins.hpp | 100 +++--------------- 1 file changed, 12 insertions(+), 88 deletions(-) diff --git a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp index 82ae19a306fb7..afd15a75834fe 100644 --- a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp +++ b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp @@ -115,42 +115,6 @@ inline __SYCL_ALWAYS_INLINE } // namespace native -namespace detail { - -template struct is_bf16_storage_type { - static constexpr int value = false; -}; - -template <> struct is_bf16_storage_type { - static constexpr int value = true; -}; - -template <> struct is_bf16_storage_type { - static constexpr int value = true; -}; - -template struct is_bf16_storage_type> { - static constexpr int value = true; -}; - -template struct is_bf16_storage_type> { - static constexpr int value = true; -}; - -} // namespace detail - -template -std::enable_if_t::value, T> -fabs(T x) { -#if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) - return __clc_fabs(x); -#else - (void)x; - throw runtime_error("bfloat16 is not currently supported on the host device.", - PI_INVALID_DEVICE); -#endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) -} - template std::enable_if_t::value, T> fabs(T x) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) @@ -162,9 +126,8 @@ std::enable_if_t::value, T> fabs(T x) { #endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } -template -std::enable_if_t::value, sycl::marray> -fabs(sycl::marray x) { +template +sycl::marray fabs(sycl::marray x) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) sycl::marray res; auto x_storage = reinterpret_cast(&x); @@ -184,19 +147,6 @@ fabs(sycl::marray x) { #endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } -template -std::enable_if_t::value, T> -fmin(T x, T y) { -#if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) - return __clc_fmin(x, y); -#else - (void)x; - (void)y; - throw runtime_error("bfloat16 is not currently supported on the host device.", - PI_INVALID_DEVICE); -#endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) -} - template std::enable_if_t::value, T> fmin(T x, T y) { #if 
defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) @@ -209,9 +159,9 @@ std::enable_if_t::value, T> fmin(T x, T y) { #endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } -template -std::enable_if_t::value, sycl::marray> -fmin(sycl::marray x, sycl::marray y) { +template +sycl::marray fmin(sycl::marray x, + sycl::marray y) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) sycl::marray res; auto x_storage = reinterpret_cast(&x); @@ -235,19 +185,6 @@ fmin(sycl::marray x, sycl::marray y) { #endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } -template -std::enable_if_t::value, T> -fmax(T x, T y) { -#if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) - return __clc_fmax(x, y); -#else - (void)x; - (void)y; - throw runtime_error("bfloat16 is not currently supported on the host device.", - PI_INVALID_DEVICE); -#endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) -} - template std::enable_if_t::value, T> fmax(T x, T y) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) @@ -260,9 +197,9 @@ std::enable_if_t::value, T> fmax(T x, T y) { #endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } -template -std::enable_if_t::value, sycl::marray> -fmax(sycl::marray x, sycl::marray y) { +template +sycl::marray fmax(sycl::marray x, + sycl::marray y) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) sycl::marray res; auto x_storage = reinterpret_cast(&x); @@ -285,20 +222,6 @@ fmax(sycl::marray x, sycl::marray y) { #endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } -template -std::enable_if_t::value, T> -fma(T x, T y, T z) { -#if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) - return __clc_fma(x, y, z); -#else - (void)x; - (void)y; - (void)z; - throw runtime_error("bfloat16 is not currently supported on the host device.", - PI_INVALID_DEVICE); -#endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) -} - template std::enable_if_t::value, T> fma(T x, T y, T z) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) @@ -312,9 +235,10 @@ std::enable_if_t::value, T> fma(T x, T y, T z) { #endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } -template -std::enable_if_t::value, sycl::marray> -fma(sycl::marray x, sycl::marray y, sycl::marray z) { +template +sycl::marray fma(sycl::marray x, + sycl::marray y, + sycl::marray z) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) sycl::marray res; auto x_storage = reinterpret_cast(&x); From e46997b442477103a12d474d52ff29d0aaed13c8 Mon Sep 17 00:00:00 2001 From: JackAKirk Date: Mon, 6 Jun 2022 14:57:25 +0100 Subject: [PATCH 36/45] wi_data constructor made private. 
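With the constructor private and joint_matrix declared as a friend, user code can no longer build a wi_data object directly; the per-work-item view is only handed out by joint_matrix::get_wi_data(). An illustrative device-side fragment showing the intended access pattern together with joint_matrix_fill (the helper name, the float element type and the 0.5f scale factor are assumptions for the example, not part of the patch):

    // Fill a fragment, then update the elements this work item owns.
    // JM stands for any joint_matrix specialization defined in this header.
    template <typename Group, typename JM>
    void fill_and_scale(Group sg, JM &sub_c) {
      joint_matrix_fill(sg, sub_c, 1.0f); // added earlier in this series
      auto wi = sub_c.get_wi_data();      // wi_data bound to sub_c.wi_marray
      for (size_t i = 0; i < wi.length(); ++i)
        wi[i] *= 0.5f;                    // assumes a float accumulator fragment
    }
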
Signed-off-by: JackAKirk --- .../ext/oneapi/matrix/matrix-tensorcore.hpp | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp index a6aadff89d551..f324b7bd2743e 100644 --- a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp +++ b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp @@ -17,11 +17,20 @@ enum class matrix_use { a, b, accumulator }; enum class matrix_layout { row_major, col_major, packed_a, packed_b }; +template +struct joint_matrix; + template class wi_data { marray &data; + wi_data(marray &wi_data) : data(wi_data){}; + template + friend struct joint_matrix; public: - wi_data(marray &wi_data) : data(wi_data){}; size_t length() { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) return data.size(); @@ -41,12 +50,6 @@ template class wi_data { }; }; -template -struct joint_matrix; - #define __SYCL_JOINT_MATRIX_OVERLOAD_ARR(type, use, M, N, size) \ template \ struct joint_matrix< \ From b104b30fc8a3f751b67cad44de7d68f54e6fcd05 Mon Sep 17 00:00:00 2001 From: JackAKirk Date: Wed, 8 Jun 2022 12:49:28 +0100 Subject: [PATCH 37/45] Use std::ignore. Signed-off-by: JackAKirk --- .../sycl/ext/oneapi/experimental/builtins.hpp | 16 +++++----- .../ext/oneapi/matrix/matrix-tensorcore.hpp | 30 +++++++++---------- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp index afd15a75834fe..0d5eeff27d168 100644 --- a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp +++ b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp @@ -120,7 +120,7 @@ std::enable_if_t::value, T> fabs(T x) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) return bfloat16::from_bits(__clc_fabs(x.raw())); #else - (void)x; + std::ignore = x; throw runtime_error("bfloat16 is not currently supported on the host device.", PI_INVALID_DEVICE); #endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) @@ -141,7 +141,7 @@ sycl::marray fabs(sycl::marray x) { } return res; #else - (void)x; + std::ignore = x; throw runtime_error("bfloat16 is not currently supported on the host device.", PI_INVALID_DEVICE); #endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) @@ -152,7 +152,7 @@ std::enable_if_t::value, T> fmin(T x, T y) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) return bfloat16::from_bits(__clc_fmin(x.raw(), y.raw())); #else - (void)x; + std::ignore = x; (void)y; throw runtime_error("bfloat16 is not currently supported on the host device.", PI_INVALID_DEVICE); @@ -178,7 +178,7 @@ sycl::marray fmin(sycl::marray x, return res; #else - (void)x; + std::ignore = x; (void)y; throw runtime_error("bfloat16 is not currently supported on the host device.", PI_INVALID_DEVICE); @@ -190,7 +190,7 @@ std::enable_if_t::value, T> fmax(T x, T y) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) return bfloat16::from_bits(__clc_fmax(x.raw(), y.raw())); #else - (void)x; + std::ignore = x; (void)y; throw runtime_error("bfloat16 is not currently supported on the host device.", PI_INVALID_DEVICE); @@ -215,7 +215,7 @@ sycl::marray fmax(sycl::marray x, } return res; #else - (void)x; + std::ignore = x; (void)y; throw runtime_error("bfloat16 is not currently supported on the host device.", PI_INVALID_DEVICE); @@ -227,7 +227,7 @@ std::enable_if_t::value, T> fma(T x, T y, T z) { #if defined(__SYCL_DEVICE_ONLY__) && 
defined(__NVPTX__) return bfloat16::from_bits(__clc_fma(x.raw(), y.raw(), z.raw())); #else - (void)x; + std::ignore = x; (void)y; (void)z; throw runtime_error("bfloat16 is not currently supported on the host device.", @@ -255,7 +255,7 @@ sycl::marray fma(sycl::marray x, } return res; #else - (void)x; + std::ignore = x; (void)y; throw runtime_error("bfloat16 is not currently supported on the host device.", PI_INVALID_DEVICE); diff --git a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp index f324b7bd2743e..6cafdd5e81c1f 100644 --- a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp +++ b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp @@ -138,12 +138,12 @@ joint_matrix_fill(Group sg, const T2 v) { // We kept the unused "sg" in joint_matrix_fill to match the other DPC++ // functions - (void)sg; + std::ignore = sg; #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) res.wi_marray = v; #else - (void)res; - (void)v; + std::ignore = res; + std::ignore = v; #endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } @@ -652,10 +652,10 @@ void joint_matrix_load( Layout, Space>{} .load(res, src, stride); #else - (void)sg; - (void)res; - (void)src; - (void)stride; + std::ignore = sg; + std::ignore = res; + std::ignore = src; + std::ignore = stride; throw runtime_error( "When using SYCL_EXT_ONEAPI_MATRIX=3 joint_matrix_load is " "only supported by CUDA devices", @@ -674,10 +674,10 @@ void joint_matrix_store(Group sg, Layout, Space>{} .store(src, dst, stride); #else - (void)sg; - (void)src; - (void)dst; - (void)stride; + std::ignore = sg; + std::ignore = src; + std::ignore = dst; + std::ignore = stride; throw runtime_error( "When using SYCL_EXT_ONEAPI_MATRIX=3 joint_matrix_store is " "only supported by CUDA devices", @@ -698,10 +698,10 @@ joint_matrix_mad( T1, T2, M, K, N, LayoutA, LayoutB, LayoutC>{} .mad(A, B, C); #else - (void)sg; - (void)A; - (void)B; - (void)C; + std::ignore = sg; + std::ignore = A; + std::ignore = B; + std::ignore = C; throw runtime_error("When using SYCL_EXT_ONEAPI_MATRIX=3 joint_matrix_mad is " "only supported by CUDA devices", PI_INVALID_DEVICE); From a67d1fd51ab70eece6a98674ca47edb4d13a9053 Mon Sep 17 00:00:00 2001 From: JackAKirk Date: Thu, 9 Jun 2022 15:22:45 +0100 Subject: [PATCH 38/45] PI_INVALID_DEVICE -> PI_ERROR_INVALID_DEVICE Signed-off-by: JackAKirk --- .../sycl/ext/oneapi/experimental/builtins.hpp | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp index 0d5eeff27d168..ab4d09437d113 100644 --- a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp +++ b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp @@ -122,7 +122,7 @@ std::enable_if_t::value, T> fabs(T x) { #else std::ignore = x; throw runtime_error("bfloat16 is not currently supported on the host device.", - PI_INVALID_DEVICE); + PI_ERROR_INVALID_DEVICE); #endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } @@ -143,7 +143,7 @@ sycl::marray fabs(sycl::marray x) { #else std::ignore = x; throw runtime_error("bfloat16 is not currently supported on the host device.", - PI_INVALID_DEVICE); + PI_ERROR_INVALID_DEVICE); #endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } @@ -155,7 +155,7 @@ std::enable_if_t::value, T> fmin(T x, T y) { std::ignore = x; (void)y; throw runtime_error("bfloat16 is not currently supported on the host device.", - 
PI_INVALID_DEVICE); + PI_ERROR_INVALID_DEVICE); #endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } @@ -181,7 +181,7 @@ sycl::marray fmin(sycl::marray x, std::ignore = x; (void)y; throw runtime_error("bfloat16 is not currently supported on the host device.", - PI_INVALID_DEVICE); + PI_ERROR_INVALID_DEVICE); #endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } @@ -193,7 +193,7 @@ std::enable_if_t::value, T> fmax(T x, T y) { std::ignore = x; (void)y; throw runtime_error("bfloat16 is not currently supported on the host device.", - PI_INVALID_DEVICE); + PI_ERROR_INVALID_DEVICE); #endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } @@ -218,7 +218,7 @@ sycl::marray fmax(sycl::marray x, std::ignore = x; (void)y; throw runtime_error("bfloat16 is not currently supported on the host device.", - PI_INVALID_DEVICE); + PI_ERROR_INVALID_DEVICE); #endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } @@ -231,7 +231,7 @@ std::enable_if_t::value, T> fma(T x, T y, T z) { (void)y; (void)z; throw runtime_error("bfloat16 is not currently supported on the host device.", - PI_INVALID_DEVICE); + PI_ERROR_INVALID_DEVICE); #endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } @@ -258,7 +258,7 @@ sycl::marray fma(sycl::marray x, std::ignore = x; (void)y; throw runtime_error("bfloat16 is not currently supported on the host device.", - PI_INVALID_DEVICE); + PI_ERROR_INVALID_DEVICE); #endif // defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) } From 0f0215b1e456d99b2b7a974d742f14cd135b5cae Mon Sep 17 00:00:00 2001 From: JackAKirk Date: Thu, 9 Jun 2022 16:33:15 +0100 Subject: [PATCH 39/45] format Signed-off-by: JackAKirk --- sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp index 2a0f6add512e8..6de66d0f8590c 100644 --- a/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp +++ b/sycl/include/sycl/ext/oneapi/matrix/matrix-tensorcore.hpp @@ -351,8 +351,9 @@ struct joint_matrix_load_impl< __hmma_m32n8k16_ld_c_f32(dstptr, src.get(), stride, get_layout_id()); } - } else if constexpr (std::is_same::value) { + } else if constexpr (std::is_same::value) { auto tileptr = reinterpret_cast(src.get()); auto dstptr = reinterpret_cast(&res.wi_marray); if constexpr (NumRows == 16 && NumCols == 8) { From a9c290136c2c817aea550630be58e733562580f3 Mon Sep 17 00:00:00 2001 From: JackAKirk Date: Thu, 9 Jun 2022 16:49:26 +0100 Subject: [PATCH 40/45] data -> wi_marray --- .../check_device_code/matrix/matrix-nvptx-tf32-test.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sycl/test/check_device_code/matrix/matrix-nvptx-tf32-test.cpp b/sycl/test/check_device_code/matrix/matrix-nvptx-tf32-test.cpp index 9cdd5e739b00a..9381bf709c2d3 100644 --- a/sycl/test/check_device_code/matrix/matrix-nvptx-tf32-test.cpp +++ b/sycl/test/check_device_code/matrix/matrix-nvptx-tf32-test.cpp @@ -80,10 +80,10 @@ int main() { // CHECK: tail call i32 @llvm.nvvm.f2tf32.rna(float {{.*}} // Round a, b to tf32 for (auto i = 0; i < 4; ++i) - sub_a.data[i] = round_to_tf32(sub_a.data[i]); + sub_a.wi_marray[i] = round_to_tf32(sub_a.wi_marray[i]); for (auto i = 0; i < 4; ++i) - sub_b.data[i] = round_to_tf32(sub_b.data[i]); + sub_b.wi_marray[i] = round_to_tf32(sub_b.wi_marray[i]); //CHECK: tail call { float, float, float, float, float, float, float, float } 
@llvm.nvvm.wmma.m16n16k8.mma.row.row.tf32(i32 {{.*}}, i32 {{.*}}, i32 {{.*}}, i32 {{.*}}, i32 {{.*}}, i32 {{.*}}, i32 %{{.*}}, i32 {{.*}}, float {{.*}}, float {{.*}}, float {{.*}}, float {{.*}}, float {{.*}}, float {{.*}}, float {{.*}}, float {{.*}}) #{{.*}} sub_c = joint_matrix_mad(sg, sub_a, sub_b, sub_c); @@ -125,10 +125,10 @@ int main() { // CHECK: tail call i32 @llvm.nvvm.f2tf32.rna(float {{.*}} // Round a, b to tf32 for (auto i = 0; i < 4; ++i) - sub_a.data[i] = round_to_tf32(sub_a.data[i]); + sub_a.wi_marray[i] = round_to_tf32(sub_a.wi_marray[i]); for (auto i = 0; i < 4; ++i) - sub_b.data[i] = round_to_tf32(sub_b.data[i]); + sub_b.wi_marray[i] = round_to_tf32(sub_b.wi_marray[i]); //CHECK: tail call { float, float, float, float, float, float, float, float } @llvm.nvvm.wmma.m16n16k8.mma.col.col.tf32(i32 {{.*}}, i32 {{.*}}, i32 {{.*}}, i32 {{.*}}, i32 {{.*}}, i32 {{.*}}, i32 {{.*}}, i32 {{.*}}, float {{.*}}, float {{.*}}, float {{.*}}, float {{.*}}, float {{.*}}, float {{.*}}, float {{.*}}, float {{.*}}) #{{.*}} sub_c = joint_matrix_mad(sg, sub_a, sub_b, sub_c); From 7141fdc09780300a5179a8a0678ec0fea48e0e03 Mon Sep 17 00:00:00 2001 From: JackAKirk Date: Wed, 22 Jun 2022 17:19:12 +0100 Subject: [PATCH 41/45] Replaced type punning with memcpy. Signed-off-by: JackAKirk --- .../sycl/ext/oneapi/experimental/builtins.hpp | 48 +++++++++++-------- 1 file changed, 28 insertions(+), 20 deletions(-) diff --git a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp index ab4d09437d113..d5ad7cf30a82a 100644 --- a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp +++ b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp @@ -28,6 +28,14 @@ __SYCL_INLINE_NAMESPACE(cl) { namespace sycl::ext::oneapi::experimental { +namespace detail { + template + uint32_t to_uint32_t(sycl::marray x, size_t start) { + uint32_t res; + std::memcpy(&res, &x[start], sizeof(uint32_t)); + return res; +} +} // Provides functionality to print data from kernels in a C way: // - On non-host devices this function is directly mapped to printf from @@ -130,11 +138,11 @@ template sycl::marray fabs(sycl::marray x) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) sycl::marray res; - auto x_storage = reinterpret_cast(&x); - auto res_storage = reinterpret_cast(&res); - for (size_t i = 0; i < N / 2; i++) - res_storage[i] = __clc_fabs(x_storage[i]); + for (size_t i = 0; i < N / 2; i++) { + auto partial_res = __clc_fabs(detail::to_uint32_t(x, i * 2)); + std::memcpy(&res[i * 2], &partial_res, sizeof(uint32_t)); + } if constexpr (N % 2) { res[N - 1] = bfloat16::from_bits(__clc_fabs(x[N - 1].raw())); @@ -164,12 +172,12 @@ sycl::marray fmin(sycl::marray x, sycl::marray y) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) sycl::marray res; - auto x_storage = reinterpret_cast(&x); - auto y_storage = reinterpret_cast(&y); - auto res_storage = reinterpret_cast(&res); - for (size_t i = 0; i < N / 2; i++) - res_storage[i] = __clc_fmin(x_storage[i], y_storage[i]); + for (size_t i = 0; i < N / 2; i++) { + auto partial_res = + __clc_fmin(detail::to_uint32_t(x, i * 2), detail::to_uint32_t(y, i * 2)); + std::memcpy(&res[i * 2], &partial_res, sizeof(uint32_t)); + } if constexpr (N % 2) { res[N - 1] = @@ -202,12 +210,12 @@ sycl::marray fmax(sycl::marray x, sycl::marray y) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) sycl::marray res; - auto x_storage = reinterpret_cast(&x); - auto y_storage = reinterpret_cast(&y); - auto res_storage = reinterpret_cast(&res); - 
for (size_t i = 0; i < N / 2; i++) - res_storage[i] = __clc_fmax(x_storage[i], y_storage[i]); + for (size_t i = 0; i < N / 2; i++) { + auto partial_res = + __clc_fmax(detail::to_uint32_t(x, i * 2), detail::to_uint32_t(y, i * 2)); + std::memcpy(&res[i * 2], &partial_res, sizeof(uint32_t)); + } if constexpr (N % 2) { res[N - 1] = @@ -241,13 +249,13 @@ sycl::marray fma(sycl::marray x, sycl::marray z) { #if defined(__SYCL_DEVICE_ONLY__) && defined(__NVPTX__) sycl::marray res; - auto x_storage = reinterpret_cast(&x); - auto y_storage = reinterpret_cast(&y); - auto z_storage = reinterpret_cast(&z); - auto res_storage = reinterpret_cast(&res); - for (size_t i = 0; i < N / 2; i++) - res_storage[i] = __clc_fma(x_storage[i], y_storage[i], z_storage[i]); + for (size_t i = 0; i < N / 2; i++) { + auto partial_res = + __clc_fma(detail::to_uint32_t(x, i * 2), detail::to_uint32_t(y, i * 2), + detail::to_uint32_t(z, i * 2)); + std::memcpy(&res[i * 2], &partial_res, sizeof(uint32_t)); + } if constexpr (N % 2) { res[N - 1] = bfloat16::from_bits( From 22f86507914e0d256629ca32b0bb653184caad77 Mon Sep 17 00:00:00 2001 From: JackAKirk Date: Wed, 22 Jun 2022 17:30:28 +0100 Subject: [PATCH 42/45] format Signed-off-by: JackAKirk --- .../sycl/ext/oneapi/experimental/builtins.hpp | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp index d5ad7cf30a82a..07a6dee39f3bc 100644 --- a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp +++ b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp @@ -29,13 +29,13 @@ __SYCL_INLINE_NAMESPACE(cl) { namespace sycl::ext::oneapi::experimental { namespace detail { - template - uint32_t to_uint32_t(sycl::marray x, size_t start) { +template +uint32_t to_uint32_t(sycl::marray x, size_t start) { uint32_t res; std::memcpy(&res, &x[start], sizeof(uint32_t)); return res; } -} +} // namespace detail // Provides functionality to print data from kernels in a C way: // - On non-host devices this function is directly mapped to printf from @@ -174,8 +174,8 @@ sycl::marray fmin(sycl::marray x, sycl::marray res; for (size_t i = 0; i < N / 2; i++) { - auto partial_res = - __clc_fmin(detail::to_uint32_t(x, i * 2), detail::to_uint32_t(y, i * 2)); + auto partial_res = __clc_fmin(detail::to_uint32_t(x, i * 2), + detail::to_uint32_t(y, i * 2)); std::memcpy(&res[i * 2], &partial_res, sizeof(uint32_t)); } @@ -212,8 +212,8 @@ sycl::marray fmax(sycl::marray x, sycl::marray res; for (size_t i = 0; i < N / 2; i++) { - auto partial_res = - __clc_fmax(detail::to_uint32_t(x, i * 2), detail::to_uint32_t(y, i * 2)); + auto partial_res = __clc_fmax(detail::to_uint32_t(x, i * 2), + detail::to_uint32_t(y, i * 2)); std::memcpy(&res[i * 2], &partial_res, sizeof(uint32_t)); } @@ -251,9 +251,8 @@ sycl::marray fma(sycl::marray x, sycl::marray res; for (size_t i = 0; i < N / 2; i++) { - auto partial_res = __clc_fma(detail::to_uint32_t(x, i * 2), detail::to_uint32_t(y, i * 2), - detail::to_uint32_t(z, i * 2)); + detail::to_uint32_t(z, i * 2)); std::memcpy(&res[i * 2], &partial_res, sizeof(uint32_t)); } From 6ee55f1645c03c57d50689e3140d6cd8eb2f5127 Mon Sep 17 00:00:00 2001 From: JackAKirk Date: Wed, 22 Jun 2022 17:40:31 +0100 Subject: [PATCH 43/45] Format Signed-off-by: JackAKirk --- sycl/include/sycl/ext/oneapi/experimental/builtins.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp 
b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp index 07a6dee39f3bc..ccdb19c63f9f6 100644 --- a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp +++ b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp @@ -252,7 +252,7 @@ sycl::marray fma(sycl::marray x, for (size_t i = 0; i < N / 2; i++) { __clc_fma(detail::to_uint32_t(x, i * 2), detail::to_uint32_t(y, i * 2), - detail::to_uint32_t(z, i * 2)); + detail::to_uint32_t(z, i * 2)); std::memcpy(&res[i * 2], &partial_res, sizeof(uint32_t)); } From 49c962d6362a62d486be9930288ab47b0e1c89cc Mon Sep 17 00:00:00 2001 From: JackAKirk Date: Wed, 22 Jun 2022 18:43:05 +0100 Subject: [PATCH 44/45] add back partial_res decl. Signed-off-by: JackAKirk --- sycl/include/sycl/ext/oneapi/experimental/builtins.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp index ccdb19c63f9f6..005a5a634c0f2 100644 --- a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp +++ b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp @@ -251,8 +251,8 @@ sycl::marray fma(sycl::marray x, sycl::marray res; for (size_t i = 0; i < N / 2; i++) { - __clc_fma(detail::to_uint32_t(x, i * 2), detail::to_uint32_t(y, i * 2), - detail::to_uint32_t(z, i * 2)); + auto partial_res = __clc_fma(detail::to_uint32_t(x, i * 2), detail::to_uint32_t(y, i * 2), + detail::to_uint32_t(z, i * 2)); std::memcpy(&res[i * 2], &partial_res, sizeof(uint32_t)); } From 81f8ba0796d08d7c925091be62c7fb23c2392410 Mon Sep 17 00:00:00 2001 From: JackAKirk Date: Wed, 22 Jun 2022 18:59:31 +0100 Subject: [PATCH 45/45] format Signed-off-by: JackAKirk --- sycl/include/sycl/ext/oneapi/experimental/builtins.hpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp index 005a5a634c0f2..c2326c6563efc 100644 --- a/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp +++ b/sycl/include/sycl/ext/oneapi/experimental/builtins.hpp @@ -251,8 +251,9 @@ sycl::marray fma(sycl::marray x, sycl::marray res; for (size_t i = 0; i < N / 2; i++) { - auto partial_res = __clc_fma(detail::to_uint32_t(x, i * 2), detail::to_uint32_t(y, i * 2), - detail::to_uint32_t(z, i * 2)); + auto partial_res = + __clc_fma(detail::to_uint32_t(x, i * 2), detail::to_uint32_t(y, i * 2), + detail::to_uint32_t(z, i * 2)); std::memcpy(&res[i * 2], &partial_res, sizeof(uint32_t)); }
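After these builtins changes, fabs, fmin, fmax and fma are exposed for scalar bfloat16 and for sycl::marray<bfloat16, N>: pairs of elements are packed into a uint32_t via std::memcpy for the __clc_* calls, and an odd trailing element goes through the scalar raw()/from_bits path. An illustrative device-side call follows; the umbrella header, the odd width 3 and the input values are assumptions chosen so that both the packed path and the scalar tail are exercised, and on the host these functions throw rather than compute:

    #include <sycl/sycl.hpp>
    namespace exp = sycl::ext::oneapi::experimental;

    // Device-only sketch (CUDA backend): element-wise bfloat16 math on marrays.
    void bf16_marray_demo() {
      sycl::marray<exp::bfloat16, 3> a{1.0f, -2.0f, 3.0f};
      sycl::marray<exp::bfloat16, 3> b{0.5f, 0.25f, -1.0f};
      sycl::marray<exp::bfloat16, 3> c{2.0f, 2.0f, 2.0f};

      auto abs_a = exp::fabs(a);        // |a[i]| per element
      auto min_ab = exp::fmin(a, b);    // element-wise minimum
      auto fma_abc = exp::fma(a, b, c); // a[i] * b[i] + c[i]
      (void)abs_a;
      (void)min_ab;
      (void)fma_abc;
    }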