diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
index 0d823d4c3161..4f5b3a9ee4e9 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -2212,7 +2212,7 @@ static int64_t getIntValueFromConstOp(mlir::Value val) {
 // expression type.
 // 2. Function arg types are given, not deduced from actual arg types.
 static mlir::Value
-buildCommonNeonCallPattern0(CIRGenFunction &cgf, std::string &intrincsName,
+buildCommonNeonCallPattern0(CIRGenFunction &cgf, llvm::StringRef intrincsName,
                             llvm::SmallVector<mlir::Type> argTypes,
                             llvm::SmallVectorImpl<mlir::Value> &ops,
                             mlir::Type funcResTy, const clang::CallExpr *e) {
@@ -2304,6 +2304,7 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr(
 
   // This second switch is for the intrinsics that might have a more generic
   // codegen solution so we can use the common codegen in future.
+  llvm::StringRef intrincsName;
   switch (builtinID) {
   default:
     llvm::errs() << getAArch64SIMDIntrinsicString(builtinID) << " ";
@@ -2311,22 +2312,27 @@ mlir::Value CIRGenFunction::buildCommonNeonBuiltinExpr(
 
   case NEON::BI__builtin_neon_vpadd_v:
   case NEON::BI__builtin_neon_vpaddq_v: {
-    std::string intrincsName = mlir::isa<cir::CIRFPTypeInterface>(vTy.getEltType())
-                                   ? "llvm.aarch64.neon.faddp"
-                                   : "llvm.aarch64.neon.addp";
-    return buildCommonNeonCallPattern0(*this, intrincsName, {vTy, vTy}, ops,
-                                       vTy, e);
+    intrincsName = mlir::isa<cir::CIRFPTypeInterface>(vTy.getEltType())
+                       ? "llvm.aarch64.neon.faddp"
+                       : "llvm.aarch64.neon.addp";
     break;
   }
-  case NEON::BI__builtin_neon_vqadd_v: {
-    std::string intrincsName = (intrinicId != altLLVMIntrinsic)
-                                   ? "llvm.aarch64.neon.uqadd"
-                                   : "llvm.aarch64.neon.sqadd";
-    return buildCommonNeonCallPattern0(*this, intrincsName, {vTy, vTy}, ops,
-                                       vTy, e);
+  case NEON::BI__builtin_neon_vqadd_v:
+  case NEON::BI__builtin_neon_vqaddq_v: {
+    intrincsName = (intrinicId != altLLVMIntrinsic) ? "llvm.aarch64.neon.uqadd"
+                                                    : "llvm.aarch64.neon.sqadd";
     break;
+  }
+  case NEON::BI__builtin_neon_vqsub_v:
+  case NEON::BI__builtin_neon_vqsubq_v: {
+    intrincsName = (intrinicId != altLLVMIntrinsic) ? "llvm.aarch64.neon.uqsub"
+                                                    : "llvm.aarch64.neon.sqsub";
+    break;
   }
   }
 
+  if (!intrincsName.empty())
+    return buildCommonNeonCallPattern0(*this, intrincsName, {vTy, vTy}, ops,
+                                       vTy, e);
   return nullptr;
 }
diff --git a/clang/test/CIR/CodeGen/AArch64/neon-arith.c b/clang/test/CIR/CodeGen/AArch64/neon-arith.c
index ab37dded4881..2bfa4e89505f 100644
--- a/clang/test/CIR/CodeGen/AArch64/neon-arith.c
+++ b/clang/test/CIR/CodeGen/AArch64/neon-arith.c
@@ -331,3 +331,279 @@ int32x4_t test_vqrdmulhq_lane_s32(int32x4_t a, int32x2_t v) {
 // LLVM: [[RES:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqrdmulh.lane.v4i32.v2i32
 // LLVM-SAME: (<4 x i32> [[A]], <2 x i32> [[V]], i32 1)
 // LLVM: ret <4 x i32> [[RES]]
+
+int8x16_t test_vqaddq_s8(int8x16_t a, int8x16_t b) {
+  return vqaddq_s8(a, b);
+}
+
+// CIR-LABEL: vqaddq_s8
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!s8i x 16>, !cir.vector<!s8i x 16>) -> !cir.vector<!s8i x 16>
+
+// LLVM: {{.*}}test_vqaddq_s8(<16 x i8>{{.*}} [[A:%.*]], <16 x i8>{{.*}} [[B:%.*]])
+// LLVM: [[RES:%.*]] = call <16 x i8> @llvm.aarch64.neon.sqadd.v16i8(<16 x i8> [[A]], <16 x i8> [[B]])
+// LLVM: ret <16 x i8> [[RES]]
+
+uint8x16_t test_vqaddq_u8(uint8x16_t a, uint8x16_t b) {
+  return vqaddq_u8(a, b);
+}
+
+// CIR-LABEL: vqaddq_u8
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!u8i x 16>, !cir.vector<!u8i x 16>) -> !cir.vector<!u8i x 16>
+
+// LLVM: {{.*}}test_vqaddq_u8(<16 x i8>{{.*}} [[A:%.*]], <16 x i8>{{.*}} [[B:%.*]])
+// LLVM: [[RES:%.*]] = call <16 x i8> @llvm.aarch64.neon.uqadd.v16i8(<16 x i8> [[A]], <16 x i8> [[B]])
+// LLVM: ret <16 x i8> [[RES]]
+
+int16x8_t test_vqaddq_s16(int16x8_t a, int16x8_t b) {
+  return vqaddq_s16(a, b);
+}
+
+// CIR-LABEL: vqaddq_s16
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!s16i x 8>, !cir.vector<!s16i x 8>) -> !cir.vector<!s16i x 8>
+
+// LLVM: {{.*}}test_vqaddq_s16(<8 x i16>{{.*}} [[A:%.*]], <8 x i16>{{.*}} [[B:%.*]])
+// LLVM: [[RES:%.*]] = call <8 x i16> @llvm.aarch64.neon.sqadd.v8i16(<8 x i16> [[A]], <8 x i16> [[B]])
+// LLVM: ret <8 x i16> [[RES]]
+
+uint16x8_t test_vqaddq_u16(uint16x8_t a, uint16x8_t b) {
+  return vqaddq_u16(a, b);
+}
+
+// CIR-LABEL: vqaddq_u16
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!u16i x 8>, !cir.vector<!u16i x 8>) -> !cir.vector<!u16i x 8>
+
+// LLVM: {{.*}}test_vqaddq_u16(<8 x i16>{{.*}} [[A:%.*]], <8 x i16>{{.*}} [[B:%.*]])
+// LLVM: [[RES:%.*]] = call <8 x i16> @llvm.aarch64.neon.uqadd.v8i16(<8 x i16> [[A]], <8 x i16> [[B]])
+// LLVM: ret <8 x i16> [[RES]]
+
+int32x4_t test_vqaddq_s32(int32x4_t a, int32x4_t b) {
+  return vqaddq_s32(a, b);
+}
+
+// CIR-LABEL: vqaddq_s32
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!s32i x 4>, !cir.vector<!s32i x 4>) -> !cir.vector<!s32i x 4>
+
+// LLVM: {{.*}}test_vqaddq_s32(<4 x i32>{{.*}} [[A:%.*]], <4 x i32>{{.*}} [[B:%.*]])
+// LLVM: [[RES:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> [[A]], <4 x i32> [[B]])
+// LLVM: ret <4 x i32> [[RES]]
+
+int64x2_t test_vqaddq_s64(int64x2_t a, int64x2_t b) {
+  return vqaddq_s64(a, b);
+}
+
+// CIR-LABEL: vqaddq_s64
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqadd" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!s64i x 2>, !cir.vector<!s64i x 2>) -> !cir.vector<!s64i x 2>
+
+// LLVM: {{.*}}test_vqaddq_s64(<2 x i64>{{.*}} [[A:%.*]], <2 x i64>{{.*}} [[B:%.*]])
+// LLVM: [[RES:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> [[A]], <2 x i64> [[B]])
+// LLVM: ret <2 x i64> [[RES]]
+
+uint64x2_t test_vqaddq_u64(uint64x2_t a, uint64x2_t b) {
+  return vqaddq_u64(a, b);
+}
+
+// CIR-LABEL: vqaddq_u64
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqadd" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!u64i x 2>, !cir.vector<!u64i x 2>) -> !cir.vector<!u64i x 2>
+
+// LLVM: {{.*}}test_vqaddq_u64(<2 x i64>{{.*}} [[A:%.*]], <2 x i64>{{.*}} [[B:%.*]])
+// LLVM: [[RES:%.*]] = call <2 x i64> @llvm.aarch64.neon.uqadd.v2i64(<2 x i64> [[A]], <2 x i64> [[B]])
+// LLVM: ret <2 x i64> [[RES]]
+
+int8x8_t test_vqsub_s8(int8x8_t a, int8x8_t b) {
+  return vqsub_s8(a, b);
+}
+
+// CIR-LABEL: vqsub_s8
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqsub" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!s8i x 8>, !cir.vector<!s8i x 8>) -> !cir.vector<!s8i x 8>
+
+// LLVM: {{.*}}test_vqsub_s8(<8 x i8>{{.*}} [[A:%.*]], <8 x i8>{{.*}} [[B:%.*]])
+// LLVM: [[RES:%.*]] = call <8 x i8> @llvm.aarch64.neon.sqsub.v8i8(<8 x i8> [[A]], <8 x i8> [[B]])
+// LLVM: ret <8 x i8> [[RES]]
+
+uint8x8_t test_vqsub_u8(uint8x8_t a, uint8x8_t b) {
+  return vqsub_u8(a, b);
+}
+
+// CIR-LABEL: vqsub_u8
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqsub" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!u8i x 8>, !cir.vector<!u8i x 8>) -> !cir.vector<!u8i x 8>
+
+// LLVM: {{.*}}test_vqsub_u8(<8 x i8>{{.*}} [[A:%.*]], <8 x i8>{{.*}} [[B:%.*]])
+// LLVM: [[RES:%.*]] = call <8 x i8> @llvm.aarch64.neon.uqsub.v8i8(<8 x i8> [[A]], <8 x i8> [[B]])
+// LLVM: ret <8 x i8> [[RES]]
+
+int16x4_t test_vqsub_s16(int16x4_t a, int16x4_t b) {
+  return vqsub_s16(a, b);
+}
+
+// CIR-LABEL: vqsub_s16
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqsub" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!s16i x 4>, !cir.vector<!s16i x 4>) -> !cir.vector<!s16i x 4>
+
+// LLVM: {{.*}}test_vqsub_s16(<4 x i16>{{.*}} [[A:%.*]], <4 x i16>{{.*}} [[B:%.*]])
+// LLVM: [[RES:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqsub.v4i16(<4 x i16> [[A]], <4 x i16> [[B]])
+// LLVM: ret <4 x i16> [[RES]]
+
+uint16x4_t test_vqsub_u16(uint16x4_t a, uint16x4_t b) {
+  return vqsub_u16(a, b);
+}
+
+// CIR-LABEL: vqsub_u16
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqsub" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!u16i x 4>, !cir.vector<!u16i x 4>) -> !cir.vector<!u16i x 4>
+
+// LLVM: {{.*}}test_vqsub_u16(<4 x i16>{{.*}} [[A:%.*]], <4 x i16>{{.*}} [[B:%.*]])
+// LLVM: [[RES:%.*]] = call <4 x i16> @llvm.aarch64.neon.uqsub.v4i16(<4 x i16> [[A]], <4 x i16> [[B]])
+// LLVM: ret <4 x i16> [[RES]]
+
+int32x2_t test_vqsub_s32(int32x2_t a, int32x2_t b) {
+  return vqsub_s32(a, b);
+}
+
+// CIR-LABEL: vqsub_s32
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqsub" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!s32i x 2>, !cir.vector<!s32i x 2>) -> !cir.vector<!s32i x 2>
+
+// LLVM: {{.*}}test_vqsub_s32(<2 x i32>{{.*}} [[A:%.*]], <2 x i32>{{.*}} [[B:%.*]])
+// LLVM: [[RES:%.*]] = call <2 x i32> @llvm.aarch64.neon.sqsub.v2i32(<2 x i32> [[A]], <2 x i32> [[B]])
+// LLVM: ret <2 x i32> [[RES]]
+
+uint32x2_t test_vqsub_u32(uint32x2_t a, uint32x2_t b) {
+  return vqsub_u32(a, b);
+}
+
+// CIR-LABEL: vqsub_u32
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqsub" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!u32i x 2>, !cir.vector<!u32i x 2>) -> !cir.vector<!u32i x 2>
+
+// LLVM: {{.*}}test_vqsub_u32(<2 x i32>{{.*}} [[A:%.*]], <2 x i32>{{.*}} [[B:%.*]])
+// LLVM: [[RES:%.*]] = call <2 x i32> @llvm.aarch64.neon.uqsub.v2i32(<2 x i32> [[A]], <2 x i32> [[B]])
+// LLVM: ret <2 x i32> [[RES]]
+
+int64x1_t test_vqsub_s64(int64x1_t a, int64x1_t b) {
+  return vqsub_s64(a, b);
+}
+
+// CIR-LABEL: vqsub_s64
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqsub" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!s64i x 1>, !cir.vector<!s64i x 1>) -> !cir.vector<!s64i x 1>
+
+// LLVM: {{.*}}test_vqsub_s64(<1 x i64>{{.*}} [[A:%.*]], <1 x i64>{{.*}} [[B:%.*]])
+// LLVM: [[RES:%.*]] = call <1 x i64> @llvm.aarch64.neon.sqsub.v1i64(<1 x i64> [[A]], <1 x i64> [[B]])
+// LLVM: ret <1 x i64> [[RES]]
+
+uint64x1_t test_vqsub_u64(uint64x1_t a, uint64x1_t b) {
+  return vqsub_u64(a, b);
+}
+
+// CIR-LABEL: vqsub_u64
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqsub" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!u64i x 1>, !cir.vector<!u64i x 1>) -> !cir.vector<!u64i x 1>
+
+// LLVM: {{.*}}test_vqsub_u64(<1 x i64>{{.*}} [[A:%.*]], <1 x i64>{{.*}} [[B:%.*]])
+// LLVM: [[RES:%.*]] = call <1 x i64> @llvm.aarch64.neon.uqsub.v1i64(<1 x i64> [[A]], <1 x i64> [[B]])
+// LLVM: ret <1 x i64> [[RES]]
+
+int8x16_t test_vqsubq_s8(int8x16_t a, int8x16_t b) {
+  return vqsubq_s8(a, b);
+}
+
+// CIR-LABEL: vqsubq_s8
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqsub" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!s8i x 16>, !cir.vector<!s8i x 16>) -> !cir.vector<!s8i x 16>
+
+// LLVM: {{.*}}test_vqsubq_s8(<16 x i8>{{.*}} [[A:%.*]], <16 x i8>{{.*}} [[B:%.*]])
+// LLVM: [[RES:%.*]] = call <16 x i8> @llvm.aarch64.neon.sqsub.v16i8(<16 x i8> [[A]], <16 x i8> [[B]])
+// LLVM: ret <16 x i8> [[RES]]
+
+uint8x16_t test_vqsubq_u8(uint8x16_t a, uint8x16_t b) {
+  return vqsubq_u8(a, b);
+}
+
+// CIR-LABEL: vqsubq_u8
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqsub" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!u8i x 16>, !cir.vector<!u8i x 16>) -> !cir.vector<!u8i x 16>
+
+// LLVM: {{.*}}test_vqsubq_u8(<16 x i8>{{.*}} [[A:%.*]], <16 x i8>{{.*}} [[B:%.*]])
+// LLVM: [[RES:%.*]] = call <16 x i8> @llvm.aarch64.neon.uqsub.v16i8(<16 x i8> [[A]], <16 x i8> [[B]])
+// LLVM: ret <16 x i8> [[RES]]
+
+int16x8_t test_vqsubq_s16(int16x8_t a, int16x8_t b) {
+  return vqsubq_s16(a, b);
+}
+
+// CIR-LABEL: vqsubq_s16
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqsub" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!s16i x 8>, !cir.vector<!s16i x 8>) -> !cir.vector<!s16i x 8>
+
+// LLVM: {{.*}}test_vqsubq_s16(<8 x i16>{{.*}} [[A:%.*]], <8 x i16>{{.*}} [[B:%.*]])
+// LLVM: [[RES:%.*]] = call <8 x i16> @llvm.aarch64.neon.sqsub.v8i16(<8 x i16> [[A]], <8 x i16> [[B]])
+// LLVM: ret <8 x i16> [[RES]]
+
+uint16x8_t test_vqsubq_u16(uint16x8_t a, uint16x8_t b) {
+  return vqsubq_u16(a, b);
+}
+
+// CIR-LABEL: vqsubq_u16
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqsub" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!u16i x 8>, !cir.vector<!u16i x 8>) -> !cir.vector<!u16i x 8>
+
+// LLVM: {{.*}}test_vqsubq_u16(<8 x i16>{{.*}} [[A:%.*]], <8 x i16>{{.*}} [[B:%.*]])
+// LLVM: [[RES:%.*]] = call <8 x i16> @llvm.aarch64.neon.uqsub.v8i16(<8 x i16> [[A]], <8 x i16> [[B]])
+// LLVM: ret <8 x i16> [[RES]]
+
+int32x4_t test_vqsubq_s32(int32x4_t a, int32x4_t b) {
+  return vqsubq_s32(a, b);
+}
+
+// CIR-LABEL: vqsubq_s32
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqsub" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!s32i x 4>, !cir.vector<!s32i x 4>) -> !cir.vector<!s32i x 4>
+
+// LLVM: {{.*}}test_vqsubq_s32(<4 x i32>{{.*}} [[A:%.*]], <4 x i32>{{.*}} [[B:%.*]])
+// LLVM: [[RES:%.*]] = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> [[A]], <4 x i32> [[B]])
+// LLVM: ret <4 x i32> [[RES]]
+
+uint32x4_t test_vqsubq_u32(uint32x4_t a, uint32x4_t b) {
+  return vqsubq_u32(a, b);
+}
+
+// CIR-LABEL: vqsubq_u32
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqsub" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!u32i x 4>, !cir.vector<!u32i x 4>) -> !cir.vector<!u32i x 4>
+
+// LLVM: {{.*}}test_vqsubq_u32(<4 x i32>{{.*}} [[A:%.*]], <4 x i32>{{.*}} [[B:%.*]])
+// LLVM: [[RES:%.*]] = call <4 x i32> @llvm.aarch64.neon.uqsub.v4i32(<4 x i32> [[A]], <4 x i32> [[B]])
+// LLVM: ret <4 x i32> [[RES]]
+
+int64x2_t test_vqsubq_s64(int64x2_t a, int64x2_t b) {
+  return vqsubq_s64(a, b);
+}
+
+// CIR-LABEL: vqsubq_s64
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.sqsub" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!s64i x 2>, !cir.vector<!s64i x 2>) -> !cir.vector<!s64i x 2>
+
+// LLVM: {{.*}}test_vqsubq_s64(<2 x i64>{{.*}} [[A:%.*]], <2 x i64>{{.*}} [[B:%.*]])
+// LLVM: [[RES:%.*]] = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> [[A]], <2 x i64> [[B]])
+// LLVM: ret <2 x i64> [[RES]]
+
+uint64x2_t test_vqsubq_u64(uint64x2_t a, uint64x2_t b) {
+  return vqsubq_u64(a, b);
+}
+
+// CIR-LABEL: vqsubq_u64
+// CIR: {{%.*}} = cir.llvm.intrinsic "llvm.aarch64.neon.uqsub" {{%.*}}, {{%.*}} :
+// CIR-SAME: (!cir.vector<!u64i x 2>, !cir.vector<!u64i x 2>) -> !cir.vector<!u64i x 2>
+
+// LLVM: {{.*}}test_vqsubq_u64(<2 x i64>{{.*}} [[A:%.*]], <2 x i64>{{.*}} [[B:%.*]])
+// LLVM: [[RES:%.*]] = call <2 x i64> @llvm.aarch64.neon.uqsub.v2i64(<2 x i64> [[A]], <2 x i64> [[B]])
+// LLVM: ret <2 x i64> [[RES]]
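For reference, the saturating semantics exercised by these tests can be observed with a small standalone program. This is an illustrative sketch only, not part of the patch; it assumes an AArch64 target (e.g. clang --target=aarch64-linux-gnu) where arm_neon.h is available:

// Demo of saturating NEON arithmetic: vqaddq_u8 clamps at 255 and
// vqsubq_u8 clamps at 0, instead of wrapping around like plain +/-.
#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  uint8x16_t hi = vdupq_n_u8(250);
  uint8x16_t lo = vdupq_n_u8(10);

  uint8x16_t add = vqaddq_u8(hi, lo); // 250 + 10 saturates to 255
  uint8x16_t sub = vqsubq_u8(lo, hi); // 10 - 250 saturates to 0

  printf("vqaddq_u8: %u, vqsubq_u8: %u\n",
         vgetq_lane_u8(add, 0), vgetq_lane_u8(sub, 0));
  // Prints: vqaddq_u8: 255, vqsubq_u8: 0
  return 0;
}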