@@ -1863,14 +1863,12 @@ static cir::VectorType GetNeonType(CIRGenFunction *CGF, NeonTypeFlags TypeFlags,
   switch (TypeFlags.getEltType()) {
   case NeonTypeFlags::Int8:
   case NeonTypeFlags::Poly8:
-    return cir::VectorType::get(CGF->getBuilder().getContext(),
-                                TypeFlags.isUnsigned() ? CGF->UInt8Ty
+    return cir::VectorType::get(TypeFlags.isUnsigned() ? CGF->UInt8Ty
                                                        : CGF->SInt8Ty,
                                 V1Ty ? 1 : (8 << IsQuad));
   case NeonTypeFlags::Int16:
   case NeonTypeFlags::Poly16:
-    return cir::VectorType::get(CGF->getBuilder().getContext(),
-                                TypeFlags.isUnsigned() ? CGF->UInt16Ty
+    return cir::VectorType::get(TypeFlags.isUnsigned() ? CGF->UInt16Ty
                                                         : CGF->SInt16Ty,
                                 V1Ty ? 1 : (4 << IsQuad));
   case NeonTypeFlags::BFloat16:
@@ -1884,14 +1882,12 @@ static cir::VectorType GetNeonType(CIRGenFunction *CGF, NeonTypeFlags TypeFlags,
   else
     llvm_unreachable("NeonTypeFlags::Float16 NYI");
   case NeonTypeFlags::Int32:
-    return cir::VectorType::get(CGF->getBuilder().getContext(),
-                                TypeFlags.isUnsigned() ? CGF->UInt32Ty
+    return cir::VectorType::get(TypeFlags.isUnsigned() ? CGF->UInt32Ty
                                                         : CGF->SInt32Ty,
                                 V1Ty ? 1 : (2 << IsQuad));
   case NeonTypeFlags::Int64:
   case NeonTypeFlags::Poly64:
-    return cir::VectorType::get(CGF->getBuilder().getContext(),
-                                TypeFlags.isUnsigned() ? CGF->UInt64Ty
+    return cir::VectorType::get(TypeFlags.isUnsigned() ? CGF->UInt64Ty
                                                         : CGF->SInt64Ty,
                                 V1Ty ? 1 : (1 << IsQuad));
   case NeonTypeFlags::Poly128:
@@ -1900,12 +1896,10 @@ static cir::VectorType GetNeonType(CIRGenFunction *CGF, NeonTypeFlags TypeFlags,
     // so we use v16i8 to represent poly128 and get pattern matched.
     llvm_unreachable("NeonTypeFlags::Poly128 NYI");
   case NeonTypeFlags::Float32:
-    return cir::VectorType::get(CGF->getBuilder().getContext(),
-                                CGF->getCIRGenModule().FloatTy,
+    return cir::VectorType::get(CGF->getCIRGenModule().FloatTy,
                                 V1Ty ? 1 : (2 << IsQuad));
   case NeonTypeFlags::Float64:
-    return cir::VectorType::get(CGF->getBuilder().getContext(),
-                                CGF->getCIRGenModule().DoubleTy,
+    return cir::VectorType::get(CGF->getCIRGenModule().DoubleTy,
                                 V1Ty ? 1 : (1 << IsQuad));
   }
   llvm_unreachable("Unknown vector element type!");
@@ -2102,7 +2096,7 @@ static cir::VectorType getSignChangedVectorType(CIRGenBuilderTy &builder,
   auto elemTy = mlir::cast<cir::IntType>(vecTy.getEltType());
   elemTy = elemTy.isSigned() ? builder.getUIntNTy(elemTy.getWidth())
                              : builder.getSIntNTy(elemTy.getWidth());
-  return cir::VectorType::get(builder.getContext(), elemTy, vecTy.getSize());
+  return cir::VectorType::get(elemTy, vecTy.getSize());
 }
 
 static cir::VectorType
@@ -2111,19 +2105,16 @@ getHalfEltSizeTwiceNumElemsVecType(CIRGenBuilderTy &builder,
   auto elemTy = mlir::cast<cir::IntType>(vecTy.getEltType());
   elemTy = elemTy.isSigned() ? builder.getSIntNTy(elemTy.getWidth() / 2)
                              : builder.getUIntNTy(elemTy.getWidth() / 2);
-  return cir::VectorType::get(builder.getContext(), elemTy,
-                              vecTy.getSize() * 2);
+  return cir::VectorType::get(elemTy, vecTy.getSize() * 2);
 }
 
 static cir::VectorType
 castVecOfFPTypeToVecOfIntWithSameWidth(CIRGenBuilderTy &builder,
                                        cir::VectorType vecTy) {
   if (mlir::isa<cir::SingleType>(vecTy.getEltType()))
-    return cir::VectorType::get(builder.getContext(), builder.getSInt32Ty(),
-                                vecTy.getSize());
+    return cir::VectorType::get(builder.getSInt32Ty(), vecTy.getSize());
   if (mlir::isa<cir::DoubleType>(vecTy.getEltType()))
-    return cir::VectorType::get(builder.getContext(), builder.getSInt64Ty(),
-                                vecTy.getSize());
+    return cir::VectorType::get(builder.getSInt64Ty(), vecTy.getSize());
   llvm_unreachable(
       "Unsupported element type in getVecOfIntTypeWithSameEltWidth");
 }
@@ -2315,8 +2306,7 @@ static mlir::Value emitCommonNeonVecAcrossCall(CIRGenFunction &cgf,
                                                const clang::CallExpr *e) {
   CIRGenBuilderTy &builder = cgf.getBuilder();
   mlir::Value op = cgf.emitScalarExpr(e->getArg(0));
-  cir::VectorType vTy =
-      cir::VectorType::get(&cgf.getMLIRContext(), eltTy, vecLen);
+  cir::VectorType vTy = cir::VectorType::get(eltTy, vecLen);
   llvm::SmallVector<mlir::Value, 1> args{op};
   return emitNeonCall(builder, {vTy}, args, intrincsName, eltTy,
                       cgf.getLoc(e->getExprLoc()));
@@ -2447,8 +2437,7 @@ mlir::Value CIRGenFunction::emitCommonNeonBuiltinExpr(
     cir::VectorType resTy =
         (builtinID == NEON::BI__builtin_neon_vqdmulhq_lane_v ||
          builtinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v)
-            ? cir::VectorType::get(&getMLIRContext(), vTy.getEltType(),
-                                   vTy.getSize() * 2)
+            ? cir::VectorType::get(vTy.getEltType(), vTy.getSize() * 2)
             : vTy;
     cir::VectorType mulVecT =
         GetNeonType(this, NeonTypeFlags(neonType.getEltType(), false,
@@ -2888,10 +2877,8 @@ static mlir::Value emitCommonNeonSISDBuiltinExpr(
     llvm_unreachable("neon_vqmovnh_u16 NYI ");
   case NEON::BI__builtin_neon_vqmovns_s32: {
     mlir::Location loc = cgf.getLoc(expr->getExprLoc());
-    cir::VectorType argVecTy =
-        cir::VectorType::get(&(cgf.getMLIRContext()), cgf.SInt32Ty, 4);
-    cir::VectorType resVecTy =
-        cir::VectorType::get(&(cgf.getMLIRContext()), cgf.SInt16Ty, 4);
+    cir::VectorType argVecTy = cir::VectorType::get(cgf.SInt32Ty, 4);
+    cir::VectorType resVecTy = cir::VectorType::get(cgf.SInt16Ty, 4);
     vecExtendIntValue(cgf, argVecTy, ops[0], loc);
     mlir::Value result = emitNeonCall(builder, {argVecTy}, ops,
                                       "aarch64.neon.sqxtn", resVecTy, loc);
@@ -3706,88 +3693,74 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
 
   case NEON::BI__builtin_neon_vset_lane_f64: {
     Ops.push_back(emitScalarExpr(E->getArg(2)));
-    Ops[1] = builder.createBitcast(
-        Ops[1], cir::VectorType::get(&getMLIRContext(), DoubleTy, 1));
+    Ops[1] = builder.createBitcast(Ops[1], cir::VectorType::get(DoubleTy, 1));
     return builder.create<cir::VecInsertOp>(getLoc(E->getExprLoc()), Ops[1],
                                             Ops[0], Ops[2]);
   }
   case NEON::BI__builtin_neon_vsetq_lane_f64: {
     Ops.push_back(emitScalarExpr(E->getArg(2)));
-    Ops[1] = builder.createBitcast(
-        Ops[1], cir::VectorType::get(&getMLIRContext(), DoubleTy, 2));
+    Ops[1] = builder.createBitcast(Ops[1], cir::VectorType::get(DoubleTy, 2));
     return builder.create<cir::VecInsertOp>(getLoc(E->getExprLoc()), Ops[1],
                                             Ops[0], Ops[2]);
   }
   case NEON::BI__builtin_neon_vget_lane_i8:
   case NEON::BI__builtin_neon_vdupb_lane_i8:
-    Ops[0] = builder.createBitcast(
-        Ops[0], cir::VectorType::get(&getMLIRContext(), UInt8Ty, 8));
+    Ops[0] = builder.createBitcast(Ops[0], cir::VectorType::get(UInt8Ty, 8));
     return builder.create<cir::VecExtractOp>(getLoc(E->getExprLoc()), Ops[0],
                                              emitScalarExpr(E->getArg(1)));
   case NEON::BI__builtin_neon_vgetq_lane_i8:
   case NEON::BI__builtin_neon_vdupb_laneq_i8:
-    Ops[0] = builder.createBitcast(
-        Ops[0], cir::VectorType::get(&getMLIRContext(), UInt8Ty, 16));
+    Ops[0] = builder.createBitcast(Ops[0], cir::VectorType::get(UInt8Ty, 16));
     return builder.create<cir::VecExtractOp>(getLoc(E->getExprLoc()), Ops[0],
                                              emitScalarExpr(E->getArg(1)));
   case NEON::BI__builtin_neon_vget_lane_i16:
   case NEON::BI__builtin_neon_vduph_lane_i16:
-    Ops[0] = builder.createBitcast(
-        Ops[0], cir::VectorType::get(&getMLIRContext(), UInt16Ty, 4));
+    Ops[0] = builder.createBitcast(Ops[0], cir::VectorType::get(UInt16Ty, 4));
     return builder.create<cir::VecExtractOp>(getLoc(E->getExprLoc()), Ops[0],
                                              emitScalarExpr(E->getArg(1)));
   case NEON::BI__builtin_neon_vgetq_lane_i16:
   case NEON::BI__builtin_neon_vduph_laneq_i16:
-    Ops[0] = builder.createBitcast(
-        Ops[0], cir::VectorType::get(&getMLIRContext(), UInt16Ty, 8));
+    Ops[0] = builder.createBitcast(Ops[0], cir::VectorType::get(UInt16Ty, 8));
     return builder.create<cir::VecExtractOp>(getLoc(E->getExprLoc()), Ops[0],
                                              emitScalarExpr(E->getArg(1)));
   case NEON::BI__builtin_neon_vget_lane_i32:
   case NEON::BI__builtin_neon_vdups_lane_i32:
-    Ops[0] = builder.createBitcast(
-        Ops[0], cir::VectorType::get(&getMLIRContext(), UInt32Ty, 2));
+    Ops[0] = builder.createBitcast(Ops[0], cir::VectorType::get(UInt32Ty, 2));
     return builder.create<cir::VecExtractOp>(getLoc(E->getExprLoc()), Ops[0],
                                              emitScalarExpr(E->getArg(1)));
   case NEON::BI__builtin_neon_vget_lane_f32:
   case NEON::BI__builtin_neon_vdups_lane_f32:
-    Ops[0] = builder.createBitcast(
-        Ops[0], cir::VectorType::get(&getMLIRContext(), FloatTy, 2));
+    Ops[0] = builder.createBitcast(Ops[0], cir::VectorType::get(FloatTy, 2));
     return builder.create<cir::VecExtractOp>(getLoc(E->getExprLoc()), Ops[0],
                                              emitScalarExpr(E->getArg(1)));
   case NEON::BI__builtin_neon_vgetq_lane_i32:
   case NEON::BI__builtin_neon_vdups_laneq_i32:
-    Ops[0] = builder.createBitcast(
-        Ops[0], cir::VectorType::get(&getMLIRContext(), UInt32Ty, 4));
+    Ops[0] = builder.createBitcast(Ops[0], cir::VectorType::get(UInt32Ty, 4));
     return builder.create<cir::VecExtractOp>(getLoc(E->getExprLoc()), Ops[0],
                                              emitScalarExpr(E->getArg(1)));
   case NEON::BI__builtin_neon_vget_lane_i64:
   case NEON::BI__builtin_neon_vdupd_lane_i64:
-    Ops[0] = builder.createBitcast(
-        Ops[0], cir::VectorType::get(&getMLIRContext(), UInt64Ty, 1));
+    Ops[0] = builder.createBitcast(Ops[0], cir::VectorType::get(UInt64Ty, 1));
     return builder.create<cir::VecExtractOp>(getLoc(E->getExprLoc()), Ops[0],
                                              emitScalarExpr(E->getArg(1)));
   case NEON::BI__builtin_neon_vdupd_lane_f64:
   case NEON::BI__builtin_neon_vget_lane_f64:
-    Ops[0] = builder.createBitcast(
-        Ops[0], cir::VectorType::get(&getMLIRContext(), DoubleTy, 1));
+    Ops[0] = builder.createBitcast(Ops[0], cir::VectorType::get(DoubleTy, 1));
     return builder.create<cir::VecExtractOp>(getLoc(E->getExprLoc()), Ops[0],
                                              emitScalarExpr(E->getArg(1)));
   case NEON::BI__builtin_neon_vgetq_lane_i64:
   case NEON::BI__builtin_neon_vdupd_laneq_i64:
-    Ops[0] = builder.createBitcast(
-        Ops[0], cir::VectorType::get(&getMLIRContext(), UInt64Ty, 2));
+    Ops[0] = builder.createBitcast(Ops[0], cir::VectorType::get(UInt64Ty, 2));
     return builder.create<cir::VecExtractOp>(getLoc(E->getExprLoc()), Ops[0],
                                              emitScalarExpr(E->getArg(1)));
   case NEON::BI__builtin_neon_vgetq_lane_f32:
   case NEON::BI__builtin_neon_vdups_laneq_f32:
-    Ops[0] = builder.createBitcast(
-        Ops[0], cir::VectorType::get(&getMLIRContext(), FloatTy, 4));
+    Ops[0] = builder.createBitcast(Ops[0], cir::VectorType::get(FloatTy, 4));
     return builder.create<cir::VecExtractOp>(getLoc(E->getExprLoc()), Ops[0],
                                              emitScalarExpr(E->getArg(1)));
   case NEON::BI__builtin_neon_vgetq_lane_f64:
   case NEON::BI__builtin_neon_vdupd_laneq_f64:
-    Ops[0] = builder.createBitcast(
-        Ops[0], cir::VectorType::get(&getMLIRContext(), DoubleTy, 2));
+    Ops[0] = builder.createBitcast(Ops[0], cir::VectorType::get(DoubleTy, 2));
     return builder.create<cir::VecExtractOp>(getLoc(E->getExprLoc()), Ops[0],
                                              emitScalarExpr(E->getArg(1)));
   case NEON::BI__builtin_neon_vaddh_f16: {
@@ -4318,7 +4291,7 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
     [[fallthrough]];
   case NEON::BI__builtin_neon_vaddv_s16: {
     cir::IntType eltTy = usgn ? UInt16Ty : SInt16Ty;
-    cir::VectorType vTy = cir::VectorType::get(builder.getContext(), eltTy, 4);
+    cir::VectorType vTy = cir::VectorType::get(eltTy, 4);
     Ops.push_back(emitScalarExpr(E->getArg(0)));
     // This is to add across the vector elements, so wider result type needed.
     Ops[0] = emitNeonCall(builder, {vTy}, Ops,
@@ -4427,8 +4400,7 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
     usgn = true;
     [[fallthrough]];
   case NEON::BI__builtin_neon_vaddlvq_s16: {
-    mlir::Type argTy = cir::VectorType::get(builder.getContext(),
-                                            usgn ? UInt16Ty : SInt16Ty, 8);
+    mlir::Type argTy = cir::VectorType::get(usgn ? UInt16Ty : SInt16Ty, 8);
     llvm::SmallVector<mlir::Value, 1> argOps = {emitScalarExpr(E->getArg(0))};
     return emitNeonCall(builder, {argTy}, argOps,
                         usgn ? "aarch64.neon.uaddlv" : "aarch64.neon.saddlv",
@@ -4441,8 +4413,7 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
     usgn = true;
     [[fallthrough]];
   case NEON::BI__builtin_neon_vaddlv_s16: {
-    mlir::Type argTy = cir::VectorType::get(builder.getContext(),
-                                            usgn ? UInt16Ty : SInt16Ty, 4);
+    mlir::Type argTy = cir::VectorType::get(usgn ? UInt16Ty : SInt16Ty, 4);
     llvm::SmallVector<mlir::Value, 1> argOps = {emitScalarExpr(E->getArg(0))};
     return emitNeonCall(builder, {argTy}, argOps,
                         usgn ? "aarch64.neon.uaddlv" : "aarch64.neon.saddlv",