Skip to content

Commit caf3b20

Browse files
committed
[RISCV] Handle more (add x, C) -> (sub x, -C) cases
This is a follow-up to llvm#137309, adding support for: (1) multi-use of the constant across different adds, and (2) vectors (vadd.vx -> vsub.vx).
1 parent e618a79 commit caf3b20

9 files changed

+95
-23
lines changed

llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp

Lines changed: 23 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3223,11 +3223,28 @@ bool RISCVDAGToDAGISel::selectSHXADD_UWOp(SDValue N, unsigned ShAmt,
32233223
}
32243224

32253225
bool RISCVDAGToDAGISel::selectNegImm(SDValue N, SDValue &Val) {
3226-
if (!isa<ConstantSDNode>(N) || !N.hasOneUse())
3226+
if (!isa<ConstantSDNode>(N))
32273227
return false;
32283228
int64_t Imm = cast<ConstantSDNode>(N)->getSExtValue();
32293229
if (isInt<32>(Imm))
32303230
return false;
3231+
3232+
for (const SDNode *U : N->users()) {
3233+
switch (U->getOpcode()) {
3234+
case ISD::ADD:
3235+
break;
3236+
case RISCVISD::VMV_V_X_VL:
3237+
if (!all_of(U->users(), [](const SDNode *V) {
3238+
return V->getOpcode() == ISD::ADD ||
3239+
V->getOpcode() == RISCVISD::ADD_VL;
3240+
}))
3241+
return false;
3242+
break;
3243+
default:
3244+
return false;
3245+
}
3246+
}
3247+
32313248
int OrigImmCost = RISCVMatInt::getIntMatCost(APInt(64, Imm), 64, *Subtarget,
32323249
/*CompressionCost=*/true);
32333250
int NegImmCost = RISCVMatInt::getIntMatCost(APInt(64, -Imm), 64, *Subtarget,
@@ -3630,6 +3647,11 @@ bool RISCVDAGToDAGISel::selectVSplatUimm(SDValue N, unsigned Bits,
36303647
[Bits](int64_t Imm) { return isUIntN(Bits, Imm); });
36313648
}
36323649

3650+
bool RISCVDAGToDAGISel::selectVSplatImm64Neg(SDValue N, SDValue &SplatVal) {
3651+
SDValue Splat = findVSplat(N);
3652+
return Splat && selectNegImm(Splat.getOperand(1), SplatVal);
3653+
}
3654+
36333655
bool RISCVDAGToDAGISel::selectLow8BitsVSplat(SDValue N, SDValue &SplatVal) {
36343656
auto IsExtOrTrunc = [](SDValue N) {
36353657
switch (N->getOpcode()) {

llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -142,6 +142,7 @@ class RISCVDAGToDAGISel : public SelectionDAGISel {
142142
bool selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal);
143143
bool selectVSplatSimm5Plus1NoDec(SDValue N, SDValue &SplatVal);
144144
bool selectVSplatSimm5Plus1NonZero(SDValue N, SDValue &SplatVal);
145+
bool selectVSplatImm64Neg(SDValue N, SDValue &SplatVal);
145146
// Matches the splat of a value which can be extended or truncated, such that
146147
// only the bottom 8 bits are preserved.
147148
bool selectLow8BitsVSplat(SDValue N, SDValue &SplatVal);

llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6232,6 +6232,36 @@ foreach vti = AllIntegerVectors in {
62326232
}
62336233
}
62346234

6235+
// (add v, C) -> (sub v, -C) if -C cheaper to materialize
6236+
defvar I64IntegerVectors = !filter(vti, AllIntegerVectors, !eq(vti.SEW, 64));
6237+
foreach vti = I64IntegerVectors in {
6238+
let Predicates = [HasVInstructionsI64] in {
6239+
def : Pat<(vti.Vector (int_riscv_vadd (vti.Vector vti.RegClass:$passthru),
6240+
(vti.Vector vti.RegClass:$rs1),
6241+
(i64 negImm:$rs2),
6242+
VLOpFrag)),
6243+
(!cast<Instruction>("PseudoVSUB_VX_"#vti.LMul.MX)
6244+
vti.RegClass:$passthru,
6245+
vti.RegClass:$rs1,
6246+
negImm:$rs2,
6247+
GPR:$vl, vti.Log2SEW, TU_MU)>;
6248+
def : Pat<(vti.Vector (int_riscv_vadd_mask (vti.Vector vti.RegClass:$passthru),
6249+
(vti.Vector vti.RegClass:$rs1),
6250+
(i64 negImm:$rs2),
6251+
(vti.Mask VMV0:$vm),
6252+
VLOpFrag,
6253+
(i64 timm:$policy))),
6254+
(!cast<Instruction>("PseudoVSUB_VX_"#vti.LMul.MX#"_MASK")
6255+
vti.RegClass:$passthru,
6256+
vti.RegClass:$rs1,
6257+
negImm:$rs2,
6258+
(vti.Mask VMV0:$vm),
6259+
GPR:$vl,
6260+
vti.Log2SEW,
6261+
(i64 timm:$policy))>;
6262+
}
6263+
}
6264+
62356265
//===----------------------------------------------------------------------===//
62366266
// 11.2. Vector Widening Integer Add/Subtract
62376267
//===----------------------------------------------------------------------===//

llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -907,6 +907,18 @@ foreach vti = AllIntegerVectors in {
907907
}
908908
}
909909

910+
// (add v, C) -> (sub v, -C) if -C cheaper to materialize
911+
foreach vti = I64IntegerVectors in {
912+
let Predicates = [HasVInstructionsI64] in {
913+
def : Pat<(add (vti.Vector vti.RegClass:$rs1),
914+
(vti.Vector (SplatPat_imm64_neg i64:$rs2))),
915+
(!cast<Instruction>("PseudoVSUB_VX_"#vti.LMul.MX)
916+
(vti.Vector (IMPLICIT_DEF)),
917+
vti.RegClass:$rs1,
918+
negImm:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
919+
}
920+
}
921+
910922
// 11.2. Vector Widening Integer Add and Subtract
911923
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, sext_oneuse, "PseudoVWADD">;
912924
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, zext_oneuse, "PseudoVWADDU">;

llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -738,6 +738,7 @@ def SplatPat_simm5_plus1_nodec
738738
: ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1NoDec", [], [], 3>;
739739
def SplatPat_simm5_plus1_nonzero
740740
: ComplexPattern<vAny, 1, "selectVSplatSimm5Plus1NonZero", [], [], 3>;
741+
def SplatPat_imm64_neg : ComplexPattern<vAny, 1, "selectVSplatImm64Neg", [], [], 3>;
741742

742743
// Selects extends or truncates of splats where we only care about the lowest 8
743744
// bits of each element.
@@ -2122,6 +2123,19 @@ foreach vti = AllIntegerVectors in {
21222123
}
21232124
}
21242125

2126+
// (add v, C) -> (sub v, -C) if -C cheaper to materialize
2127+
foreach vti = I64IntegerVectors in {
2128+
let Predicates = [HasVInstructionsI64] in {
2129+
def : Pat<(riscv_add_vl (vti.Vector vti.RegClass:$rs1),
2130+
(vti.Vector (SplatPat_imm64_neg i64:$rs2)),
2131+
vti.RegClass:$passthru, (vti.Mask VMV0:$vm), VLOpFrag),
2132+
(!cast<Instruction>("PseudoVSUB_VX_"#vti.LMul.MX#"_MASK")
2133+
vti.RegClass:$passthru, vti.RegClass:$rs1,
2134+
negImm:$rs2, (vti.Mask VMV0:$vm),
2135+
GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
2136+
}
2137+
}
2138+
21252139
// 11.2. Vector Widening Integer Add/Subtract
21262140
defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwadd_vl, riscv_vwadd_w_vl, "PseudoVWADD">;
21272141
defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwaddu_vl, riscv_vwaddu_w_vl, "PseudoVWADDU">;

llvm/test/CodeGen/RISCV/add-imm64-to-sub.ll

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -64,10 +64,9 @@ define i64 @add_multiuse_const(i64 %x, i64 %y) {
6464
; CHECK-LABEL: add_multiuse_const:
6565
; CHECK: # %bb.0:
6666
; CHECK-NEXT: li a2, -1
67-
; CHECK-NEXT: slli a2, a2, 40
68-
; CHECK-NEXT: addi a2, a2, 1
69-
; CHECK-NEXT: add a0, a0, a2
70-
; CHECK-NEXT: add a1, a1, a2
67+
; CHECK-NEXT: srli a2, a2, 24
68+
; CHECK-NEXT: sub a0, a0, a2
69+
; CHECK-NEXT: sub a1, a1, a2
7170
; CHECK-NEXT: xor a0, a0, a1
7271
; CHECK-NEXT: ret
7372
%a = add i64 %x, -1099511627775

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -452,10 +452,9 @@ define <2 x i64> @vadd_vx_v2i64_to_sub(<2 x i64> %va) {
452452
; RV64-LABEL: vadd_vx_v2i64_to_sub:
453453
; RV64: # %bb.0:
454454
; RV64-NEXT: li a0, -1
455-
; RV64-NEXT: slli a0, a0, 40
456-
; RV64-NEXT: addi a0, a0, 1
455+
; RV64-NEXT: srli a0, a0, 24
457456
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
458-
; RV64-NEXT: vadd.vx v8, v8, a0
457+
; RV64-NEXT: vsub.vx v8, v8, a0
459458
; RV64-NEXT: ret
460459
%v = add <2 x i64> splat (i64 -1099511627775), %va
461460
ret <2 x i64> %v
@@ -481,10 +480,9 @@ define <2 x i64> @vadd_vx_v2i64_to_sub_swapped(<2 x i64> %va) {
481480
; RV64-LABEL: vadd_vx_v2i64_to_sub_swapped:
482481
; RV64: # %bb.0:
483482
; RV64-NEXT: li a0, -1
484-
; RV64-NEXT: slli a0, a0, 40
485-
; RV64-NEXT: addi a0, a0, 1
483+
; RV64-NEXT: srli a0, a0, 24
486484
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
487-
; RV64-NEXT: vadd.vx v8, v8, a0
485+
; RV64-NEXT: vsub.vx v8, v8, a0
488486
; RV64-NEXT: ret
489487
%v = add <2 x i64> %va, splat (i64 -1099511627775)
490488
ret <2 x i64> %v

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1445,10 +1445,9 @@ define <2 x i64> @vadd_vx_v2i64_to_sub(<2 x i64> %va, <2 x i1> %m, i32 zeroext %
14451445
; RV64-LABEL: vadd_vx_v2i64_to_sub:
14461446
; RV64: # %bb.0:
14471447
; RV64-NEXT: li a1, -1
1448-
; RV64-NEXT: slli a1, a1, 40
1449-
; RV64-NEXT: addi a1, a1, 1
1448+
; RV64-NEXT: srli a1, a1, 24
14501449
; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1451-
; RV64-NEXT: vadd.vx v8, v8, a1, v0.t
1450+
; RV64-NEXT: vsub.vx v8, v8, a1, v0.t
14521451
; RV64-NEXT: ret
14531452
%v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> splat (i64 -1099511627775), <2 x i64> %va, <2 x i1> %m, i32 %evl)
14541453
ret <2 x i64> %v
@@ -1473,10 +1472,9 @@ define <2 x i64> @vadd_vx_v2i64_to_sub_swapped(<2 x i64> %va, <2 x i1> %m, i32 z
14731472
; RV64-LABEL: vadd_vx_v2i64_to_sub_swapped:
14741473
; RV64: # %bb.0:
14751474
; RV64-NEXT: li a1, -1
1476-
; RV64-NEXT: slli a1, a1, 40
1477-
; RV64-NEXT: addi a1, a1, 1
1475+
; RV64-NEXT: srli a1, a1, 24
14781476
; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1479-
; RV64-NEXT: vadd.vx v8, v8, a1, v0.t
1477+
; RV64-NEXT: vsub.vx v8, v8, a1, v0.t
14801478
; RV64-NEXT: ret
14811479
%v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> %va, <2 x i64> splat (i64 -1099511627775), <2 x i1> %m, i32 %evl)
14821480
ret <2 x i64> %v

llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -884,10 +884,9 @@ define <vscale x 1 x i64> @vadd_vx_imm64_to_sub(<vscale x 1 x i64> %va) nounwind
884884
; RV64-LABEL: vadd_vx_imm64_to_sub:
885885
; RV64: # %bb.0:
886886
; RV64-NEXT: li a0, -1
887-
; RV64-NEXT: slli a0, a0, 40
888-
; RV64-NEXT: addi a0, a0, 1
887+
; RV64-NEXT: srli a0, a0, 24
889888
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
890-
; RV64-NEXT: vadd.vx v8, v8, a0
889+
; RV64-NEXT: vsub.vx v8, v8, a0
891890
; RV64-NEXT: ret
892891
%vc = add <vscale x 1 x i64> splat (i64 -1099511627775), %va
893892
ret <vscale x 1 x i64> %vc
@@ -911,10 +910,9 @@ define <vscale x 1 x i64> @vadd_vx_imm64_to_sub_swapped(<vscale x 1 x i64> %va)
911910
; RV64-LABEL: vadd_vx_imm64_to_sub_swapped:
912911
; RV64: # %bb.0:
913912
; RV64-NEXT: li a0, -1
914-
; RV64-NEXT: slli a0, a0, 40
915-
; RV64-NEXT: addi a0, a0, 1
913+
; RV64-NEXT: srli a0, a0, 24
916914
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
917-
; RV64-NEXT: vadd.vx v8, v8, a0
915+
; RV64-NEXT: vsub.vx v8, v8, a0
918916
; RV64-NEXT: ret
919917
%vc = add <vscale x 1 x i64> %va, splat (i64 -1099511627775)
920918
ret <vscale x 1 x i64> %vc

0 commit comments

Comments (0)