[RISCV] Select (add x, C) -> (sub x, -C) if -C cheaper to materialize #137309
Conversation
@llvm/pr-subscribers-backend-risc-v

Author: Piotr Fusik (pfusik)

Changes

RV64 only. For 32-bit constants, a negated constant is never cheaper. This change is similar to how #120221 selects inverted bitwise instructions.

Full diff: https://github.com/llvm/llvm-project/pull/137309.diff

4 Files Affected:
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index ad77106d386c9..86bdb4c7fd24c 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -3206,6 +3206,23 @@ bool RISCVDAGToDAGISel::selectSHXADD_UWOp(SDValue N, unsigned ShAmt,
return false;
}
+bool RISCVDAGToDAGISel::selectNegImm(SDValue N, SDValue &Val) {
+ if (!isa<ConstantSDNode>(N) || !N.hasOneUse())
+ return false;
+ int64_t Imm = cast<ConstantSDNode>(N)->getSExtValue();
+ if (isInt<32>(Imm))
+ return false;
+ int OrigImmCost = RISCVMatInt::getIntMatCost(APInt(64, Imm), 64, *Subtarget,
+ /*CompressionCost=*/true);
+ int NegImmCost = RISCVMatInt::getIntMatCost(APInt(64, -Imm), 64, *Subtarget,
+ /*CompressionCost=*/true);
+ if (OrigImmCost <= NegImmCost)
+ return false;
+
+ Val = selectImm(CurDAG, SDLoc(N), N->getSimpleValueType(0), -Imm, *Subtarget);
+ return true;
+}
+
bool RISCVDAGToDAGISel::selectInvLogicImm(SDValue N, SDValue &Val) {
if (!isa<ConstantSDNode>(N))
return false;
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
index 82a47a9a52501..0672b6ad8829e 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.h
@@ -120,6 +120,7 @@ class RISCVDAGToDAGISel : public SelectionDAGISel {
return selectSHXADD_UWOp(N, ShAmt, Val);
}
+ bool selectNegImm(SDValue N, SDValue &Val);
bool selectInvLogicImm(SDValue N, SDValue &Val);
bool hasAllNBitUsers(SDNode *Node, unsigned Bits,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index baf2bae367df1..5431e554f5df6 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -2120,11 +2120,16 @@ def : Pat<(XLenVT (add GPR:$rs1, immop_oneuse<AddiPair>:$rs2)),
(ADDI (XLenVT (ADDI GPR:$rs1, (AddiPairImmLarge imm:$rs2))),
(AddiPairImmSmall imm:$rs2))>;
+def negImm : ComplexPattern<XLenVT, 1, "selectNegImm", [], [], 0>;
+
let Predicates = [IsRV64] in {
// Select W instructions if only the lower 32-bits of the result are used.
def : Pat<(binop_allwusers<add> GPR:$rs1, immop_oneuse<AddiPair>:$rs2),
(ADDIW (i64 (ADDIW GPR:$rs1, (AddiPairImmLarge imm:$rs2))),
(AddiPairImmSmall imm:$rs2))>;
+
+// Select SUB if the negated constant is cheaper to materialize.
+def : Pat<(XLenVT (add GPR:$rs1, negImm:$rs2)), (SUB GPR:$rs1, negImm:$rs2)>;
}
//===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/RISCV/add-imm64-to-sub.ll b/llvm/test/CodeGen/RISCV/add-imm64-to-sub.ll
new file mode 100644
index 0000000000000..ddcf4e1a8aa77
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/add-imm64-to-sub.ll
@@ -0,0 +1,61 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv64 < %s | FileCheck %s --check-prefixes=CHECK,NOZBS
+; RUN: llc -mtriple=riscv64 -mattr=+zbs < %s | FileCheck %s --check-prefixes=CHECK,ZBS
+
+define i64 @add_b31(i64 %x) {
+; NOZBS-LABEL: add_b31:
+; NOZBS: # %bb.0:
+; NOZBS-NEXT: lui a1, 524288
+; NOZBS-NEXT: sub a0, a0, a1
+; NOZBS-NEXT: ret
+;
+; ZBS-LABEL: add_b31:
+; ZBS: # %bb.0:
+; ZBS-NEXT: bseti a1, zero, 31
+; ZBS-NEXT: add a0, a0, a1
+; ZBS-NEXT: ret
+ %add = add i64 %x, 2147483648
+ ret i64 %add
+}
+
+define i64 @add_b32(i64 %x) {
+; NOZBS-LABEL: add_b32:
+; NOZBS: # %bb.0:
+; NOZBS-NEXT: li a1, -1
+; NOZBS-NEXT: slli a1, a1, 32
+; NOZBS-NEXT: add a0, a0, a1
+; NOZBS-NEXT: ret
+;
+; ZBS-LABEL: add_b32:
+; ZBS: # %bb.0:
+; ZBS-NEXT: bseti a1, zero, 32
+; ZBS-NEXT: sub a0, a0, a1
+; ZBS-NEXT: ret
+ %add = add i64 %x, -4294967296
+ ret i64 %add
+}
+
+define i64 @sub_0xffffffffff(i64 %x) {
+; CHECK-LABEL: sub_0xffffffffff:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: srli a1, a1, 24
+; CHECK-NEXT: sub a0, a0, a1
+; CHECK-NEXT: ret
+ %sub = sub i64 %x, 1099511627775
+ ret i64 %sub
+}
+
+define i64 @add_multiuse(i64 %x) {
+; CHECK-LABEL: add_multiuse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: slli a1, a1, 40
+; CHECK-NEXT: addi a1, a1, 1
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: and a0, a0, a1
+; CHECK-NEXT: ret
+ %add = add i64 %x, -1099511627775
+ %xor = and i64 %add, -1099511627775
+ ret i64 %xor
+}
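To see why the cost gate matters, compare the two add_b32 outcomes above: without Zbs, -4294967296 (0xFFFFFFFF00000000) and its negation 1<<32 each take two instructions to materialize (li+slli), so the costs tie and the pattern does not fire; with Zbs, the negation is a single bseti, so the SUB form wins. A minimal standalone reproducer, mirroring the add_b32 test from the diff (the RUN line assumes a local llc build with this patch applied):

; Mirrors add_b32 from the new test file; CHECK lines copied from the diff above.
; RUN: llc -mtriple=riscv64 -mattr=+zbs < %s | FileCheck %s
define i64 @add_b32(i64 %x) {
; CHECK-LABEL: add_b32:
; CHECK: bseti a1, zero, 32
; CHECK-NEXT: sub a0, a0, a1
  %add = add i64 %x, -4294967296
  ret i64 %add
}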
LGTM
Tests merged into main branch as 0cd3fd4. PR rebased.
This is a follow-up to #137309, adding:
- multi-use of the constant with different adds
- vectors (vadd.vx -> vsub.vx); a sketch of this case follows the list.
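A minimal sketch of the vector case the follow-up targets, assuming the same cost heuristic carries over to vadd.vx with a splat constant; the function name and CHECK lines are an extrapolation from the scalar add_b32 test above, not output taken from the follow-up's own tests:

; Hypothetical example: expected codegen is assumed, not verified here.
; RUN: llc -mtriple=riscv64 -mattr=+v,+zbs < %s | FileCheck %s
define <vscale x 2 x i64> @vadd_vx_b32(<vscale x 2 x i64> %x) {
; CHECK: bseti a0, zero, 32
; CHECK: vsub.vx v8, v8, a0
  %add = add <vscale x 2 x i64> %x, splat (i64 -4294967296)
  ret <vscale x 2 x i64> %add
}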