@@ -2686,6 +2686,9 @@ InstructionCost VPWidenMemoryRecipe::computeCost(ElementCount VF,
       getLoadStoreAlignment(const_cast<Instruction *>(&Ingredient));
   unsigned AS = cast<PointerType>(Ctx.Types.inferScalarType(getAddr()))
                     ->getAddressSpace();
+  unsigned Opcode = isa<VPWidenLoadRecipe, VPWidenLoadEVLRecipe>(this)
+                        ? Instruction::Load
+                        : Instruction::Store;
 
   if (!Consecutive) {
     // TODO: Using the original IR may not be accurate.
@@ -2695,20 +2698,19 @@ InstructionCost VPWidenMemoryRecipe::computeCost(ElementCount VF,
     assert(!Reverse &&
            "Inconsecutive memory access should not have the order.");
     return Ctx.TTI.getAddressComputationCost(Ty) +
-           Ctx.TTI.getGatherScatterOpCost(Ingredient.getOpcode(), Ty, Ptr,
-                                          IsMasked, Alignment, Ctx.CostKind,
-                                          &Ingredient);
+           Ctx.TTI.getGatherScatterOpCost(Opcode, Ty, Ptr, IsMasked, Alignment,
+                                          Ctx.CostKind, &Ingredient);
   }
 
   InstructionCost Cost = 0;
   if (IsMasked) {
-    Cost += Ctx.TTI.getMaskedMemoryOpCost(Ingredient.getOpcode(), Ty, Alignment,
-                                          AS, Ctx.CostKind);
+    Cost +=
+        Ctx.TTI.getMaskedMemoryOpCost(Opcode, Ty, Alignment, AS, Ctx.CostKind);
   } else {
     TTI::OperandValueInfo OpInfo =
         Ctx.TTI.getOperandInfo(Ingredient.getOperand(0));
-    Cost += Ctx.TTI.getMemoryOpCost(Ingredient.getOpcode(), Ty, Alignment, AS,
-                                    Ctx.CostKind, OpInfo, &Ingredient);
+    Cost += Ctx.TTI.getMemoryOpCost(Opcode, Ty, Alignment, AS, Ctx.CostKind,
+                                    OpInfo, &Ingredient);
   }
   if (!Reverse)
     return Cost;
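For readers skimming the hunks: the change replaces Ingredient.getOpcode() with an Opcode derived from the recipe's own type, so the TTI cost queries no longer depend on the underlying IR memory instruction. The standalone sketch below illustrates that pattern outside of LLVM; the MemoryRecipe type and memoryOpcodeFor helper are hypothetical stand-ins, not the actual VPlan classes.

#include <iostream>
#include <vector>

// Hypothetical stand-in for the VPWidenLoad/Store(EVL)Recipe hierarchy;
// the real classes live in llvm/lib/Transforms/Vectorize/VPlan.h.
struct MemoryRecipe {
  enum Kind { Load, LoadEVL, Store, StoreEVL };
  Kind RecipeKind;
};

// Mirrors the idea of the patch: derive "load" vs. "store" from the recipe
// kind itself rather than asking the original IR ingredient for its opcode.
// The real code returns Instruction::Load or Instruction::Store.
const char *memoryOpcodeFor(const MemoryRecipe &R) {
  bool IsLoad = R.RecipeKind == MemoryRecipe::Load ||
                R.RecipeKind == MemoryRecipe::LoadEVL;
  return IsLoad ? "load" : "store";
}

int main() {
  std::vector<MemoryRecipe> Recipes = {{MemoryRecipe::Load},
                                       {MemoryRecipe::StoreEVL}};
  for (const MemoryRecipe &R : Recipes)
    std::cout << memoryOpcodeFor(R) << '\n'; // prints "load" then "store"
  return 0;
}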