@@ -434,8 +434,7 @@ LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) {
   // If there is a dominating store to ReturnValue, we can elide
   // the load, zap the store, and usually zap the alloca.
-  // NOTE(cir): This seems like a premature optimization case, so I'm
-  // skipping it.
+  // NOTE(cir): This seems like a premature optimization case. Skipping it.
   if (::cir::MissingFeatures::returnValueDominatingStoreOptmiization()) {
    llvm_unreachable("NYI");
   }
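For context on the guard above: MissingFeatures in clangir is a catalog of not-yet-implemented features, each modeled as a predicate that currently returns false, so guarded paths stay dead until someone implements them. The sketch below illustrates that convention; it is not the actual MissingFeatures.h, and only the member names are taken from the hunks in this diff.

    #include <cassert>

    namespace cir {
    struct MissingFeatures {
      // Each unimplemented feature is a predicate returning false, so
      // the guards below compile to dead code until it is flipped.
      static bool returnValueDominatingStoreOptmiization() { return false; }
      static bool volatileTypes() { return false; }
    };
    } // namespace cir

    void guards() {
      // Guard style: trap loudly if the path ever becomes reachable.
      if (cir::MissingFeatures::returnValueDominatingStoreOptmiization())
        assert(false && "NYI"); // stands in for llvm_unreachable("NYI")
      // Assert style: record the assumption that the feature is absent.
      assert(!cir::MissingFeatures::volatileTypes());
    }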
@@ -453,12 +452,6 @@ LogicalResult LowerFunction::buildFunctionEpilog(const LowerFunctionInfo &FI) {
    mlir::PatternRewriter::InsertionGuard guard(rewriter);
    NewFn->walk([&](ReturnOp returnOp) {
      rewriter.setInsertionPoint(returnOp);
-
-      // TODO(cir): I'm not sure if we need this offset here or in CIRGen.
-      // Perhaps both? For now I'm just ignoring it.
-      // Value V = emitAddressAtOffset(*this, getResultAlloca(returnOp),
-      // RetAI);
-
      RV = castReturnValue(returnOp->getOperand(0), RetAI.getCoerceToType(),
                           *this);
      rewriter.replaceOpWithNewOp<ReturnOp>(returnOp, RV);
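This hunk relies on a standard MLIR rewriting pattern: save the insertion point with an RAII guard, walk every ReturnOp in the function, build the coercion right before each one, and swap the terminator. A minimal sketch of that pattern, using mlir::func::ReturnOp in place of CIR's ReturnOp and assuming a single return operand (illustration only, not this file's code):

    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/IR/PatternMatch.h"

    static void rewriteReturns(mlir::Operation *fn,
                               mlir::PatternRewriter &rewriter) {
      // RAII: the previous insertion point is restored on scope exit.
      mlir::PatternRewriter::InsertionGuard guard(rewriter);
      fn->walk([&](mlir::func::ReturnOp returnOp) {
        // Ops built from here on land immediately before the old
        // terminator, which is where castReturnValue's ops go above.
        rewriter.setInsertionPoint(returnOp);
        mlir::Value rv = returnOp->getOperand(0);
        rewriter.replaceOpWithNewOp<mlir::func::ReturnOp>(returnOp, rv);
      });
    }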
@@ -655,7 +648,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo,

   FuncType IRFuncTy = LM.getTypes().getFunctionType(CallInfo);

-  // NOTE(cir): Some target/ABI related checks happen here. I'm skipping them
+  // NOTE(cir): Some target/ABI related checks happen here. They are skipped
   // under the assumption that they are handled in CIRGen.

   // 1. Set up the arguments.
@@ -737,7 +730,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo,
      if (!isa<StructType>(I->getType())) {
        llvm_unreachable("NYI");
      } else {
-        // NOTE(cir): I'm leaving L/RValue stuff for CIRGen to handle.
+        // NOTE(cir): L/RValue stuff is left for CIRGen to handle.
        Src = *I;
      }

@@ -756,6 +749,7 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo,
      Value Load = createCoercedValue(Src, ArgInfo.getCoerceToType(), *this);

      // FIXME(cir): We should probably handle CMSE non-secure calls here
+      assert(!::cir::MissingFeatures::cmseNonSecureCallAttr());

      // since they are an ARM-specific feature.
      if (::cir::MissingFeatures::undef())
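createCoercedValue is the piece that reconciles the source value's type with the ABI's coerce-to type. Conceptually it models the classic memory round-trip: write the value's bytes, then read them back as the type the calling convention actually passes. A hypothetical standalone illustration of that idea (TwoInts and coerceToI64 are made-up names; this is not clangir's implementation):

    #include <cstdint>
    #include <cstring>

    struct TwoInts { int32_t a; int32_t b; }; // 8 bytes of payload

    // Coerce the struct into the single i64 an ABI might pass in one
    // register. memcpy keeps the bits and avoids aliasing UB.
    uint64_t coerceToI64(TwoInts s) {
      static_assert(sizeof(TwoInts) == sizeof(uint64_t), "exact-size copy");
      uint64_t out;
      std::memcpy(&out, &s, sizeof(out));
      return out;
    }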
@@ -856,22 +850,22 @@ Value LowerFunction::rewriteCallOp(const LowerFunctionInfo &CallInfo,
   // FIXME(cir): Use return value slot here.
   Value RetVal = callOp.getResult();
   // TODO(cir): Check for volatile return values.
+  assert(!::cir::MissingFeatures::volatileTypes());

   // NOTE(cir): If the function returns, there should always be a valid
   // return value present. Instead of setting the return value here, we
   // should have the ReturnValueSlot object set it beforehand.
   if (!RetVal) {
     RetVal = callOp.getResult();
     // TODO(cir): Check for volatile return values.
+    assert(!::cir::MissingFeatures::volatileTypes());
   }

   // An empty record can overlap other data (if declared with
   // no_unique_address); omit the store for such types - as there is no
   // actual data to store.
   if (dyn_cast<StructType>(RetTy) &&
       cast<StructType>(RetTy).getNumElements() != 0) {
-    // NOTE(cir): I'm assuming we don't need to change any offsets here.
-    // Value StorePtr = emitAddressAtOffset(*this, RetVal, RetAI);
     RetVal =
         createCoercedValue(newCallOp.getResult(), RetVal.getType(), *this);
   }
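The no_unique_address caveat is easy to see in plain C++20: an empty member may share its address with real data, so a store "to" the empty record could touch live bytes. A small standalone example (Empty and Packed are illustrative names, unrelated to the code above):

    #include <cstdio>

    struct Empty {};
    struct Packed {
      [[no_unique_address]] Empty e; // may occupy no storage of its own
      int x;
    };

    int main() {
      Packed p{.x = 42};
      // On typical implementations &p.e aliases the start of p, i.e. x's
      // bytes, which is why the lowering above omits the store entirely.
      std::printf("&p.e=%p &p.x=%p\n", (void *)&p.e, (void *)&p.x);
    }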