@@ -4849,7 +4849,6 @@ void CodeGen::genCheckUseBlockInit()
     // so reserve two extra callee saved
     // This is better than pushing eax, ecx, because otherwise later on
     // we would mess up already computed offsets on the stack (for ESP frames)
-
     regSet.rsSetRegsModified(RBM_EDI);
 
     // For register arguments we may have to save ECX (and RDI on Amd64 System V OSes.)
@@ -5420,23 +5419,43 @@ void CodeGen::genAllocLclFrame(unsigned frameSize,
 
 #else // !CPU_LOAD_STORE_ARCH
 
+    // Code size for each instruction. We need this because the
+    // backward branch is hard-coded with the number of bytes to branch.
+
     // loop:
+    // For x86
     //      test [esp + eax], eax      3
     //      sub eax, 0x1000            5
     //      cmp EAX, -frameSize        5
     //      jge loop                   2
+    //
+    // For AMD64 using RAX
+    //      test [rsp + rax], rax      4
+    //      sub rax, 0x1000            6
+    //      cmp rax, -frameSize        6
+    //      jge loop                   2
+    //
+    // For AMD64 using RBP
+    //      test [rsp + rbp], rbp      4
+    //      sub rbp, 0x1000            7
+    //      cmp rbp, -frameSize        7
+    //      jge loop                   2
 
     getEmitter()->emitIns_R_ARR(INS_TEST, EA_PTRSIZE, initReg, REG_SPBASE, initReg, 0);
     inst_RV_IV(INS_sub, initReg, CORINFO_PAGE_SIZE, EA_PTRSIZE);
     inst_RV_IV(INS_cmp, initReg, -((ssize_t)frameSize), EA_PTRSIZE);
-    inst_IV (INS_jge, -15 AMD64_ONLY(-3)); // Branch backwards to Start of Loop
+    int extraBytesForBackJump = 0;
+#ifdef _TARGET_AMD64_
+    extraBytesForBackJump = ((initReg == REG_EAX) ? 3 : 5);
+#endif // _TARGET_AMD64_
+    inst_IV(INS_jge, -15 - extraBytesForBackJump); // Branch backwards to Start of Loop
 
 #endif // !CPU_LOAD_STORE_ARCH
 
     *pInitRegZeroed = false; // The initReg does not contain zero
 
 #ifdef _TARGET_XARCH_
-    // The backward branch above depends upon using EAX
-    assert(initReg == REG_EAX);
+    // The backward branch above depends upon using EAX (and for Amd64 funclets EBP)
+    assert((initReg == REG_EAX) AMD64_ONLY(|| (initReg == REG_EBP)));
 
     if (pushedStubParam)
     {
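
For reference, the arithmetic behind the new `extraBytesForBackJump` values works out as follows: on AMD64 every instruction in the probe loop grows by a REX.W prefix, and `sub`/`cmp` with an immediate additionally need a ModRM byte unless the destination is the accumulator (RAX), which has a one-byte-shorter special encoding. Below is a minimal standalone C++ sketch (not JIT code; the variable names are invented for illustration) that re-derives the branch displacements from the per-instruction byte counts listed in the comment above:

```cpp
#include <cassert>
#include <cstdio>

int main()
{
    // x86 probe loop: test(3) + sub(5) + cmp(5) + jge(2) = 15 bytes,
    // hence the hard-coded backward displacement of -15.
    const int x86LoopBytes = 3 + 5 + 5 + 2;
    assert(x86LoopBytes == 15);

    // AMD64 with RAX as initReg: test(4) + sub(6) + cmp(6) + jge(2) = 18,
    // i.e. 3 bytes more than x86 -> extraBytesForBackJump == 3.
    const int amd64RaxLoopBytes = 4 + 6 + 6 + 2;
    assert(amd64RaxLoopBytes - x86LoopBytes == 3);

    // AMD64 with RBP as initReg (the funclet case): test(4) + sub(7) + cmp(7) + jge(2) = 20,
    // i.e. 5 bytes more than x86 -> extraBytesForBackJump == 5.
    const int amd64RbpLoopBytes = 4 + 7 + 7 + 2;
    assert(amd64RbpLoopBytes - x86LoopBytes == 5);

    printf("jge displacement: x86 %d, amd64/rax %d, amd64/rbp %d\n",
           -x86LoopBytes, -amd64RaxLoopBytes, -amd64RbpLoopBytes);
    return 0;
}
```

These totals match `inst_IV(INS_jge, -15 - extraBytesForBackJump)`: the `jge` rel8 displacement is measured from the end of the 2-byte jump, so branching back by the full loop size lands on the `test` at the top of the loop.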