@@ -291,7 +291,7 @@ void StubCodeCompiler::GenerateExitSafepointStub(Assembler* assembler) {
 // Calls native code within a safepoint.
 //
 // On entry:
-//   R8: target to call
+//   R9: target to call
 //   Stack: set up for native call (SP), aligned, CSP < SP
 //
 // On exit:
@@ -302,29 +302,29 @@ void StubCodeCompiler::GenerateCallNativeThroughSafepointStub(
   COMPILE_ASSERT((1 << R19) & kAbiPreservedCpuRegs);
 
   __ mov(R19, LR);
-  __ LoadImmediate(R9, target::Thread::exit_through_ffi());
-  __ TransitionGeneratedToNative(R8, FPREG, R9 /*volatile*/,
+  __ LoadImmediate(R10, target::Thread::exit_through_ffi());
+  __ TransitionGeneratedToNative(R9, FPREG, R10 /*volatile*/,
                                  /*enter_safepoint=*/true);
   __ mov(R25, CSP);
   __ mov(CSP, SP);
 
 #if defined(DEBUG)
   // Check CSP alignment.
-  __ andi(R10 /*volatile*/, SP,
+  __ andi(R11 /*volatile*/, SP,
           Immediate(~(OS::ActivationFrameAlignment() - 1)));
-  __ cmp(R10, Operand(SP));
+  __ cmp(R11, Operand(SP));
   Label done;
   __ b(&done, EQ);
   __ Breakpoint();
   __ Bind(&done);
 #endif
 
-  __ blr(R8);
+  __ blr(R9);
 
   __ mov(SP, CSP);
   __ mov(CSP, R25);
 
-  __ TransitionNativeToGenerated(R9, /*leave_safepoint=*/true);
+  __ TransitionNativeToGenerated(R10, /*leave_safepoint=*/true);
   __ ret(R19);
 }
 
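For context, the DEBUG block above only asserts that SP is already aligned to
the activation frame alignment before it is copied into CSP for the native
call. A minimal C++ sketch of the same check (the helper name is illustrative,
not the VM's):

    #include <cstdint>

    // True iff sp is aligned to `alignment` (a power of two) -- the condition
    // the andi/cmp pair in the DEBUG block verifies before CSP is set to SP.
    static bool IsActivationFrameAligned(uintptr_t sp, uintptr_t alignment) {
      return (sp & ~(alignment - 1)) == sp;
    }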
@@ -338,16 +338,16 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
 #else
   Label done;
 
-  // R8 is volatile and not used for passing any arguments.
-  COMPILE_ASSERT(!IsCalleeSavedRegister(R8) && !IsArgumentRegister(R8));
+  // R9 is volatile and not used for passing any arguments.
+  COMPILE_ASSERT(!IsCalleeSavedRegister(R9) && !IsArgumentRegister(R9));
   for (intptr_t i = 0;
        i < NativeCallbackTrampolines::NumCallbackTrampolinesPerPage(); ++i) {
     // We don't use LoadImmediate because we need the trampoline size to be
     // fixed independently of the callback ID.
     //
     // Instead we paste the callback ID directly in the code load it
     // PC-relative.
-    __ ldr(R8, compiler::Address::PC(2 * Instr::kInstrSize));
+    __ ldr(R9, compiler::Address::PC(2 * Instr::kInstrSize));
     __ b(&done);
     __ Emit(next_callback_id + i);
   }
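For context on the PC-relative trick above: every trampoline has the same
fixed shape -- a load, a branch over the data word, and the 32-bit callback ID
pasted directly into the instruction stream -- so the ID always sits at the
same offset from the trampoline's entry. A hedged C++ sketch of reading it
back, with the layout assumption and names spelled out (not the VM's code):

    #include <cstdint>
    #include <cstring>

    // Assumed layout of one trampoline emitted by the loop above (ARM64,
    // 4-byte instructions):
    //   entry + 0 : ldr  (PC-relative, 2 * kInstrSize ahead)
    //   entry + 4 : b    &done
    //   entry + 8 : 32-bit callback ID
    static uint32_t ReadEmbeddedCallbackId(const uint8_t* trampoline_entry) {
      const size_t kInstrSize = 4;
      uint32_t id;
      std::memcpy(&id, trampoline_entry + 2 * kInstrSize, sizeof(id));
      return id;
    }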
@@ -362,7 +362,7 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
 
   // The load of the callback ID might have incorrect higher-order bits, since
   // we only emit a 32-bit callback ID.
-  __ uxtw(R8, R8);
+  __ uxtw(R9, R9);
 
   // Save THR (callee-saved) and LR on real real C stack (CSP). Keeps it
   // aligned.
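The uxtw above matters because only a 32-bit callback ID was embedded in the
trampoline, so the high 32 bits of the loaded value come from whatever happens
to follow the embedded ID in memory; zero-extending makes it safe to use as an
index later. Roughly, in C++ terms (illustrative helper, not the VM's):

    #include <cstdint>

    // Equivalent of `uxtw xN, wN`: keep the low 32 bits, zero the rest.
    static uint64_t ZeroExtend32(uint64_t raw) { return raw & 0xFFFFFFFFull; }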
@@ -374,8 +374,8 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
374
374
RegisterSet all_registers;
375
375
all_registers.AddAllArgumentRegisters ();
376
376
377
- // The call below might clobber R8 (volatile, holding callback_id).
378
- all_registers.Add (Location::RegisterLocation (R8 ));
377
+ // The call below might clobber R9 (volatile, holding callback_id).
378
+ all_registers.Add (Location::RegisterLocation (R9 ));
379
379
380
380
// Load the thread, verify the callback ID and exit the safepoint.
381
381
//
@@ -396,7 +396,7 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
   // loaded anywhere, we use the same trick as before to ensure a predictable
   // instruction sequence.
   Label call;
-  __ mov(R0, R8);
+  __ mov(R0, R9);
   __ ldr(R1, compiler::Address::PC(2 * Instr::kInstrSize));
   __ b(&call);
 
@@ -415,29 +415,30 @@ void StubCodeCompiler::GenerateJITCallbackTrampolines(
     __ mov(CSP, SP);
   }
 
-  COMPILE_ASSERT(!IsCalleeSavedRegister(R9) && !IsArgumentRegister(R9));
+  COMPILE_ASSERT(!IsCalleeSavedRegister(R10) && !IsArgumentRegister(R10));
 
   // Load the code object.
-  __ LoadFromOffset(R9, THR, compiler::target::Thread::callback_code_offset());
-  __ LoadFieldFromOffset(R9, R9,
+  __ LoadFromOffset(R10, THR, compiler::target::Thread::callback_code_offset());
+  __ LoadFieldFromOffset(R10, R10,
                          compiler::target::GrowableObjectArray::data_offset());
-  __ ldr(R9, __ ElementAddressForRegIndex(
-                 /*external=*/false,
-                 /*array_cid=*/kArrayCid,
-                 /*index, smi-tagged=*/compiler::target::kWordSize * 2,
-                 /*index_unboxed=*/false,
-                 /*array=*/R9,
-                 /*index=*/R8,
-                 /*temp=*/TMP));
-  __ LoadFieldFromOffset(R9, R9, compiler::target::Code::entry_point_offset());
-
-  // Clobbers all volatile registers, including the callback ID in R8.
+  __ ldr(R10, __ ElementAddressForRegIndex(
+                  /*external=*/false,
+                  /*array_cid=*/kArrayCid,
+                  /*index, smi-tagged=*/compiler::target::kWordSize * 2,
+                  /*index_unboxed=*/false,
+                  /*array=*/R10,
+                  /*index=*/R9,
+                  /*temp=*/TMP));
+  __ LoadFieldFromOffset(R10, R10,
+                         compiler::target::Code::entry_point_offset());
+
+  // Clobbers all volatile registers, including the callback ID in R9.
   // Resets CSP and SP, important for EnterSafepoint below.
-  __ blr(R9);
+  __ blr(R10);
 
-  // EnterSafepoint clobbers TMP, TMP2 and R8 -- all volatile and not holding
+  // EnterSafepoint clobbers TMP, TMP2 and R9 -- all volatile and not holding
   // return values.
-  __ EnterSafepoint(/*scratch=*/R8);
+  __ EnterSafepoint(/*scratch=*/R9);
 
   // Pop LR and THR from the real stack (CSP).
   __ ldp(THR, LR, Address(CSP, 2 * target::kWordSize, Address::PairPostIndex));
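The loads above walk Thread -> callback_code (a GrowableObjectArray) -> its
data backing array -> the Code object at the callback ID -> that code's entry
point, and the blr then calls through it. A rough C++ analogue of the pointer
chase, with made-up struct layouts purely for illustration (the real objects
are tagged VM objects, not plain structs):

    #include <cstdint>

    // Illustrative stand-ins only; names and layouts are not the VM's.
    struct FakeCode { uintptr_t entry_point; };
    struct FakeGrowableObjectArray { FakeCode** data; };
    struct FakeThread { FakeGrowableObjectArray* callback_code; };

    static uintptr_t CallbackEntryPoint(const FakeThread* thread,
                                        uint32_t callback_id) {
      FakeCode* code = thread->callback_code->data[callback_id];
      return code->entry_point;  // what the blr above jumps to
    }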