@@ -67,9 +67,7 @@ void NativeCall::print() {
 // Inserts a native call instruction at a given pc
 void NativeCall::insert(address code_pos, address entry) {
   intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
-#ifdef AMD64
   guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
-#endif // AMD64
   *code_pos = instruction_code;
   *((int32_t *)(code_pos+1)) = (int32_t) disp;
   ICache::invalidate_range(code_pos, instruction_size);
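Note on the hunk above: NativeCall::insert emits the 5-byte x86 call (opcode 0xE8 followed by a rel32), and the displacement is relative to the end of the instruction, which is why it is computed against code_pos + 1 + 4. The guarantee only matters on 64-bit, where entry can lie outside the +/-2 GB reach of a rel32; with 32-bit x86 gone it no longer needs the #ifdef. A minimal standalone sketch of the same arithmetic, assuming only the 0xE8 rel32 call form (not HotSpot code):

#include <cstdint>
#include <cstring>

using address = unsigned char*;

// Recover the destination encoded by a 5-byte "call rel32" at code_pos.
static address call_destination(address code_pos) {
  int32_t rel32;
  std::memcpy(&rel32, code_pos + 1, sizeof(rel32)); // bytes after the 0xE8 opcode
  return code_pos + 1 + 4 + rel32;                  // next-instruction address + disp
}

// Encode it back: the inverse of "disp = entry - (code_pos + 1 + 4)" in the diff.
static void write_call(address code_pos, address entry) {
  int64_t disp = (int64_t)(entry - (code_pos + 1 + 4));
  int32_t rel32 = (int32_t)disp;  // must fit in 32 bits, as the guarantee checks
  code_pos[0] = 0xE8;
  std::memcpy(code_pos + 1, &rel32, sizeof(rel32));
}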
@@ -157,7 +155,6 @@ void NativeCall::set_destination_mt_safe(address dest) {


 void NativeMovConstReg::verify() {
-#ifdef AMD64
   // make sure code pattern is actually a mov reg64, imm64 instruction
   bool valid_rex_prefix  = ubyte_at(0) == Assembler::REX_W || ubyte_at(0) == Assembler::REX_WB;
   bool valid_rex2_prefix = ubyte_at(0) == Assembler::REX2 &&
@@ -169,12 +166,6 @@ void NativeMovConstReg::verify() {
     print();
     fatal("not a REX.W[B] mov reg64, imm64");
   }
-#else
-  // make sure code pattern is actually a mov reg, imm32 instruction
-  u_char test_byte = *(u_char*)instruction_address();
-  u_char test_byte_2 = test_byte & ( 0xff ^ register_mask);
-  if (test_byte_2 != instruction_code) fatal("not a mov reg, imm32");
-#endif // AMD64
 }


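Note on the two hunks above: after the change, NativeMovConstReg::verify only has to recognize the 64-bit pattern, a REX.W (or REX2) prefix followed by the mov r64, imm64 opcode; the deleted #else branch matched the 32-bit mov reg, imm32 form by masking out the register bits. A rough sketch of the accepted prefix/opcode check, with the byte values written out as assumptions (HotSpot uses its Assembler constants, as in the diff):

// Sketch only: does p point at "mov r64, imm64" (REX.W + B8+rd + imm64)?
static bool looks_like_mov_reg64_imm64(const unsigned char* p) {
  const unsigned char REX_W  = 0x48;        // 0100WRXB with W=1
  const unsigned char REX_WB = 0x49;        // W=1, B=1 (destination r8..r15)
  bool rex_ok    = (p[0] == REX_W) || (p[0] == REX_WB);
  bool opcode_ok = (p[1] & 0xF8) == 0xB8;   // B8+rd, register in the low 3 bits
  return rex_ok && opcode_ok;
}

(The REX2 form accepted by the real verify() is the two-byte APX prefix and is left out of this sketch.)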
@@ -192,12 +183,10 @@ int NativeMovRegMem::instruction_start() const {
   // See comment in Assembler::locate_operand() about VEX prefixes.
   if (instr_0 == instruction_VEX_prefix_2bytes) {
     assert((UseAVX > 0), "shouldn't have VEX prefix");
-    NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
     return 2;
   }
   if (instr_0 == instruction_VEX_prefix_3bytes) {
     assert((UseAVX > 0), "shouldn't have VEX prefix");
-    NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
     return 3;
   }
   if (instr_0 == instruction_EVEX_prefix_4bytes) {
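Note on the hunk above: the deleted NOT_LP64 asserts guarded a 32-bit-only ambiguity. The 2-byte and 3-byte VEX prefixes use the byte values 0xC5 and 0xC4, which on 32-bit x86 also decode as the legacy LDS/LES opcodes; the two cases are told apart by the top two bits of the following byte, which must both be set for VEX. On x86-64 LDS/LES are invalid, so the check is dead once 32-bit support is removed. A sketch of the disambiguation the asserts expressed (assumed byte values, not HotSpot code):

// Sketch: on 32-bit, C4/C5 only start a VEX prefix if the next byte's top
// two bits are set; otherwise the bytes decode as LES/LDS.
static bool is_vex_prefix_on_ia32(const unsigned char* p) {
  bool vex_lead = (p[0] == 0xC4) || (p[0] == 0xC5);
  return vex_lead && ((p[1] & 0xC0) == 0xC0);   // same test as the removed asserts
}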
@@ -313,8 +302,7 @@ void NativeMovRegMem::print() {
 void NativeLoadAddress::verify() {
   // make sure code pattern is actually a mov [reg+offset], reg instruction
   u_char test_byte = *(u_char*)instruction_address();
-  if ( ! ((test_byte == lea_instruction_code)
-          LP64_ONLY(|| (test_byte == mov64_instruction_code) ))) {
+  if ((test_byte != lea_instruction_code) && (test_byte != mov64_instruction_code)) {
     fatal("not a lea reg, [reg+offs] instruction");
   }
 }
@@ -340,9 +328,7 @@ void NativeJump::verify() {

 void NativeJump::insert(address code_pos, address entry) {
   intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
-#ifdef AMD64
   guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
-#endif // AMD64

   *code_pos = instruction_code;
   *((int32_t*)(code_pos + 1)) = (int32_t)disp;
@@ -355,11 +341,7 @@ void NativeJump::check_verified_entry_alignment(address entry, address verified_
   // in use. The patching in that instance must happen only when certain
   // alignment restrictions are true. These guarantees check those
   // conditions.
-#ifdef AMD64
   const int linesize = 64;
-#else
-  const int linesize = 32;
-#endif // AMD64

   // Must be wordSize aligned
   guarantee(((uintptr_t) verified_entry & (wordSize -1)) == 0,
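Note on the hunk above: with the 32-byte line size for 32-bit gone, the alignment rule is fixed at a 64-byte cache line. The point of these guarantees is that the 5-byte jump written by patch_verified_entry must sit entirely inside one cache line, so that it can later be replaced by a single atomic 8-byte store. A sketch of the condition the function enforces (the cache-line guarantee itself follows a few lines below this hunk):

#include <cstdint>

// Sketch: word aligned, and the first 5 bytes of the jump stay in one 64-byte line.
static bool patchable_in_place(uintptr_t verified_entry) {
  const uintptr_t linesize = 64;
  bool word_aligned = (verified_entry & (sizeof(void*) - 1)) == 0;
  bool one_line     = (verified_entry / linesize) == ((verified_entry + 4) / linesize);
  return word_aligned && one_line;
}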
@@ -386,7 +368,6 @@ void NativeJump::check_verified_entry_alignment(address entry, address verified_
 //
 void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
   // complete jump instruction (to be inserted) is in code_buffer;
-#ifdef _LP64
   union {
     jlong cb_long;
     unsigned char code_buffer[8];
@@ -402,43 +383,6 @@ void NativeJump::patch_verified_entry(address entry, address verified_entry, add

   Atomic::store((jlong *) verified_entry, u.cb_long);
   ICache::invalidate_range(verified_entry, 8);
-
-#else
-  unsigned char code_buffer[5];
-  code_buffer[0] = instruction_code;
-  intptr_t disp = (intptr_t)dest - ((intptr_t)verified_entry + 1 + 4);
-  *(int32_t*)(code_buffer + 1) = (int32_t)disp;
-
-  check_verified_entry_alignment(entry, verified_entry);
-
-  // Can't call nativeJump_at() because it's asserts jump exists
-  NativeJump* n_jump = (NativeJump*) verified_entry;
-
-  // First patch dummy jmp in place
-
-  unsigned char patch[4];
-  assert(sizeof(patch)==sizeof(int32_t), "sanity check");
-  patch[0] = 0xEB;  // jmp rel8
-  patch[1] = 0xFE;  // jmp to self
-  patch[2] = 0xEB;
-  patch[3] = 0xFE;
-
-  // First patch dummy jmp in place
-  *(int32_t*)verified_entry = *(int32_t *)patch;
-
-  n_jump->wrote(0);
-
-  // Patch 5th byte (from jump instruction)
-  verified_entry[4] = code_buffer[4];
-
-  n_jump->wrote(4);
-
-  // Patch bytes 0-3 (from jump instruction)
-  *(int32_t*)verified_entry = *(int32_t*)code_buffer;
-  // Invalidate. Opteron requires a flush after every write.
-  n_jump->wrote(0);
-#endif // _LP64
-
 }

 void NativeIllegalInstruction::insert(address code_pos) {
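Note on the two hunks above: the surviving 64-bit path of patch_verified_entry builds the whole 5-byte jmp rel32 (plus the three trailing bytes, read back unchanged) in an 8-byte union and publishes it with one atomic store, then invalidates the icache range. That is why the deleted 32-bit branch, which had to park a jmp-to-self, patch the fifth byte, and then patch bytes 0-3 with a flush after each write, is no longer needed. A self-contained sketch of the same idea, with the opcode value and memory-order choice as assumptions (the real code uses HotSpot's Atomic::store and ICache, as shown in the diff, and the elided middle of the function fills the buffer):

#include <atomic>
#include <cstdint>
#include <cstring>

// Sketch: atomically replace the first 5 bytes at verified_entry with "jmp dest".
// Assumes verified_entry is 8-byte aligned and dest is within rel32 reach,
// which is what check_verified_entry_alignment and the guarantee ensure.
static void patch_entry_with_jump(unsigned char* verified_entry, unsigned char* dest) {
  union {
    int64_t cb_long;
    unsigned char code_buffer[8];
  } u;
  std::memcpy(u.code_buffer, verified_entry, 8);              // preserve bytes 5..7
  u.code_buffer[0] = 0xE9;                                    // jmp rel32 opcode
  int32_t disp = (int32_t)(dest - (verified_entry + 1 + 4));  // relative to next insn
  std::memcpy(u.code_buffer + 1, &disp, sizeof(disp));

  // One 8-byte store makes the new jump visible in a single step; the caller
  // would then invalidate the instruction cache for the patched range.
  reinterpret_cast<std::atomic<int64_t>*>(verified_entry)
      ->store(u.cb_long, std::memory_order_release);
}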
@@ -455,9 +399,7 @@ void NativeGeneralJump::verify() {

 void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
   intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
-#ifdef AMD64
   guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
-#endif // AMD64

   *code_pos = unconditional_long_jump;
   *((int32_t *)(code_pos+1)) = (int32_t) disp;