
Commit 6a0c24f

mhaessig authored and robcasloz committed
8355472: Clean up x86 nativeInst after 32-bit x86 removal
Reviewed-by: shade, rcastanedalo, jwaters
1 parent 31e70e4 commit 6a0c24f

File tree

2 files changed: +1 −82

src/hotspot/cpu/x86/nativeInst_x86.cpp

+1 −59
@@ -67,9 +67,7 @@ void NativeCall::print() {
 // Inserts a native call instruction at a given pc
 void NativeCall::insert(address code_pos, address entry) {
   intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
-#ifdef AMD64
   guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
-#endif // AMD64
   *code_pos = instruction_code;
   *((int32_t *)(code_pos+1)) = (int32_t) disp;
   ICache::invalidate_range(code_pos, instruction_size);
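With 32-bit x86 gone, the displacement guarantee no longer needs an #ifdef: a near call on x86-64 is opcode 0xE8 followed by a signed 32-bit displacement measured from the end of the 5-byte instruction, so the offset must always round-trip through 32 bits. A minimal standalone sketch of that check (plain C++, not HotSpot's types):

    #include <cstdint>

    // A near call is E8 + rel32; rel32 is relative to the end of the
    // 5-byte instruction (code_pos + 1 opcode byte + 4 displacement bytes).
    bool fits_in_rel32(intptr_t code_pos, intptr_t entry) {
      intptr_t disp = entry - (code_pos + 1 + 4);
      // Must survive truncation to 32 bits and sign-extension back.
      return disp == (intptr_t)(int32_t)disp;
    }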
@@ -157,7 +155,6 @@ void NativeCall::set_destination_mt_safe(address dest) {
 
 
 void NativeMovConstReg::verify() {
-#ifdef AMD64
   // make sure code pattern is actually a mov reg64, imm64 instruction
   bool valid_rex_prefix = ubyte_at(0) == Assembler::REX_W || ubyte_at(0) == Assembler::REX_WB;
   bool valid_rex2_prefix = ubyte_at(0) == Assembler::REX2 &&
@@ -169,12 +166,6 @@ void NativeMovConstReg::verify() {
     print();
     fatal("not a REX.W[B] mov reg64, imm64");
   }
-#else
-  // make sure code pattern is actually a mov reg, imm32 instruction
-  u_char test_byte = *(u_char*)instruction_address();
-  u_char test_byte_2 = test_byte & ( 0xff ^ register_mask);
-  if (test_byte_2 != instruction_code) fatal("not a mov reg, imm32");
-#endif // AMD64
 }
 
 
@@ -192,12 +183,10 @@ int NativeMovRegMem::instruction_start() const {
   // See comment in Assembler::locate_operand() about VEX prefixes.
   if (instr_0 == instruction_VEX_prefix_2bytes) {
     assert((UseAVX > 0), "shouldn't have VEX prefix");
-    NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
     return 2;
   }
   if (instr_0 == instruction_VEX_prefix_3bytes) {
     assert((UseAVX > 0), "shouldn't have VEX prefix");
-    NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
     return 3;
   }
   if (instr_0 == instruction_EVEX_prefix_4bytes) {
@@ -313,8 +302,7 @@ void NativeMovRegMem::print() {
 void NativeLoadAddress::verify() {
   // make sure code pattern is actually a mov [reg+offset], reg instruction
   u_char test_byte = *(u_char*)instruction_address();
-  if ( ! ((test_byte == lea_instruction_code)
-          LP64_ONLY(|| (test_byte == mov64_instruction_code) ))) {
+  if ((test_byte != lea_instruction_code) && (test_byte != mov64_instruction_code)) {
     fatal ("not a lea reg, [reg+offs] instruction");
   }
 }
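The rewrite in NativeLoadAddress::verify() is a pure De Morgan simplification: LP64_ONLY(x) expands to x on 64-bit builds (and to nothing on 32-bit ones), so with only 64-bit remaining, !(a || b) becomes (!a) && (!b). A tiny standalone check of the equivalence (the byte values here are illustrative, not the header's constants):

    #include <cassert>

    int main() {
      const int lea = 0x8D, mov64 = 0x48;  // illustrative opcode bytes
      for (int b = 0; b < 256; b++) {
        bool old_form = !((b == lea) || (b == mov64));
        bool new_form = (b != lea) && (b != mov64);
        assert(old_form == new_form);  // equivalent for every opcode byte
      }
      return 0;
    }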
@@ -340,9 +328,7 @@ void NativeJump::verify() {
 
 void NativeJump::insert(address code_pos, address entry) {
   intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
-#ifdef AMD64
   guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
-#endif // AMD64
 
   *code_pos = instruction_code;
   *((int32_t*)(code_pos + 1)) = (int32_t)disp;
@@ -355,11 +341,7 @@ void NativeJump::check_verified_entry_alignment(address entry, address verified_
   // in use. The patching in that instance must happen only when certain
   // alignment restrictions are true. These guarantees check those
   // conditions.
-#ifdef AMD64
   const int linesize = 64;
-#else
-  const int linesize = 32;
-#endif // AMD64
 
   // Must be wordSize aligned
   guarantee(((uintptr_t) verified_entry & (wordSize -1)) == 0,
@@ -386,7 +368,6 @@ void NativeJump::check_verified_entry_alignment(address entry, address verified_
 //
 void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
   // complete jump instruction (to be inserted) is in code_buffer;
-#ifdef _LP64
   union {
     jlong cb_long;
     unsigned char code_buffer[8];
@@ -402,43 +383,6 @@ void NativeJump::patch_verified_entry(address entry, address verified_entry, add
 
   Atomic::store((jlong *) verified_entry, u.cb_long);
   ICache::invalidate_range(verified_entry, 8);
-
-#else
-  unsigned char code_buffer[5];
-  code_buffer[0] = instruction_code;
-  intptr_t disp = (intptr_t)dest - ((intptr_t)verified_entry + 1 + 4);
-  *(int32_t*)(code_buffer + 1) = (int32_t)disp;
-
-  check_verified_entry_alignment(entry, verified_entry);
-
-  // Can't call nativeJump_at() because it's asserts jump exists
-  NativeJump* n_jump = (NativeJump*) verified_entry;
-
-  //First patch dummy jmp in place
-
-  unsigned char patch[4];
-  assert(sizeof(patch)==sizeof(int32_t), "sanity check");
-  patch[0] = 0xEB; // jmp rel8
-  patch[1] = 0xFE; // jmp to self
-  patch[2] = 0xEB;
-  patch[3] = 0xFE;
-
-  // First patch dummy jmp in place
-  *(int32_t*)verified_entry = *(int32_t *)patch;
-
-  n_jump->wrote(0);
-
-  // Patch 5th byte (from jump instruction)
-  verified_entry[4] = code_buffer[4];
-
-  n_jump->wrote(4);
-
-  // Patch bytes 0-3 (from jump instruction)
-  *(int32_t*)verified_entry = *(int32_t *)code_buffer;
-  // Invalidate. Opteron requires a flush after every write.
-  n_jump->wrote(0);
-#endif // _LP64
-
 }
 
 void NativeIllegalInstruction::insert(address code_pos) {
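Only the 64-bit patching strategy survives: the 5-byte jmp is staged in an 8-byte union and published with a single atomic 64-bit store, so no thread can observe a torn instruction. The deleted 32-bit path instead parked a jmp-to-self placeholder (EB FE), patched byte 4, then rewrote bytes 0-3, flushing after each write. A rough standalone sketch of the surviving approach (std::atomic standing in for HotSpot's Atomic::store; assumes verified_entry is 8-byte aligned, which check_verified_entry_alignment enforces in the real code):

    #include <atomic>
    #include <cstdint>
    #include <cstring>

    void patch_jump(unsigned char* verified_entry, uintptr_t dest) {
      union { uint64_t cb_long; unsigned char code_buffer[8]; } u;
      // Preserve the three bytes after the jump, then stage E9 + rel32.
      std::memcpy(u.code_buffer, verified_entry, 8);
      u.code_buffer[0] = 0xE9;  // jmp rel32
      int32_t disp = (int32_t)(dest - ((uintptr_t)verified_entry + 1 + 4));
      std::memcpy(u.code_buffer + 1, &disp, 4);
      // One aligned 64-bit store makes the whole instruction visible at once.
      reinterpret_cast<std::atomic<uint64_t>*>(verified_entry)
          ->store(u.cb_long, std::memory_order_release);
      // The real code then calls ICache::invalidate_range(verified_entry, 8).
    }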
@@ -455,9 +399,7 @@ void NativeGeneralJump::verify() {
 
 void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
   intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
-#ifdef AMD64
   guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
-#endif // AMD64
 
   *code_pos = unconditional_long_jump;
   *((int32_t *)(code_pos+1)) = (int32_t) disp;

src/hotspot/cpu/x86/nativeInst_x86.hpp

+0 −23
@@ -126,10 +126,8 @@ class NativeCall: public NativeInstruction {
   address return_address() const { return addr_at(return_address_offset); }
   address destination() const;
   void set_destination(address dest) {
-#ifdef AMD64
     intptr_t disp = dest - return_address();
     guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
-#endif // AMD64
     set_int_at(displacement_offset, (int)(dest - return_address()));
   }
   // Returns whether the 4-byte displacement operand is 4-byte aligned.
@@ -211,15 +209,9 @@ class NativeCallReg: public NativeInstruction {
 // Instruction format for implied addressing mode immediate operand move to register instruction:
 // [REX/REX2] [OPCODE] [IMM32]
 class NativeMovConstReg: public NativeInstruction {
-#ifdef AMD64
   static const bool has_rex = true;
   static const int rex_size = 1;
   static const int rex2_size = 2;
-#else
-  static const bool has_rex = false;
-  static const int rex_size = 0;
-  static const int rex2_size = 0;
-#endif // AMD64
  public:
   enum Intel_specific_constants {
     instruction_code = 0xB8,
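NativeMovConstReg now unconditionally models the 64-bit pattern [REX.W | REX2] B8+rd imm64: a legacy REX prefix is one byte and a REX2 prefix two, which is what rex_size and rex2_size encode. A standalone sketch of how those sizes drive decoding (the prefix byte values 0x48/0xD5 are the usual REX.W/REX2 encodings, used here for illustration rather than taken from the header):

    #include <cstdint>
    #include <cstring>

    struct MovImm64View {
      const uint8_t* p;
      bool has_rex2() const { return p[0] == 0xD5; }            // REX2 prefix
      int  prefix_size() const { return has_rex2() ? 2 : 1; }   // rex2_size : rex_size
      int  opcode() const { return p[prefix_size()]; }          // expect 0xB8 | reg
      int64_t imm() const {                                     // little-endian imm64
        int64_t v;
        std::memcpy(&v, p + prefix_size() + 1, sizeof(v));
        return v;
      }
    };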
@@ -390,13 +382,8 @@ inline NativeMovRegMem* nativeMovRegMem_at (address address) {
 // leal reg, [reg + offset]
 
 class NativeLoadAddress: public NativeMovRegMem {
-#ifdef AMD64
   static const bool has_rex = true;
   static const int rex_size = 1;
-#else
-  static const bool has_rex = false;
-  static const int rex_size = 0;
-#endif // AMD64
  public:
   enum Intel_specific_constants {
     instruction_prefix_wide = Assembler::REX_W,
@@ -447,9 +434,7 @@ class NativeJump: public NativeInstruction {
     if (dest == (address) -1) {
       val = -5; // jump to self
     }
-#ifdef AMD64
     assert((labs(val) & 0xFFFFFFFF00000000) == 0 || dest == (address)-1, "must be 32bit offset or -1");
-#endif // AMD64
     set_int_at(data_offset, (jint)val);
   }
 
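The -5 sentinel works because rel32 is measured from the end of the 5-byte jmp: target = jump_address + 5 + disp, so disp = -5 lands back on the jump's first byte. A short standalone check:

    #include <cassert>
    #include <cstdint>

    int main() {
      uintptr_t jump_addr = 0x1000;          // arbitrary example address
      int32_t disp = -5;                     // the "jump to self" value
      uintptr_t target = jump_addr + 5 + disp;
      assert(target == jump_addr);           // branches back to itself
      return 0;
    }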
@@ -572,19 +557,14 @@ inline bool NativeInstruction::is_jump_reg() {
 inline bool NativeInstruction::is_cond_jump() { return (int_at(0) & 0xF0FF) == 0x800F /* long jump */ ||
                                                        (ubyte_at(0) & 0xF0) == 0x70;  /* short jump */ }
 inline bool NativeInstruction::is_safepoint_poll() {
-#ifdef AMD64
   const bool has_rex_prefix = ubyte_at(0) == NativeTstRegMem::instruction_rex_b_prefix;
   const int test_offset = has_rex2_prefix() ? 2 : (has_rex_prefix ? 1 : 0);
-#else
-  const int test_offset = 0;
-#endif
   const bool is_test_opcode = ubyte_at(test_offset) == NativeTstRegMem::instruction_code_memXregl;
   const bool is_rax_target = (ubyte_at(test_offset + 1) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg;
   return is_test_opcode && is_rax_target;
 }
 
 inline bool NativeInstruction::is_mov_literal64() {
-#ifdef AMD64
   bool valid_rex_prefix = ubyte_at(0) == Assembler::REX_W || ubyte_at(0) == Assembler::REX_WB;
   bool valid_rex2_prefix = ubyte_at(0) == Assembler::REX2 &&
                            (ubyte_at(1) == Assembler::REX2BIT_W ||
@@ -593,9 +573,6 @@ inline bool NativeInstruction::is_mov_literal64() {
 
   int opcode = has_rex2_prefix() ? ubyte_at(2) : ubyte_at(1);
   return ((valid_rex_prefix || valid_rex2_prefix) && (opcode & (0xff ^ NativeMovConstReg::register_mask)) == 0xB8);
-#else
-  return false;
-#endif // AMD64
 }
 
 class NativePostCallNop: public NativeInstruction {
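is_safepoint_poll() now always decodes the 64-bit shape: an optional REX.B or REX2 prefix, then 0x85 /r (test r/m32, r32) with the ModRM reg field selecting rax. A standalone sketch of that decoding (the prefix bytes 0x41/0xD5 and the 0x38 ModRM reg mask are standard x86 encodings used for illustration, not the header's named constants):

    #include <cstdint>

    bool looks_like_poll(const uint8_t* insn) {
      int off = 0;
      if (insn[0] == 0xD5)      off = 2;   // REX2 prefix is two bytes
      else if (insn[0] == 0x41) off = 1;   // REX.B prefix is one byte
      bool test_opcode = insn[off] == 0x85;            // test r/m32, r32
      bool rax_target  = (insn[off + 1] & 0x38) == 0;  // ModRM.reg == rax
      return test_opcode && rax_target;
    }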
