8274527: Minimal VM build fails after JDK-8273459
Backport-of: a8edd1b360d4e5f35aff371a91fda42eeb00d395
Sandhya Viswanathan authored and Derek White committed Nov 10, 2022
1 parent ccbf2a9 commit 193ccc4
Showing 4 changed files with 22 additions and 17 deletions.
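For orientation: the patch adds a 32-byte counterpart to the existing align64() helper in MacroAssembler and switches the x86 call sites from the generic align(32) to the new align32(). As the first hunk below shows, align32() calls the two-argument align(modulus, target) overload directly rather than align(int modulus), which carries the CodeEntryAlignment assert tagged 8273459. The snippet below is a standalone illustration only (not HotSpot code; pad_to is a hypothetical helper) of the padding arithmetic that aligning the current code position to a 32-byte boundary implies:

#include <cstdint>
#include <cstdio>

// Hypothetical helper: number of filler bytes needed so that
// (offset + pad) is a multiple of 'modulus'.
static uint64_t pad_to(uint64_t offset, uint64_t modulus) {
  return (modulus - (offset % modulus)) % modulus;
}

int main() {
  const uint64_t kAlign = 32;  // the boundary align32() targets
  const uint64_t offsets[] = {0, 1, 31, 32, 33, 192};
  for (uint64_t off : offsets) {
    std::printf("offset %3llu -> pad %2llu -> aligned to %3llu\n",
                (unsigned long long) off,
                (unsigned long long) pad_to(off, kAlign),
                (unsigned long long) (off + pad_to(off, kAlign)));
  }
  return 0;
}
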
src/hotspot/cpu/x86/macroAssembler_x86.cpp (6 changes: 5 additions & 1 deletion)
@@ -1178,6 +1178,10 @@ void MacroAssembler::align64() {
align(64, (unsigned long long) pc());
}

+ void MacroAssembler::align32() {
+ align(32, (unsigned long long) pc());
+ }
+
void MacroAssembler::align(int modulus) {
// 8273459: Ensure alignment is possible with current segment alignment
assert(modulus <= CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
@@ -7123,7 +7127,7 @@ void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Regi
// 128 bits per each of 4 parallel streams.
movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32));

- align(32);
+ align32();
BIND(L_fold_512b_loop);
fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0);
fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16);
src/hotspot/cpu/x86/macroAssembler_x86.hpp (1 change: 1 addition & 0 deletions)
@@ -194,6 +194,7 @@ class MacroAssembler: public Assembler {
void incrementq(AddressLiteral dst);

// Alignment
+ void align32();
void align64();
void align(int modulus);
void align(int modulus, int target);
src/hotspot/cpu/x86/macroAssembler_x86_adler.cpp (4 changes: 2 additions & 2 deletions)
@@ -80,7 +80,7 @@ void MacroAssembler::updateBytesAdler32(Register init_d, Register data, Register
cmpptr(data, end);
jcc(Assembler::aboveEqual, SKIP_LOOP_1A);

- align(32);
+ align32();
bind(SLOOP1A);
vbroadcastf128(ydata, Address(data, 0), Assembler::AVX_256bit);
addptr(data, CHUNKSIZE);
@@ -178,7 +178,7 @@ void MacroAssembler::updateBytesAdler32(Register init_d, Register data, Register
movdl(rax, xb);
addl(b_d, rax);

- align(32);
+ align32();
bind(FINAL_LOOP);
movzbl(rax, Address(data, 0)); //movzx eax, byte[data]
addl(a_d, rax);
src/hotspot/cpu/x86/stubGenerator_x86_64.cpp (28 changes: 14 additions & 14 deletions)
@@ -1484,7 +1484,7 @@ class StubGenerator: public StubCodeGenerator {
__ subq(temp1, loop_size[shift]);

// Main loop with aligned copy block size of 192 bytes at 32 byte granularity.
- __ align(32);
+ __ align32();
__ BIND(L_main_loop);
__ copy64_avx(to, from, temp4, xmm1, false, shift, 0);
__ copy64_avx(to, from, temp4, xmm1, false, shift, 64);
@@ -1551,7 +1551,7 @@

// Main loop with aligned copy block size of 192 bytes at
// 64 byte copy granularity.
- __ align(32);
+ __ align32();
__ BIND(L_main_loop_64bytes);
__ copy64_avx(to, from, temp4, xmm1, false, shift, 0 , true);
__ copy64_avx(to, from, temp4, xmm1, false, shift, 64, true);
@@ -1691,7 +1691,7 @@
__ BIND(L_main_pre_loop);

// Main loop with aligned copy block size of 192 bytes at 32 byte granularity.
- __ align(32);
+ __ align32();
__ BIND(L_main_loop);
__ copy64_avx(to, from, temp1, xmm1, true, shift, -64);
__ copy64_avx(to, from, temp1, xmm1, true, shift, -128);
@@ -1724,7 +1724,7 @@

// Main loop with aligned copy block size of 192 bytes at
// 64 byte copy granularity.
- __ align(32);
+ __ align32();
__ BIND(L_main_loop_64bytes);
__ copy64_avx(to, from, temp1, xmm1, true, shift, -64 , true);
__ copy64_avx(to, from, temp1, xmm1, true, shift, -128, true);
@@ -4274,7 +4274,7 @@

//Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
address generate_pshuffle_byte_flip_mask_sha512() {
- __ align(32);
+ __ align32();
StubCodeMark mark(this, "StubRoutines", "pshuffle_byte_flip_mask_sha512");
address start = __ pc();
if (VM_Version::supports_avx2()) {
@@ -5307,7 +5307,7 @@ address generate_avx_ghash_processBlocks() {

address base64_avx2_shuffle_addr()
{
- __ align(32);
+ __ align32();
StubCodeMark mark(this, "StubRoutines", "avx2_shuffle_base64");
address start = __ pc();
__ emit_data64(0x0809070805060405, relocInfo::none);
@@ -5319,7 +5319,7 @@ address generate_avx_ghash_processBlocks() {

address base64_avx2_input_mask_addr()
{
- __ align(32);
+ __ align32();
StubCodeMark mark(this, "StubRoutines", "avx2_input_mask_base64");
address start = __ pc();
__ emit_data64(0x8000000000000000, relocInfo::none);
@@ -5331,7 +5331,7 @@ address generate_avx_ghash_processBlocks() {

address base64_avx2_lut_addr()
{
- __ align(32);
+ __ align32();
StubCodeMark mark(this, "StubRoutines", "avx2_lut_base64");
address start = __ pc();
__ emit_data64(0xfcfcfcfcfcfc4741, relocInfo::none);
@@ -5436,7 +5436,7 @@ address generate_avx_ghash_processBlocks() {
__ evmovdquq(xmm2, Address(encode_table, 0), Assembler::AVX_512bit);
__ evpbroadcastq(xmm1, rax, Assembler::AVX_512bit);

- __ align(32);
+ __ align32();
__ BIND(L_vbmiLoop);

__ vpermb(xmm0, xmm3, Address(source, start_offset), Assembler::AVX_512bit);
@@ -5636,7 +5636,7 @@ address generate_avx_ghash_processBlocks() {
__ cmpl(length, 31);
__ jcc(Assembler::belowEqual, L_process3);

- __ align(32);
+ __ align32();
__ BIND(L_32byteLoop);

// Get next 32 bytes
@@ -6083,7 +6083,7 @@ address generate_avx_ghash_processBlocks() {
__ evmovdquq(join12, ExternalAddress(StubRoutines::x86::base64_vbmi_join_1_2_addr()), Assembler::AVX_512bit, r13);
__ evmovdquq(join23, ExternalAddress(StubRoutines::x86::base64_vbmi_join_2_3_addr()), Assembler::AVX_512bit, r13);

- __ align(32);
+ __ align32();
__ BIND(L_process256);
// Grab input data
__ evmovdquq(input0, Address(source, start_offset, Address::times_1, 0x00), Assembler::AVX_512bit);
@@ -6165,7 +6165,7 @@
__ cmpl(length, 63);
__ jcc(Assembler::lessEqual, L_finalBit);

- __ align(32);
+ __ align32();
__ BIND(L_process64Loop);

// Handle first 64-byte block
@@ -6301,7 +6301,7 @@
__ shrq(rax, 1);
__ jmp(L_donePadding);

- __ align(32);
+ __ align32();
__ BIND(L_bruteForce);
} // End of if(avx512_vbmi)

@@ -6345,7 +6345,7 @@

__ jmp(L_bottomLoop);

- __ align(32);
+ __ align32();
__ BIND(L_forceLoop);
__ shll(byte1, 18);
__ shll(byte2, 12);
