Comparing changes

base repository: openjdk/jdk
base: f00a748b
head repository: openjdk/jdk
compare: ce19812e
  • 14 commits
  • 39 files changed
  • 1 contributor

Commits on Jan 17, 2023

  1. d3ae96c

Commits on Jan 20, 2023

  1. Review fixes (tkrodriguez) fe8f0c8
  2. f8b2600
  3. 6b5ac41

Commits on Jan 23, 2023

  1. Add missing declaration (tkrodriguez) d1d1742
  2. eb28d3c
  3. 05f5532

Commits on Apr 20, 2023

  1. 653d9f4
  2. ac13515
  3. 5292631

Commits on Apr 24, 2023

  1. c7bb439

Commits on Apr 27, 2023

  1. 1bbd3a7

Commits on May 1, 2023

  1. 63102cb
  2. ce19812
Showing with 1,026 additions and 323 deletions.
  1. +1 −1 src/hotspot/cpu/aarch64/assembler_aarch64.hpp
  2. +67 −35 src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp
  3. +0 −2 src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp
  4. +14 −4 src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp
  5. +0 −2 src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp
  6. +61 −34 src/hotspot/cpu/riscv/gc/shared/barrierSetNMethod_riscv.cpp
  7. +0 −2 src/hotspot/cpu/s390/globalDefinitions_s390.hpp
  8. +47 −20 src/hotspot/cpu/x86/gc/shared/barrierSetNMethod_x86.cpp
  9. +0 −6 src/hotspot/cpu/x86/globalDefinitions_x86.hpp
  10. +9 −0 src/hotspot/cpu/x86/jvmciCodeInstaller_x86.cpp
  11. +0 −2 src/hotspot/cpu/zero/globalDefinitions_zero.hpp
  12. +12 −12 src/hotspot/share/code/nmethod.cpp
  13. +4 −6 src/hotspot/share/code/nmethod.hpp
  14. +12 −3 src/hotspot/share/gc/shared/barrierSetNMethod.cpp
  15. +7 −0 src/hotspot/share/gc/shared/barrierSetNMethod.hpp
  16. +28 −2 src/hotspot/share/jvmci/jvmciCodeInstaller.cpp
  17. +2 −0 src/hotspot/share/jvmci/jvmciCodeInstaller.hpp
  18. +100 −31 src/hotspot/share/jvmci/jvmciCompilerToVM.cpp
  19. +21 −0 src/hotspot/share/jvmci/jvmciCompilerToVM.hpp
  20. +50 −0 src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp
  21. +13 −4 src/hotspot/share/jvmci/jvmciEnv.cpp
  22. +4 −0 src/hotspot/share/jvmci/jvmciEnv.hpp
  23. +3 −0 src/hotspot/share/jvmci/jvmciJavaClasses.hpp
  24. +37 −22 src/hotspot/share/jvmci/jvmciRuntime.cpp
  25. +55 −11 src/hotspot/share/jvmci/jvmciRuntime.hpp
  26. +1 −1 src/hotspot/share/jvmci/jvmci_globals.cpp
  27. +24 −0 src/hotspot/share/jvmci/vmStructs_jvmci.cpp
  28. +2 −1 src/hotspot/share/jvmci/vmSymbols_jvmci.hpp
  29. +4 −0 src/hotspot/share/oops/klass.hpp
  30. +9 −22 src/hotspot/share/runtime/arguments.cpp
  31. +23 −10 src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/CompilerToVM.java
  32. +1 −1 src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/HotSpotConstantPool.java
  33. +13 −30 src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/HotSpotMethodData.java
  34. +21 −33 src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/HotSpotProfilingInfo.java
  35. +1 −18 src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/HotSpotResolvedJavaMethodImpl.java
  36. +5 −2 src/jdk.internal.vm.ci/share/classes/jdk/vm/ci/hotspot/HotSpotResolvedObjectTypeImpl.java
  37. +0 −5 .../jtreg/compiler/jvmci/common/patches/jdk.internal.vm.ci/jdk/vm/ci/hotspot/CompilerToVMHelper.java
  38. +1 −1 test/hotspot/jtreg/compiler/jvmci/jdk.vm.ci.code.test/src/jdk/vm/ci/code/test/TestAssembler.java
  39. +374 −0 test/hotspot/jtreg/compiler/jvmci/meta/ProfilingInfoTest.java
2 changes: 1 addition & 1 deletion src/hotspot/cpu/aarch64/assembler_aarch64.hpp
@@ -228,7 +228,7 @@ class Instruction_aarch64 {
   static void spatch(address a, int msb, int lsb, int64_t val) {
     int nbits = msb - lsb + 1;
     int64_t chk = val >> (nbits - 1);
-    guarantee (chk == -1 || chk == 0, "Field too big for insn");
+    guarantee (chk == -1 || chk == 0, "Field too big for insn at " INTPTR_FORMAT, p2i(a));
     unsigned uval = val;
     unsigned mask = checked_cast<unsigned>(right_n_bits(nbits));
     uval &= mask;
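
The guarantee above checks that val actually fits in the nbits-wide signed field before it is patched into the instruction: arithmetically shifting out the low (nbits - 1) bits must leave only sign bits. A minimal standalone sketch of that check (plain C++, not HotSpot code; the function name is made up for illustration):

#include <cstdint>
#include <cassert>

// Sketch of the range check in spatch(): a value fits in an n-bit signed
// field iff an arithmetic shift by (n - 1) leaves only sign bits, i.e. the
// result is 0 (non-negative values) or -1 (negative values).
static bool fits_in_signed_field(int64_t val, int nbits) {
  int64_t chk = val >> (nbits - 1);   // arithmetic shift keeps the sign
  return chk == 0 || chk == -1;
}

int main() {
  assert(fits_in_signed_field(127, 8));    //  127 fits in 8 signed bits
  assert(fits_in_signed_field(-128, 8));   // -128 fits in 8 signed bits
  assert(!fits_in_signed_field(128, 8));   //  128 needs 9 signed bits
  return 0;
}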
102 changes: 67 additions & 35 deletions src/hotspot/cpu/aarch64/gc/shared/barrierSetNMethod_aarch64.cpp
@@ -35,14 +35,18 @@
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/registerMap.hpp"
 #include "utilities/align.hpp"
+#include "utilities/formatBuffer.hpp"
 #include "utilities/debug.hpp"
+#if INCLUDE_JVMCI
+#include "jvmci/jvmciRuntime.hpp"
+#endif
 
 static int slow_path_size(nmethod* nm) {
   // The slow path code is out of line with C2
   return nm->is_compiled_by_c2() ? 0 : 6;
 }
 
-// This is the offset of the entry barrier from where the frame is completed.
+// This is the offset of the entry barrier relative to where the frame is completed.
 // If any code changes between the end of the verified entry where the entry
 // barrier resides, and the completion of the frame, then
 // NativeNMethodCmpBarrier::verify() will immediately complain when it does
@@ -62,40 +66,67 @@ static int entry_barrier_offset(nmethod* nm) {
   return 0;
 }
 
-class NativeNMethodBarrier: public NativeInstruction {
-  address instruction_address() const { return addr_at(0); }
+class NativeNMethodBarrier {
+  address _instruction_address;
+  int* _guard_addr;
+  nmethod* _nm;
+
+  address instruction_address() const { return _instruction_address; }
+
+  int *guard_addr() {
+    return _guard_addr;
+  }
 
   int local_guard_offset(nmethod* nm) {
     // It's the last instruction
     return (-entry_barrier_offset(nm)) - 4;
   }
 
-  int *guard_addr(nmethod* nm) {
-    if (nm->is_compiled_by_c2()) {
-      // With c2 compiled code, the guard is out-of-line in a stub
-      // We find it using the RelocIterator.
-      RelocIterator iter(nm);
-      while (iter.next()) {
-        if (iter.type() == relocInfo::entry_guard_type) {
-          entry_guard_Relocation* const reloc = iter.entry_guard_reloc();
-          return reinterpret_cast<int*>(reloc->addr());
+public:
+  NativeNMethodBarrier(nmethod* nm): _nm(nm) {
+#if INCLUDE_JVMCI
+    if (nm->is_compiled_by_jvmci()) {
+      address pc = nm->code_begin() + nm->jvmci_nmethod_data()->nmethod_entry_patch_offset();
+      RelocIterator iter(nm, pc, pc + 4);
+      guarantee(iter.next(), "missing relocs");
+      guarantee(iter.type() == relocInfo::section_word_type, "unexpected reloc");
+
+      _guard_addr = (int*) iter.section_word_reloc()->target();
+      _instruction_address = pc;
+    } else
+#endif
+    {
+      _instruction_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
+      if (nm->is_compiled_by_c2()) {
+        // With c2 compiled code, the guard is out-of-line in a stub
+        // We find it using the RelocIterator.
+        RelocIterator iter(nm);
+        while (iter.next()) {
+          if (iter.type() == relocInfo::entry_guard_type) {
+            entry_guard_Relocation* const reloc = iter.entry_guard_reloc();
+            _guard_addr = reinterpret_cast<int*>(reloc->addr());
+            return;
+          }
         }
+        ShouldNotReachHere();
       }
-      ShouldNotReachHere();
+      _guard_addr = reinterpret_cast<int*>(instruction_address() + local_guard_offset(nm));
     }
-    return reinterpret_cast<int*>(instruction_address() + local_guard_offset(nm));
   }
 
-public:
-  int get_value(nmethod* nm) {
-    return Atomic::load_acquire(guard_addr(nm));
+  int get_value() {
+    return Atomic::load_acquire(guard_addr());
   }
 
-  void set_value(nmethod* nm, int value) {
-    Atomic::release_store(guard_addr(nm), value);
+  void set_value(int value) {
+    Atomic::release_store(guard_addr(), value);
  }
 
-  void verify() const;
+  bool check_barrier(FormatBuffer<>& msg) const;
+  void verify() const {
+    FormatBuffer<> msg("%s", "");
+    assert(check_barrier(msg), "%s", msg.buffer());
+  }
 };

// Store the instruction bitmask, bits and name for checking the barrier.
@@ -107,13 +138,14 @@ struct CheckInsn {
 
 // The first instruction of the nmethod entry barrier is an ldr (literal)
 // instruction. Verify that it's really there, so the offsets are not skewed.
-void NativeNMethodBarrier::verify() const {
+bool NativeNMethodBarrier::check_barrier(FormatBuffer<>& msg) const {
   uint32_t* addr = (uint32_t*) instruction_address();
   uint32_t inst = *addr;
   if ((inst & 0xff000000) != 0x18000000) {
-    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", (intptr_t)addr, inst);
-    fatal("not an ldr (literal) instruction.");
+    msg.print("Addr: " INTPTR_FORMAT " Code: 0x%x not an ldr", p2i(addr), inst);
+    return false;
   }
+  return true;
 }


@@ -156,13 +188,6 @@ void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {
   new_frame->pc = SharedRuntime::get_handle_wrong_method_stub();
 }
 
-static NativeNMethodBarrier* native_nmethod_barrier(nmethod* nm) {
-  address barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
-  NativeNMethodBarrier* barrier = reinterpret_cast<NativeNMethodBarrier*>(barrier_address);
-  debug_only(barrier->verify());
-  return barrier;
-}
-
 void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
   if (!supports_entry_barrier(nm)) {
     return;
@@ -179,15 +204,22 @@ void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
     bs_asm->increment_patching_epoch();
   }
 
-  NativeNMethodBarrier* barrier = native_nmethod_barrier(nm);
-  barrier->set_value(nm, value);
+  NativeNMethodBarrier barrier(nm);
+  barrier.set_value(value);
 }
 
 int BarrierSetNMethod::guard_value(nmethod* nm) {
   if (!supports_entry_barrier(nm)) {
     return disarmed_guard_value();
   }
 
-  NativeNMethodBarrier* barrier = native_nmethod_barrier(nm);
-  return barrier->get_value(nm);
+  NativeNMethodBarrier barrier(nm);
+  return barrier.get_value();
 }
+
+#if INCLUDE_JVMCI
+bool BarrierSetNMethod::verify_barrier(nmethod* nm, FormatBuffer<>& msg) {
+  NativeNMethodBarrier barrier(nm);
+  return barrier.check_barrier(msg);
+}
+#endif
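
The shape of this refactoring: NativeNMethodBarrier used to be a NativeInstruction overlay whose guard_addr(nm) re-derived the guard location on every access; now the constructor resolves the guard address exactly once (JVMCI data section, C2 out-of-line stub, or inline after the barrier), and the accessors only perform the acquire/release atomics. A minimal sketch of that pattern in portable C++, with std::atomic standing in for HotSpot's Atomic::load_acquire / Atomic::release_store (class and parameter names below are illustrative, not HotSpot APIs):

#include <atomic>

// Sketch only: resolve the guard location once at construction, then all
// reads and writes go through acquire/release operations on that one address.
class GuardHandle {
  std::atomic<int>* _guard_addr;   // resolved once, like _guard_addr above

public:
  explicit GuardHandle(std::atomic<int>* resolved) : _guard_addr(resolved) {}

  int get_value() const {
    // load-acquire: observes everything published before the release store
    return _guard_addr->load(std::memory_order_acquire);
  }

  void set_value(int value) {
    // store-release: publishes prior writes before the guard value flips
    _guard_addr->store(value, std::memory_order_release);
  }
};

The payoff is that the RelocIterator walk happens once per barrier object instead of once per access, and check_barrier can hand back a failure message instead of calling fatal() directly.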
2 changes: 0 additions & 2 deletions src/hotspot/cpu/aarch64/globalDefinitions_aarch64.hpp
@@ -58,8 +58,6 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
 
 #define SUPPORT_RESERVED_STACK_AREA
 
-#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS false
-
 #if defined(__APPLE__) || defined(_WIN64)
 #define R18_RESERVED
 #define R18_RESERVED_ONLY(code) code
18 changes: 14 additions & 4 deletions src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp
@@ -122,27 +122,28 @@ void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong forei
 }
 
 void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &cbuf, methodHandle& method, jint pc_offset, JVMCI_TRAPS) {
+  NativeCall* call = NULL;
   switch (_next_call_type) {
     case INLINE_INVOKE:
-      break;
+      return;
     case INVOKEVIRTUAL:
     case INVOKEINTERFACE: {
       assert(!method->is_static(), "cannot call static method with invokeinterface");
-      NativeCall* call = nativeCall_at(_instructions->start() + pc_offset);
+      call = nativeCall_at(_instructions->start() + pc_offset);
       _instructions->relocate(call->instruction_address(), virtual_call_Relocation::spec(_invoke_mark_pc));
       call->trampoline_jump(cbuf, SharedRuntime::get_resolve_virtual_call_stub(), JVMCI_CHECK);
       break;
     }
     case INVOKESTATIC: {
       assert(method->is_static(), "cannot call non-static method with invokestatic");
-      NativeCall* call = nativeCall_at(_instructions->start() + pc_offset);
+      call = nativeCall_at(_instructions->start() + pc_offset);
       _instructions->relocate(call->instruction_address(), relocInfo::static_call_type);
       call->trampoline_jump(cbuf, SharedRuntime::get_resolve_static_call_stub(), JVMCI_CHECK);
       break;
     }
     case INVOKESPECIAL: {
       assert(!method->is_static(), "cannot call static method with invokespecial");
-      NativeCall* call = nativeCall_at(_instructions->start() + pc_offset);
+      call = nativeCall_at(_instructions->start() + pc_offset);
       _instructions->relocate(call->instruction_address(), relocInfo::opt_virtual_call_type);
       call->trampoline_jump(cbuf, SharedRuntime::get_resolve_opt_virtual_call_stub(), JVMCI_CHECK);
       break;
@@ -151,6 +152,15 @@ void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &cbuf, methodHandle& metho
       JVMCI_ERROR("invalid _next_call_type value");
       break;
   }
+  if (Continuations::enabled()) {
+    // Check for proper post_call_nop
+    NativePostCallNop* nop = nativePostCallNop_at(call->next_instruction_address());
+    if (nop == NULL) {
+      JVMCI_ERROR("missing post call nop at offset %d", pc_offset);
+    } else {
+      _instructions->relocate(call->next_instruction_address(), relocInfo::post_call_nop_type);
+    }
+  }
 }

void CodeInstaller::pd_relocate_poll(address pc, jint mark, JVMCI_TRAPS) {
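
Hoisting NativeCall* call out of the individual cases is what lets the new Continuations check run once after the switch, for every call kind that actually patched something. A compilable sketch of that control-flow shape (the types and helper below are hypothetical stand-ins; the real code uses NativeCall and the JVMCI helpers above):

#include <cstdio>

// Hypothetical stand-ins for NativeCall and the patching helpers.
struct Call { int offset; };
static Call g_site;
static Call* patch_call(int off) { g_site.offset = off; return &g_site; }

void relocate(int kind, int off, bool continuations_enabled) {
  Call* call = nullptr;             // hoisted above the switch
  switch (kind) {
    case 0:                         // inline invoke: nothing to patch
      return;
    case 1:                         // the static/virtual/special cases all
    case 2:                         //   produce a patched call site
      call = patch_call(off);
      break;
    default:
      std::printf("invalid kind\n");
      return;
  }
  // Shared post-processing for every patched call; in the real change this
  // verifies that a post-call nop instruction follows the call.
  if (continuations_enabled) {
    std::printf("checking post-call nop after call at %d\n", call->offset);
  }
}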
2 changes: 0 additions & 2 deletions src/hotspot/cpu/ppc/globalDefinitions_ppc.hpp
@@ -59,6 +59,4 @@ const bool CCallingConventionRequiresIntsAsLongs = true;
 // Define the condition to use this -XX flag.
 #define USE_POLL_BIT_ONLY UseSIGTRAP
 
-#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS false
-
 #endif // CPU_PPC_GLOBALDEFINITIONS_PPC_HPP
95 changes: 61 additions & 34 deletions src/hotspot/cpu/riscv/gc/shared/barrierSetNMethod_riscv.cpp
@@ -36,6 +36,9 @@
 #include "runtime/registerMap.hpp"
 #include "utilities/align.hpp"
 #include "utilities/debug.hpp"
+#if INCLUDE_JVMCI
+#include "jvmci/jvmciRuntime.hpp"
+#endif
 
 static int slow_path_size(nmethod* nm) {
   // The slow path code is out of line with C2.
@@ -57,40 +60,63 @@ static int entry_barrier_offset(nmethod* nm) {
   return 0;
 }
 
-class NativeNMethodBarrier: public NativeInstruction {
-  address instruction_address() const { return addr_at(0); }
+class NativeNMethodBarrier {
+  address _instruction_address;
+  int* _guard_addr;
+  nmethod* _nm;
+
+  address instruction_address() const { return _instruction_address; }
+
+  int *guard_addr() {
+    return _guard_addr;
+  }
 
   int local_guard_offset(nmethod* nm) {
     // It's the last instruction
     return (-entry_barrier_offset(nm)) - 4;
   }
 
-  int *guard_addr(nmethod* nm) {
-    if (nm->is_compiled_by_c2()) {
-      // With c2 compiled code, the guard is out-of-line in a stub
-      // We find it using the RelocIterator.
-      RelocIterator iter(nm);
-      while (iter.next()) {
-        if (iter.type() == relocInfo::entry_guard_type) {
-          entry_guard_Relocation* const reloc = iter.entry_guard_reloc();
-          return reinterpret_cast<int*>(reloc->addr());
+public:
+  NativeNMethodBarrier(nmethod* nm): _nm(nm) {
+    address barrier_address;
+#if INCLUDE_JVMCI
+    if (nm->is_compiled_by_jvmci()) {
+      _instruction_address = nm->code_begin() + nm->frame_complete_offset();
+      _guard_addr = reinterpret_cast<int*>(nm->consts_begin() + nm->jvmci_nmethod_data()->nmethod_entry_patch_offset());
+    } else
+#endif
+    {
+      _instruction_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
+      if (nm->is_compiled_by_c2()) {
+        // With c2 compiled code, the guard is out-of-line in a stub
+        // We find it using the RelocIterator.
+        RelocIterator iter(nm);
+        while (iter.next()) {
+          if (iter.type() == relocInfo::entry_guard_type) {
+            entry_guard_Relocation* const reloc = iter.entry_guard_reloc();
+            _guard_addr = reinterpret_cast<int*>(reloc->addr());
+            return;
+          }
        }
+        ShouldNotReachHere();
      }
-      ShouldNotReachHere();
+      _guard_addr = reinterpret_cast<int*>(instruction_address() + local_guard_offset(nm));
    }
-    return reinterpret_cast<int*>(instruction_address() + local_guard_offset(nm));
  }
 
-public:
-  int get_value(nmethod* nm) {
-    return Atomic::load_acquire(guard_addr(nm));
+  int get_value() {
+    return Atomic::load_acquire(guard_addr());
  }
 
-  void set_value(nmethod* nm, int value) {
-    Atomic::release_store(guard_addr(nm), value);
+  void set_value(int value) {
+    Atomic::release_store(guard_addr(), value);
  }
 
-  void verify() const;
+  bool check_barrier(FormatBuffer<>& msg) const;
+  void verify() const {
+    FormatBuffer<> msg("%s", "");
+    assert(check_barrier(msg), "%s", msg.buffer());
+  }
 };

// Store the instruction bitmask, bits and name for checking the barrier.
@@ -112,16 +138,17 @@ static const struct CheckInsn barrierInsn[] = {
 // The encodings must match the instructions emitted by
 // BarrierSetAssembler::nmethod_entry_barrier. The matching ignores the specific
 // register numbers and immediate values in the encoding.
-void NativeNMethodBarrier::verify() const {
+bool NativeNMethodBarrier::check_barrier(FormatBuffer<>& msg) const {
   intptr_t addr = (intptr_t) instruction_address();
   for(unsigned int i = 0; i < sizeof(barrierInsn)/sizeof(struct CheckInsn); i++ ) {
     uint32_t inst = *((uint32_t*) addr);
     if ((inst & barrierInsn[i].mask) != barrierInsn[i].bits) {
-      tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", addr, inst);
-      fatal("not an %s instruction.", barrierInsn[i].name);
+      msg.print("Addr: " INTPTR_FORMAT " Code: 0x%x not an %s instruction", addr, inst, barrierInsn[i].name);
+      return false;
     }
     addr += 4;
   }
+  return true;
 }


@@ -164,13 +191,6 @@ void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {
   new_frame->pc = SharedRuntime::get_handle_wrong_method_stub();
 }
 
-static NativeNMethodBarrier* native_nmethod_barrier(nmethod* nm) {
-  address barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
-  NativeNMethodBarrier* barrier = reinterpret_cast<NativeNMethodBarrier*>(barrier_address);
-  debug_only(barrier->verify());
-  return barrier;
-}
-
 void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
   if (!supports_entry_barrier(nm)) {
     return;
@@ -187,15 +207,22 @@ void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
     bs_asm->increment_patching_epoch();
   }
 
-  NativeNMethodBarrier* barrier = native_nmethod_barrier(nm);
-  barrier->set_value(nm, value);
+  NativeNMethodBarrier barrier(nm);
+  barrier.set_value(value);
 }
 
 int BarrierSetNMethod::guard_value(nmethod* nm) {
   if (!supports_entry_barrier(nm)) {
     return disarmed_guard_value();
   }
 
-  NativeNMethodBarrier* barrier = native_nmethod_barrier(nm);
-  return barrier->get_value(nm);
+  NativeNMethodBarrier barrier(nm);
+  return barrier.get_value();
 }
+
+#if INCLUDE_JVMCI
+bool BarrierSetNMethod::verify_barrier(nmethod* nm, FormatBuffer<>& msg) {
+  NativeNMethodBarrier barrier(nm);
+  return barrier.check_barrier(msg);
+}
+#endif
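
For reference, the barrierInsn table drives a mask/bits match per instruction word: the mask selects the opcode and format bits, while register numbers and immediates fall outside it, so they are ignored. A self-contained sketch of that matching scheme (the auipc pattern below is illustrative, not the actual table entry):

#include <cstdint>
#include <cstdio>

// Simplified from the riscv check: an instruction matches a pattern when the
// bits selected by `mask` equal `bits`.
struct CheckInsn {
  uint32_t mask;
  uint32_t bits;
  const char* name;
};

static bool matches(uint32_t inst, const CheckInsn& pat) {
  return (inst & pat.mask) == pat.bits;
}

int main() {
  // Hypothetical pattern: the low 7 opcode bits must be 0x17 (auipc);
  // everything else (rd, immediate) is masked out.
  CheckInsn auipc = { 0x0000007f, 0x00000017, "auipc" };
  uint32_t inst = 0x00000297;              // auipc t0, 0
  std::printf("%s: %s\n", auipc.name, matches(inst, auipc) ? "ok" : "mismatch");
  return 0;
}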