8294902: Undefined Behavior in C2 regalloc with null references #1106

Closed · wants to merge 1 commit
2 changes: 1 addition & 1 deletion src/hotspot/share/memory/arena.cpp
@@ -419,10 +419,10 @@ void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFail

// Determine if pointer belongs to this Arena or not.
bool Arena::contains( const void *ptr ) const {
if (_chunk == NULL) return false;
#ifdef ASSERT
if (UseMallocOnly) {
// really slow, but not easy to make fast
if (_chunk == NULL) return false;
char** bottom = (char**)_chunk->bottom();
for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
if (*p == ptr) return true;
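
A minimal standalone sketch of the guard-before-dereference pattern the arena.cpp hunk applies: the `_chunk == NULL` check is hoisted above the debug-only `UseMallocOnly` walk so `_chunk->bottom()` is never evaluated on a null pointer. The `Chunk` and `ArenaSketch` types below are hypothetical stand-ins for the HotSpot classes, and the sketch assumes `_hwm` points just past the last entry of the current chunk whenever `_chunk` is set.

```cpp
#include <cstddef>

// Hypothetical stand-ins for the HotSpot Chunk/Arena types.
struct Chunk {
  char data[64];
  char* bottom() { return data; }      // calling this through a null pointer is UB
};

struct ArenaSketch {
  Chunk* _chunk = nullptr;
  char*  _hwm   = nullptr;

  bool contains(const void* ptr) const {
    // Hoisted guard: an empty arena contains nothing, and the walk below
    // must never start from _chunk->bottom() on a null _chunk.
    if (_chunk == nullptr) return false;
    // Assumption: _hwm points just past the last entry in the current chunk.
    char** bottom = reinterpret_cast<char**>(_chunk->bottom());
    for (char** p = reinterpret_cast<char**>(_hwm) - 1; p >= bottom; p--) {
      if (*p == ptr) return true;
    }
    return false;
  }
};

int main() {
  ArenaSketch a;
  return a.contains(nullptr) ? 1 : 0;  // empty arena: returns false safely
}
```
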
6 changes: 3 additions & 3 deletions src/hotspot/share/opto/bytecodeInfo.cpp
@@ -43,7 +43,7 @@ InlineTree::InlineTree(Compile* c,
JVMState* caller_jvms, int caller_bci,
int max_inline_level) :
C(c),
_caller_jvms(caller_jvms),
_caller_jvms(NULL),
_method(callee),
_caller_tree((InlineTree*) caller_tree),
_count_inline_bcs(method()->code_size_for_inlining()),
@@ -55,13 +55,13 @@ InlineTree::InlineTree(Compile* c,
_count_inlines = 0;
_forced_inline = false;
#endif
if (_caller_jvms != NULL) {
if (caller_jvms != NULL) {
// Keep a private copy of the caller_jvms:
_caller_jvms = new (C) JVMState(caller_jvms->method(), caller_tree->caller_jvms());
_caller_jvms->set_bci(caller_jvms->bci());
assert(!caller_jvms->should_reexecute(), "there should be no reexecute bytecode with inlining");
assert(_caller_jvms->same_calls_as(caller_jvms), "consistent JVMS");
}
assert(_caller_jvms->same_calls_as(caller_jvms), "consistent JVMS");
assert((caller_tree == NULL ? 0 : caller_tree->stack_depth() + 1) == stack_depth(), "correct (redundant) depth parameter");
assert(caller_bci == this->caller_bci(), "correct (redundant) bci parameter");
// Update hierarchical counts, count_inline_bcs() and count_inlines()
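
A compact sketch of the constructor pattern the bytecodeInfo.cpp hunk switches to: the member starts out null, the incoming parameter is what gets tested, and the consistency assert stays inside the guard so it never calls through a null pointer. The `JvmsSketch` and `InlineTreeSketch` types are hypothetical miniatures, not the HotSpot `JVMState` and `InlineTree`.

```cpp
#include <cassert>

struct JvmsSketch {
  int bci;
  bool same_calls_as(const JvmsSketch* other) const {
    return other != nullptr && bci == other->bci;
  }
};

struct InlineTreeSketch {
  JvmsSketch* _caller_jvms;

  explicit InlineTreeSketch(JvmsSketch* caller_jvms) : _caller_jvms(nullptr) {
    if (caller_jvms != nullptr) {
      // Keep a private copy of the caller's state.
      _caller_jvms = new JvmsSketch{caller_jvms->bci};
      // The consistency check lives inside the guard: with a null
      // caller_jvms, calling through _caller_jvms would be undefined.
      assert(_caller_jvms->same_calls_as(caller_jvms));
    }
  }
  ~InlineTreeSketch() { delete _caller_jvms; }
};

int main() {
  InlineTreeSketch root(nullptr);     // no caller: member stays null
  JvmsSketch caller{42};
  InlineTreeSketch inlined(&caller);  // copies the caller state
  return 0;
}
```
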
4 changes: 2 additions & 2 deletions src/hotspot/share/opto/chaitin.hpp
@@ -730,8 +730,8 @@ class PhaseChaitin : public PhaseRegAlloc {
int yank_if_dead_recurse(Node *old, Node *orig_old, Block *current_block,
Node_List *value, Node_List *regnd);
int yank( Node *old, Block *current_block, Node_List *value, Node_List *regnd );
int elide_copy( Node *n, int k, Block *current_block, Node_List &value, Node_List &regnd, bool can_change_regs );
int use_prior_register( Node *copy, uint idx, Node *def, Block *current_block, Node_List &value, Node_List &regnd );
int elide_copy( Node *n, int k, Block *current_block, Node_List *value, Node_List *regnd, bool can_change_regs );
int use_prior_register( Node *copy, uint idx, Node *def, Block *current_block, Node_List *value, Node_List *regnd );
bool may_be_copy_of_callee( Node *def ) const;

// If nreg already contains the same constant as val then eliminate it
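
The chaitin.hpp signature change is the heart of the fix: `elide_copy` and `use_prior_register` used to take `Node_List&` and then test `&value == NULL`, but a reference can never legally be null, so a conforming compiler may assume that test is always false and delete it. Below is a standalone sketch of why a "maybe absent" argument belongs behind a pointer; the `IntList` alias is a hypothetical stand-in for `Node_List`.

```cpp
#include <cstdio>
#include <vector>

using IntList = std::vector<int>;   // hypothetical stand-in for Node_List

// Old style: callers effectively passed *(IntList*)NULL and the callee tested
// `&value == NULL`.  Forming that reference is already undefined behavior,
// and the compiler is allowed to drop the check entirely.
// int bad(IntList& value) { if (&value == NULL) return 0; /* ... */ }

// New style: the possibly-absent argument is an explicit pointer.
int sum_or_zero(const IntList* value) {
  if (value == nullptr) return 0;   // well-defined null check
  int s = 0;
  for (int v : *value) s += v;
  return s;
}

int main() {
  IntList xs{1, 2, 3};
  std::printf("%d %d\n", sum_or_zero(&xs), sum_or_zero(nullptr));  // prints "6 0"
  return 0;
}
```
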
49 changes: 27 additions & 22 deletions src/hotspot/share/opto/postaloc.cpp
@@ -30,7 +30,7 @@

// See if this register (or pairs, or vector) already contains the value.
static bool register_contains_value(Node* val, OptoReg::Name reg, int n_regs,
Node_List& value) {
const Node_List &value) {
for (int i = 0; i < n_regs; i++) {
OptoReg::Name nreg = OptoReg::add(reg,-i);
if (value[nreg] != val)
@@ -77,7 +77,7 @@ bool PhaseChaitin::may_be_copy_of_callee( Node *def ) const {

//------------------------------yank-----------------------------------
// Helper function for yank_if_dead
int PhaseChaitin::yank( Node *old, Block *current_block, Node_List *value, Node_List *regnd ) {
int PhaseChaitin::yank(Node *old, Block *current_block, Node_List *value, Node_List *regnd) {
int blk_adjust=0;
Block *oldb = _cfg.get_block_for_node(old);
oldb->find_remove(old);
@@ -87,9 +87,10 @@ int PhaseChaitin::yank( Node *old, Block *current_block, Node_List *value, Node_
}
_cfg.unmap_node_from_block(old);
OptoReg::Name old_reg = lrgs(_lrg_map.live_range_id(old)).reg();
if( regnd && (*regnd)[old_reg]==old ) { // Instruction is currently available?
value->map(old_reg,NULL); // Yank from value/regnd maps
regnd->map(old_reg,NULL); // This register's value is now unknown
assert(value != NULL || regnd == NULL, "sanity");
if (value != NULL && regnd != NULL && regnd->at(old_reg) == old) { // Instruction is currently available?
value->map(old_reg, NULL); // Yank from value/regnd maps
regnd->map(old_reg, NULL); // This register's value is now unknown
}
return blk_adjust;
}
@@ -161,7 +162,7 @@ int PhaseChaitin::yank_if_dead_recurse(Node *old, Node *orig_old, Block *current
// Use the prior value instead of the current value, in an effort to make
// the current value go dead. Return block iterator adjustment, in case
// we yank some instructions from this block.
int PhaseChaitin::use_prior_register( Node *n, uint idx, Node *def, Block *current_block, Node_List &value, Node_List &regnd ) {
int PhaseChaitin::use_prior_register( Node *n, uint idx, Node *def, Block *current_block, Node_List *value, Node_List *regnd ) {
// No effect?
if( def == n->in(idx) ) return 0;
// Def is currently dead and can be removed? Do not resurrect
@@ -207,7 +208,7 @@ int PhaseChaitin::use_prior_register( Node *n, uint idx, Node *def, Block *curre
_post_alloc++;

// Is old def now dead? We successfully yanked a copy?
return yank_if_dead(old,current_block,&value,&regnd);
return yank_if_dead(old,current_block,value,regnd);
}


@@ -229,7 +230,7 @@ Node *PhaseChaitin::skip_copies( Node *c ) {

//------------------------------elide_copy-------------------------------------
// Remove (bypass) copies along Node n, edge k.
int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &value, Node_List &regnd, bool can_change_regs ) {
int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List *value, Node_List *regnd, bool can_change_regs ) {
int blk_adjust = 0;

uint nk_idx = _lrg_map.live_range_id(n->in(k));
@@ -253,11 +254,14 @@ int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &v

// Phis and 2-address instructions cannot change registers so easily - their
// outputs must match their input.
if( !can_change_regs )
if (!can_change_regs) {
return blk_adjust; // Only check stupid copies!

}
// Loop backedges won't have a value-mapping yet
if( &value == NULL ) return blk_adjust;
assert(regnd != NULL || value == NULL, "sanity");
if (value == NULL || regnd == NULL) {
return blk_adjust;
}

// Skip through all copies to the _value_ being used. Do not change from
// int to pointer. This attempts to jump through a chain of copies, where
@@ -273,10 +277,11 @@ int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &v
// See if it happens to already be in the correct register!
// (either Phi's direct register, or the common case of the name
// never-clobbered original-def register)
if (register_contains_value(val, val_reg, n_regs, value)) {
blk_adjust += use_prior_register(n,k,regnd[val_reg],current_block,value,regnd);
if( n->in(k) == regnd[val_reg] ) // Success! Quit trying
return blk_adjust;
if (register_contains_value(val, val_reg, n_regs, *value)) {
blk_adjust += use_prior_register(n,k,regnd->at(val_reg),current_block,value,regnd);
if (n->in(k) == regnd->at(val_reg)) {
return blk_adjust; // Success! Quit trying
}
}

// See if we can skip the copy by changing registers. Don't change from
@@ -304,7 +309,7 @@ int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &v
if (ignore_self) continue;
}

Node *vv = value[reg];
Node *vv = value->at(reg);
// For scalable register, number of registers may be inconsistent between
// "val_reg" and "reg". For example, when "val" resides in register
// but "reg" is located in stack.
@@ -326,17 +331,17 @@ int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &v
last = (n_regs-1); // Looking for the last part of a set
}
if ((reg&last) != last) continue; // Wrong part of a set
if (!register_contains_value(vv, reg, n_regs, value)) continue; // Different value
if (!register_contains_value(vv, reg, n_regs, *value)) continue; // Different value
}
if( vv == val || // Got a direct hit?
(t && vv && vv->bottom_type() == t && vv->is_Mach() &&
vv->as_Mach()->rule() == val->as_Mach()->rule()) ) { // Or same constant?
assert( !n->is_Phi(), "cannot change registers at a Phi so easily" );
if( OptoReg::is_stack(nk_reg) || // CISC-loading from stack OR
OptoReg::is_reg(reg) || // turning into a register use OR
regnd[reg]->outcnt()==1 ) { // last use of a spill-load turns into a CISC use
blk_adjust += use_prior_register(n,k,regnd[reg],current_block,value,regnd);
if( n->in(k) == regnd[reg] ) // Success! Quit trying
regnd->at(reg)->outcnt()==1 ) { // last use of a spill-load turns into a CISC use
blk_adjust += use_prior_register(n,k,regnd->at(reg),current_block,value,regnd);
if( n->in(k) == regnd->at(reg) ) // Success! Quit trying
return blk_adjust;
} // End of if not degrading to a stack
} // End of if found value in another register
@@ -536,7 +541,7 @@ void PhaseChaitin::post_allocate_copy_removal() {
Block* pb = _cfg.get_block_for_node(block->pred(j));
// Remove copies along phi edges
for (uint k = 1; k < phi_dex; k++) {
elide_copy(block->get_node(k), j, block, *blk2value[pb->_pre_order], *blk2regnd[pb->_pre_order], false);
elide_copy(block->get_node(k), j, block, blk2value[pb->_pre_order], blk2regnd[pb->_pre_order], false);
}
if (blk2value[pb->_pre_order]) { // Have a mapping on this edge?
// See if this predecessor's mappings have been used by everybody
@@ -692,7 +697,7 @@ void PhaseChaitin::post_allocate_copy_removal() {

// Remove copies along input edges
for (k = 1; k < n->req(); k++) {
j -= elide_copy(n, k, block, value, regnd, two_adr != k);
j -= elide_copy(n, k, block, &value, &regnd, two_adr != k);
}

// Unallocated Nodes define no registers
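
A miniature of the adjusted call-site pattern in postaloc.cpp: per-predecessor value/register maps may not exist yet on loop backedges, so they travel as possibly-null pointers and the helper checks them explicitly instead of the caller dereferencing them. Types and names below are hypothetical stand-ins for the HotSpot ones.

```cpp
#include <cassert>
#include <vector>

using ValueMap = std::vector<int>;   // hypothetical stand-in for Node_List

int elide_copy_sketch(int reg, ValueMap* value, ValueMap* regnd) {
  // Mirrors the patched invariant: regnd is only present when value is.
  assert((value != nullptr || regnd == nullptr) && "sanity");
  if (value == nullptr || regnd == nullptr) {
    return 0;                        // no mapping yet on this edge
  }
  return (*regnd)[reg];              // safe: both maps exist here
}

int main() {
  ValueMap v(8, 0), r(8, 7);
  ValueMap* blk2value[2] = { nullptr, &v };
  ValueMap* blk2regnd[2] = { nullptr, &r };

  // Old code dereferenced the maps at the call site (*blk2value[pb], ...);
  // now the raw, possibly-null pointers are passed straight through.
  int a = elide_copy_sketch(3, blk2value[0], blk2regnd[0]);   // backedge: 0
  int b = elide_copy_sketch(3, blk2value[1], blk2regnd[1]);   // 7
  return (a == 0 && b == 7) ? 0 : 1;
}
```
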
15 changes: 10 additions & 5 deletions src/hotspot/share/runtime/vmStructs.hpp
@@ -188,13 +188,18 @@ class VMStructs {
#ifdef ASSERT

// This macro checks the type of a VMStructEntry by comparing pointer types
#define CHECK_NONSTATIC_VM_STRUCT_ENTRY(typeName, fieldName, type) \
{typeName *dummyObj = NULL; type* dummy = &dummyObj->fieldName; \
assert(offset_of(typeName, fieldName) < sizeof(typeName), "Illegal nonstatic struct entry, field offset too large"); }
#define CHECK_NONSTATIC_VM_STRUCT_ENTRY(typeName, fieldName, type) { \
static_assert( \
std::is_convertible< \
std::add_pointer_t<decltype(declval<typeName>().fieldName)>, \
std::add_pointer_t<type>>::value, \
"type mismatch for " XSTR(fieldName) " member of " XSTR(typeName)); \
assert(offset_of(typeName, fieldName) < sizeof(typeName), "..."); \
}

// This macro checks the type of a volatile VMStructEntry by comparing pointer types
#define CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY(typeName, fieldName, type) \
{typedef type dummyvtype; typeName *dummyObj = NULL; volatile dummyvtype* dummy = &dummyObj->fieldName; }
#define CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY(typeName, fieldName, type) \
CHECK_NONSTATIC_VM_STRUCT_ENTRY(typeName, fieldName, std::add_volatile_t<type>)

// This macro checks the type of a static VMStructEntry by comparing pointer types
#define CHECK_STATIC_VM_STRUCT_ENTRY(typeName, fieldName, type) \
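
A standalone sketch of the compile-time field-type check the new CHECK_NONSTATIC_VM_STRUCT_ENTRY performs: `decltype(declval<T>().field)` names the field's declared type in an unevaluated context, and `std::is_convertible` compares pointer types, so no null `dummyObj` is ever dereferenced. It uses `std::declval` from `<utility>` instead of HotSpot's own `declval` so it builds on its own; `Sample` and `CHECK_FIELD_TYPE` are hypothetical names, and the patch's separate `offset_of` assert is omitted.

```cpp
#include <type_traits>
#include <utility>

struct Sample {
  long _count;
};

// Compile-time check that Sample::_count is declared with a type whose
// pointer converts to the recorded type's pointer -- no object is created
// and nothing is evaluated at run time.
#define CHECK_FIELD_TYPE(typeName, fieldName, type)                         \
  static_assert(                                                            \
      std::is_convertible<                                                  \
          std::add_pointer_t<decltype(std::declval<typeName>().fieldName)>, \
          std::add_pointer_t<type>>::value,                                 \
      "type mismatch for " #fieldName " member of " #typeName)

CHECK_FIELD_TYPE(Sample, _count, long);        // compiles
// CHECK_FIELD_TYPE(Sample, _count, char*);    // would fail to compile

int main() { return 0; }
```
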
5 changes: 5 additions & 0 deletions src/hotspot/share/utilities/globalDefinitions.hpp
@@ -35,6 +35,7 @@
#include COMPILER_HEADER(utilities/globalDefinitions)

#include <cstddef>
#include <type_traits>

class oopDesc;

Expand Down Expand Up @@ -1210,4 +1211,8 @@ template<typename K> bool primitive_equals(const K& k0, const K& k1) {
}


// Converts any type T to a reference type.
template<typename T>
std::add_rvalue_reference_t<T> declval() noexcept;

#endif // SHARE_UTILITIES_GLOBALDEFINITIONS_HPP
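
The `declval` added here mirrors `std::declval`: a function template that is declared but never defined, usable only in unevaluated contexts such as `decltype`. A small usage sketch, written against `std::declval` so it is self-contained; `NoDefault` is a hypothetical type.

```cpp
#include <type_traits>
#include <utility>

struct NoDefault {
  NoDefault(int) {}                       // no default constructor
  double scale(double) const { return 0.0; }
};

// declval lets us name the result type of an expression without having to
// (or being able to) construct the object.
using ScaleResult = decltype(std::declval<NoDefault>().scale(1.0));
static_assert(std::is_same<ScaleResult, double>::value, "scale returns double");

int main() { return 0; }
```
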
19 changes: 15 additions & 4 deletions src/hotspot/share/utilities/globalDefinitions_gcc.hpp
@@ -144,10 +144,21 @@ inline int wcslen(const jchar* x) { return wcslen((const wchar_t*)x); }
#endif // _LP64

// gcc warns about applying offsetof() to non-POD object or calculating
// offset directly when base address is NULL. Use 16 to get around the
// warning. The -Wno-invalid-offsetof option could be used to suppress
// this warning, but we instead just avoid the use of offsetof().
#define offset_of(klass,field) (size_t)((intx)&(((klass*)16)->field) - 16)
// offset directly when base address is NULL. The -Wno-invalid-offsetof
// option could be used to suppress this warning, but we instead just
// avoid the use of offsetof().
//
// FIXME: This macro is complex and rather arcane. Perhaps we should
// use offsetof() instead, with the invalid-offsetof warning
// temporarily disabled.
#define offset_of(klass,field) \
[]() { \
char space[sizeof (klass)] ATTRIBUTE_ALIGNED(16); \
klass* dummyObj = (klass*)space; \
char* c = (char*)(void*)&dummyObj->field; \
return (size_t)(c - space); \
}()


#if defined(_LP64) && defined(__APPLE__)
#define JLONG_FORMAT "%ld"
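
A standalone sketch of the rewritten `offset_of`: the field's address is taken inside a real, suitably aligned stack buffer within an immediately-invoked lambda, so no fabricated pointer (the old literal 16) is ever dereferenced. `Pair` and `offset_of_sketch` are hypothetical names, and standard `alignas` stands in for HotSpot's ATTRIBUTE_ALIGNED.

```cpp
#include <cstddef>
#include <cstdio>

struct Pair {
  int  first;
  long second;
};

// Same shape as the patched macro: a lambda provides local scope for the
// aligned buffer, and the offset is the distance from the buffer start to
// the field's address inside it.
#define offset_of_sketch(klass, field)            \
  []() {                                          \
    alignas(16) char space[sizeof(klass)];        \
    klass* dummyObj = (klass*)space;              \
    char* c = (char*)(void*)&dummyObj->field;     \
    return (size_t)(c - space);                   \
  }()

int main() {
  // For a standard-layout type this agrees with the standard offsetof.
  std::printf("%zu %zu\n",
              offset_of_sketch(Pair, second),
              offsetof(Pair, second));
  return 0;
}
```
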