Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

8301404: Factor out os::malloc with os::realloc common code, so that we only have 1 code path #24189

Closed
wants to merge 21 commits into from
Closed
Changes from 15 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 0 additions & 2 deletions src/hotspot/share/nmt/nmtPreInit.hpp
Original file line number Diff line number Diff line change
@@ -267,7 +267,6 @@ class NMTPreInit : public AllStatic {
// Returns true if allocation was handled here; in that case,
// *rc contains the return address.
static bool handle_malloc(void** rc, size_t size) {
size = MAX2((size_t)1, size); // malloc(0)
if (!MemTracker::is_initialized()) {
// pre-NMT-init:
// Allocate entry and add address to lookup table
@@ -287,7 +286,6 @@ class NMTPreInit : public AllStatic {
if (old_p == nullptr) { // realloc(null, n)
return handle_malloc(rc, new_size);
}
new_size = MAX2((size_t)1, new_size); // realloc(.., 0)
switch (MemTracker::tracking_level()) {
case NMT_unknown: {
// pre-NMT-init:
125 changes: 64 additions & 61 deletions src/hotspot/share/runtime/os.cpp
Original file line number Diff line number Diff line change
@@ -621,36 +621,67 @@ static void break_if_ptr_caught(void* ptr) {
}
#endif // ASSERT

void* os::malloc(size_t size, MemTag mem_tag) {
return os::malloc(size, mem_tag, CALLER_PC);
}

void* os::malloc(size_t size, MemTag mem_tag, const NativeCallStack& stack) {
size_t os::pre_alloc(void** raw_ptr, void* old_ptr, size_t size, bool check_limit, MemTag mem_tag) {
// On malloc(0), implementations of malloc(3) have the choice to return either
// null or a unique non-null pointer. To unify libc behavior across our platforms
// we chose the latter.
size = MAX2((size_t)1, size);

// Special handling for NMT preinit phase before arguments are parsed
void* rc = nullptr;
if (NMTPreInit::handle_malloc(&rc, size)) {
*raw_ptr = nullptr;
if (NMTPreInit::handle_realloc(raw_ptr, old_ptr, size, mem_tag)) {
// No need to fill with 0 because CDS static dumping doesn't use these
// early allocations.
return rc;
return size;
}

DEBUG_ONLY(check_crash_protection());

// On malloc(0), implementations of malloc(3) have the choice to return either
// null or a unique non-null pointer. To unify libc behavior across our platforms
// we chose the latter.
size = MAX2((size_t)1, size);

// Observe MallocLimit
if (MemTracker::check_exceeds_limit(size, mem_tag)) {
return nullptr;
if (check_limit && MemTracker::check_exceeds_limit(size, mem_tag)) {
return 0;
}

const size_t outer_size = size + MemTracker::overhead_per_malloc();

// Check for overflow.
if (outer_size < size) {
return 0;
}

return outer_size;
}

// Common post-processing for os::malloc and os::realloc: registers the raw
// (outer) block with NMT, converts it to the client-visible pointer, and
// initializes the freshly obtained memory.
// raw_ptr  - pointer returned by the underlying ::malloc/::realloc (includes
//            any NMT header overhead).
// size     - requested client size of the allocation.
// old_size - 0 for a fresh allocation (malloc path); for realloc, presumably
//            the prior client size, so only the extension is zapped — TODO confirm
//            against the realloc call site.
// Returns the client pointer to hand back to the caller.
void* os::post_alloc(void* raw_ptr, size_t size, size_t old_size, MemTag mem_tag, const NativeCallStack& stack) {
// Register alloc with NMT
void* const client_ptr = MemTracker::record_malloc((address)raw_ptr, size, mem_tag, stack);

if (old_size == 0) {
if (CDSConfig::is_dumping_static_archive()) {
// Need to deterministically fill all the alignment gaps in C++ structures.
::memset((char*)client_ptr, 0, size);
} else {
// Debug builds poison new memory to expose reads of uninitialized data.
DEBUG_ONLY(::memset((char*)client_ptr, uninitBlockPad, size);)
}
} else if (old_size < size) {
// Block grew: poison only the newly added tail region (debug builds).
DEBUG_ONLY(::memset((char*)client_ptr + old_size, uninitBlockPad, size - old_size);)
}

// Debug hook: stop here if this pointer is being watched (MallocCatchPtr).
DEBUG_ONLY(break_if_ptr_caught(client_ptr);)
return client_ptr;
}

// Convenience overload: forwards to the full os::malloc, capturing the
// caller's PC here so the NMT call stack points at the actual allocation site.
void* os::malloc(size_t size, MemTag mem_tag) {
return os::malloc(size, mem_tag, CALLER_PC);
}

void* os::malloc(size_t size, MemTag mem_tag, const NativeCallStack& stack) {

void* rc = nullptr;
size_t outer_size = os::pre_alloc(&rc, nullptr, size, true, mem_tag);
if (rc != nullptr) {
return rc;
}
if (outer_size == 0) {
return nullptr;
}

@@ -659,16 +690,7 @@ void* os::malloc(size_t size, MemTag mem_tag, const NativeCallStack& stack) {
return nullptr;
}

void* const inner_ptr = MemTracker::record_malloc((address)outer_ptr, size, mem_tag, stack);

if (CDSConfig::is_dumping_static_archive()) {
// Need to deterministically fill all the alignment gaps in C++ structures.
::memset(inner_ptr, 0, size);
} else {
DEBUG_ONLY(::memset(inner_ptr, uninitBlockPad, size);)
}
DEBUG_ONLY(break_if_ptr_caught(inner_ptr);)
return inner_ptr;
return post_alloc(outer_ptr, size, 0, mem_tag, stack);
}

void* os::realloc(void *memblock, size_t size, MemTag mem_tag) {
@@ -677,35 +699,28 @@ void* os::realloc(void *memblock, size_t size, MemTag mem_tag) {

void* os::realloc(void *memblock, size_t size, MemTag mem_tag, const NativeCallStack& stack) {

// Special handling for NMT preinit phase before arguments are parsed
void* rc = nullptr;
if (NMTPreInit::handle_realloc(&rc, memblock, size, mem_tag)) {
return rc;
}

if (memblock == nullptr) {
return os::malloc(size, mem_tag, stack);
}

DEBUG_ONLY(check_crash_protection());

// On realloc(p, 0), implementers of realloc(3) have the choice to return either
// On malloc(0), implementations of malloc(3) have the choice to return either
// null or a unique non-null pointer. To unify libc behavior across our platforms
// we chose the latter.
size = MAX2((size_t)1, size);

void* rc = nullptr;
size_t outer_size = os::pre_alloc(&rc, memblock, size, false, mem_tag);
if (rc != nullptr) {
return rc;
}
if (outer_size == 0) {
return nullptr;
}

if (MemTracker::enabled()) {
// NMT realloc handling

const size_t new_outer_size = size + MemTracker::overhead_per_malloc();

// Handle size overflow.
if (new_outer_size < size) {
return nullptr;
}

const size_t old_size = MallocTracker::malloc_header(memblock)->size();

// Observe MallocLimit
if ((size > old_size) && MemTracker::check_exceeds_limit(size - old_size, mem_tag)) {
return nullptr;
@@ -721,47 +736,35 @@ void* os::realloc(void *memblock, size_t size, MemTag mem_tag, const NativeCallS
header->mark_block_as_dead();

// the real realloc
ALLOW_C_FUNCTION(::realloc, void* const new_outer_ptr = ::realloc(header, new_outer_size);)

if (new_outer_ptr == nullptr) {
ALLOW_C_FUNCTION(::realloc, void* const outer_ptr = ::realloc(header, outer_size);)
if (outer_ptr == nullptr) {
// realloc(3) failed and the block still exists.
// We have however marked it as dead, revert this change.
header->revive();
return nullptr;
}

// realloc(3) succeeded, variable header now points to invalid memory and we need to deaccount the old block.
MemTracker::deaccount(free_info);

// After a successful realloc(3), we account the resized block with its new size
// to NMT.
void* const new_inner_ptr = MemTracker::record_malloc(new_outer_ptr, size, mem_tag, stack);

#ifdef ASSERT
assert(old_size == free_info.size, "Sanity");
if (old_size < size) {
// We also zap the newly extended region.
::memset((char*)new_inner_ptr + old_size, uninitBlockPad, size - old_size);
}
#endif

rc = new_inner_ptr;

rc = post_alloc(outer_ptr, size, old_size, mem_tag, stack);
} else {

// NMT disabled.
ALLOW_C_FUNCTION(::realloc, rc = ::realloc(memblock, size);)
if (rc == nullptr) {
return nullptr;
}

DEBUG_ONLY(break_if_ptr_caught(rc);)
}

DEBUG_ONLY(break_if_ptr_caught(rc);)

return rc;
}

void os::free(void *memblock) {
void os::free(void *memblock) {

// Special handling for NMT preinit phase before arguments are parsed
if (NMTPreInit::handle_free(memblock)) {
5 changes: 5 additions & 0 deletions src/hotspot/share/runtime/os.hpp
Original file line number Diff line number Diff line change
@@ -1085,6 +1085,11 @@ class os: AllStatic {
static bool set_boot_path(char fileSep, char pathSep);

static bool pd_dll_unload(void* libhandle, char* ebuf, int ebuflen);

private:
static size_t pre_alloc(void** raw_ptr, void* old_ptr, size_t size, bool check_limit, MemTag mem_tag);
static void* post_alloc(void* raw_ptr, size_t size, size_t chunk, MemTag mem_tag, const NativeCallStack& stack);

};

// Note that "PAUSE" is almost always used with synchronization