
Commit bd38188

afshin-zafari authored and stuefe committed on Dec 5, 2022
8297766: Remove UseMallocOnly development option
Reviewed-by: coleenp, stuefe, dholmes
1 parent b9eec96 commit bd38188

9 files changed: +23 -219 lines
 

src/hotspot/share/memory/arena.cpp (-83 lines)
```diff
@@ -273,10 +273,6 @@ Arena::~Arena() {
 
 // Destroy this arenas contents and reset to empty
 void Arena::destruct_contents() {
-  if (UseMallocOnly && _first != NULL) {
-    char* end = _first->next() ? _first->top() : _hwm;
-    free_malloced_objects(_first, _first->bottom(), end, _hwm);
-  }
   // reset size before chop to avoid a rare racing condition
   // that can have total arena memory exceed total chunk memory
   set_size_in_bytes(0);
@@ -342,19 +338,6 @@ void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFail
     assert(old_size == 0, "sanity");
     return Amalloc(new_size, alloc_failmode); // as with realloc(3), a NULL old ptr is equivalent to malloc(3)
   }
-#ifdef ASSERT
-  if (UseMallocOnly) {
-    // always allocate a new object (otherwise we'll free this one twice)
-    char* copy = (char*)Amalloc(new_size, alloc_failmode);
-    if (copy == NULL) {
-      return NULL;
-    }
-    size_t n = MIN2(old_size, new_size);
-    if (n > 0) memcpy(copy, old_ptr, n);
-    Afree(old_ptr, old_size); // Mostly done to keep stats accurate
-    return copy;
-  }
-#endif
   char *c_old = (char*)old_ptr; // Handy name
   // Stupid fast special case
   if( new_size <= old_size ) { // Shrink in-place
@@ -386,24 +369,6 @@ void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFail
 
 // Determine if pointer belongs to this Arena or not.
 bool Arena::contains( const void *ptr ) const {
-#ifdef ASSERT
-  if (UseMallocOnly) {
-    // really slow, but not easy to make fast
-    if (_chunk == NULL) return false;
-    char** bottom = (char**)_chunk->bottom();
-    for (char** p = (char**)_hwm - 1; p >= bottom; p--) {
-      if (*p == ptr) return true;
-    }
-    for (Chunk *c = _first; c != NULL; c = c->next()) {
-      if (c == _chunk) continue; // current chunk has been processed
-      char** bottom = (char**)c->bottom();
-      for (char** p = (char**)c->top() - 1; p >= bottom; p--) {
-        if (*p == ptr) return true;
-      }
-    }
-    return false;
-  }
-#endif
   if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
     return true; // Check for in this chunk
   for (Chunk *c = _first; c; c = c->next()) {
@@ -414,51 +379,3 @@ bool Arena::contains( const void *ptr ) const {
   }
   return false; // Not in any Chunk, so not in Arena
 }
-
-
-#ifdef ASSERT
-void* Arena::malloc(size_t size) {
-  assert(UseMallocOnly, "shouldn't call");
-  // use malloc, but save pointer in res. area for later freeing
-  char** save = (char**)internal_amalloc(sizeof(char*));
-  return (*save = (char*)os::malloc(size, mtChunk));
-}
-#endif
-
-
-//--------------------------------------------------------------------------------------
-// Non-product code
-
-#ifndef PRODUCT
-
-// debugging code
-inline void Arena::free_all(char** start, char** end) {
-  for (char** p = start; p < end; p++) if (*p) os::free(*p);
-}
-
-void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) {
-  assert(UseMallocOnly, "should not call");
-  // free all objects malloced since resource mark was created; resource area
-  // contains their addresses
-  if (chunk->next()) {
-    // this chunk is full, and some others too
-    for (Chunk* c = chunk->next(); c != NULL; c = c->next()) {
-      char* top = c->top();
-      if (c->next() == NULL) {
-        top = hwm2; // last junk is only used up to hwm2
-        assert(c->contains(hwm2), "bad hwm2");
-      }
-      free_all((char**)c->bottom(), (char**)top);
-    }
-    assert(chunk->contains(hwm), "bad hwm");
-    assert(chunk->contains(max), "bad max");
-    free_all((char**)hwm, (char**)max);
-  } else {
-    // this chunk was partially used
-    assert(chunk->contains(hwm), "bad hwm");
-    assert(chunk->contains(hwm2), "bad hwm2");
-    free_all((char**)hwm, (char**)hwm2);
-  }
-}
-
-#endif // Non-product
```
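The block deleted above was the core of the feature: with UseMallocOnly, every arena allocation was forwarded to os::malloc and the returned pointer was saved inside the arena itself, so free_malloced_objects() could later walk those saved pointers and release each block individually. Below is a minimal standalone sketch of that scheme, under stated assumptions: it is not HotSpot code, it substitutes std::malloc and a std::vector for the chunk-embedded pointer list, and every name is illustrative.

```cpp
#include <cstdlib>
#include <cstring>
#include <vector>

// Minimal model of the removed UseMallocOnly scheme (illustrative names, not
// HotSpot code): each "arena" allocation is a real C-heap malloc, and the
// arena merely records the pointers so it can bulk-free them later. HotSpot
// stored the saved pointers inside the arena chunks themselves; a vector
// stands in for that here.
class MallocOnlyArenaModel {
  std::vector<void*> _saved; // stand-in for pointers saved in the chunks
 public:
  void* amalloc(size_t size) {
    void* p = std::malloc(size);           // real allocation on the C heap
    if (p != nullptr) _saved.push_back(p); // remember it for bulk freeing
    return p;
  }
  // Counterpart of free_malloced_objects(): free every block recorded since
  // the arena (or, in HotSpot, a resource mark) was created.
  void destruct_contents() {
    for (void* p : _saved) std::free(p);
    _saved.clear();
  }
  ~MallocOnlyArenaModel() { destruct_contents(); }
};

int main() {
  MallocOnlyArenaModel ar;
  char* s = static_cast<char*>(ar.amalloc(16));
  std::strcpy(s, "hello"); // each block is an individual malloc, so heap
  ar.destruct_contents();  // checkers see per-object bounds and lifetimes
  return 0;
}
```

The per-object malloc blocks are what made the mode useful for catching memory stomps (see the flag description removed from globals.hpp below): external heap checkers can police individual blocks, which contiguous chunk allocation hides.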

src/hotspot/share/memory/arena.hpp (-3 lines)
```diff
@@ -125,7 +125,6 @@ class Arena : public CHeapObjBase {
   // on both 32 and 64 bit platforms. Required for atomic jlong operations on 32 bits.
   void* Amalloc(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
     x = ARENA_ALIGN(x); // note for 32 bits this should align _hwm as well.
-    debug_only(if (UseMallocOnly) return malloc(x);)
     // Amalloc guarantees 64-bit alignment and we need to ensure that in case the preceding
     // allocation was AmallocWords. Only needed on 32-bit - on 64-bit Amalloc and AmallocWords are
     // identical.
@@ -138,7 +137,6 @@ class Arena : public CHeapObjBase {
   // is 4 bytes on 32 bits, hence the name.
   void* AmallocWords(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
     assert(is_aligned(x, BytesPerWord), "misaligned size");
-    debug_only(if (UseMallocOnly) return malloc(x);)
     return internal_amalloc(x, alloc_failmode);
   }
 
@@ -149,7 +147,6 @@ class Arena : public CHeapObjBase {
   }
 #ifdef ASSERT
   if (ZapResourceArea) memset(ptr, badResourceValue, size); // zap freed memory
-  if (UseMallocOnly) return true;
 #endif
   if (((char*)ptr) + size == _hwm) {
     _hwm = (char*)ptr;
```
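With the malloc-only branch gone, Afree() keeps only the path shown in the last hunk: memory is actually reclaimed only when the freed block sits exactly at the arena's high-water mark. A minimal sketch of that bump-pointer rule, assuming a single fixed-size chunk and illustrative names rather than HotSpot's:

```cpp
#include <cassert>
#include <cstddef>

// Simplified model of the Afree() fast path that remains after this change
// (illustrative, not HotSpot code): a bump-pointer arena can only "free" the
// most recent allocation, by rolling the high-water mark back.
struct BumpArenaModel {
  char  _buf[1024];
  char* _hwm = _buf; // high-water mark: next free byte

  void* amalloc(size_t size) {
    assert(_hwm + size <= _buf + sizeof(_buf) && "model arena exhausted");
    void* p = _hwm;
    _hwm += size;
    return p;
  }
  bool afree(void* ptr, size_t size) {
    if (static_cast<char*>(ptr) + size == _hwm) {
      _hwm = static_cast<char*>(ptr); // top allocation: reclaim in place
      return true;
    }
    return false; // interior block: the space is simply not reclaimed
  }
};

int main() {
  BumpArenaModel ar;
  void* a = ar.amalloc(16);
  void* b = ar.amalloc(16);
  assert(!ar.afree(a, 16));    // interior block, cannot roll back
  assert(ar.afree(b, 16));     // top block, hwm rolled back
  assert(ar.amalloc(16) == b); // next allocation reuses the freed space
  return 0;
}
```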

src/hotspot/share/memory/resourceArea.hpp (-4 lines)
```diff
@@ -105,10 +105,6 @@ class ResourceArea: public Arena {
     assert(_nesting > state._nesting, "rollback to inactive mark");
     assert((_nesting - state._nesting) == 1, "rollback across another mark");
 
-    if (UseMallocOnly) {
-      free_malloced_objects(state._chunk, state._hwm, state._max, _hwm);
-    }
-
     if (state._chunk->next() != nullptr) { // Delete later chunks.
       // Reset size before deleting chunks. Otherwise, the total
       // size could exceed the total chunk size.
```
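The deleted lines sit in ResourceArea::rollback_to(): a ResourceMark snapshots the arena position and rolling back restores that snapshot, while the UseMallocOnly branch additionally had to free the individual malloc'ed blocks recorded past the mark. A minimal sketch of the mark/rollback mechanism, assuming a simplified single-chunk arena and illustrative names:

```cpp
#include <cassert>
#include <cstddef>

// Simplified model of ResourceMark rollback over a bump arena (illustrative,
// not HotSpot code). rollback_to() essentially restores a saved high-water
// mark; the removed UseMallocOnly branch also had to free the individual
// malloc'ed blocks recorded in the rolled-back range.
struct ArenaModel {
  char  _buf[1024];
  char* _hwm = _buf;
  void* allocate(size_t size) { void* p = _hwm; _hwm += size; return p; }
};

struct ResourceMarkModel {
  ArenaModel& _ar;
  char*       _saved_hwm; // snapshot of the arena position
  explicit ResourceMarkModel(ArenaModel& ar) : _ar(ar), _saved_hwm(ar._hwm) {}
  ~ResourceMarkModel() { _ar._hwm = _saved_hwm; } // rollback on scope exit
};

int main() {
  ArenaModel ar;
  void* before = ar.allocate(32);
  {
    ResourceMarkModel rm(ar);
    ar.allocate(128); // scratch memory owned by the mark
  }                   // rollback happens here
  // Everything allocated under the mark is gone; the arena continues
  // exactly where it stood when the mark was taken.
  assert(ar.allocate(1) == static_cast<char*>(before) + 32);
  return 0;
}
```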

src/hotspot/share/memory/resourceArea.inline.hpp (-5 lines)
```diff
@@ -32,11 +32,6 @@
 inline char* ResourceArea::allocate_bytes(size_t size, AllocFailType alloc_failmode) {
 #ifdef ASSERT
   verify_has_resource_mark();
-  if (UseMallocOnly) {
-    // use malloc, but save pointer in res. area for later freeing
-    char** save = (char**)internal_amalloc(sizeof(char*));
-    return (*save = (char*)os::malloc(size, mtThread, CURRENT_PC));
-  }
 #endif // ASSERT
   return (char*)Amalloc(size, alloc_failmode);
 }
```

src/hotspot/share/runtime/globals.hpp (-4 lines)
```diff
@@ -455,10 +455,6 @@ const int ObjectAlignmentInBytes = 8;
   notproduct(bool, VerifyCodeCache, false,                                  \
           "Verify code cache on memory allocation/deallocation")            \
                                                                             \
-  develop(bool, UseMallocOnly, false,                                       \
-          "Use only malloc/free for allocation (no resource area/arena). "  \
-          "Used to help diagnose memory stomping bugs.")                    \
-                                                                            \
   develop(bool, ZapResourceArea, trueInDebug,                               \
           "Zap freed resource/arena space")                                 \
                                                                             \
```
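UseMallocOnly was declared with develop(), meaning it could be changed only in debug builds and was a constant in product builds, so guarded code could be compiled out entirely. A hedged sketch of that pattern follows; this is a simplified stand-in, not HotSpot's real flag machinery:

```cpp
// Simplified model of a develop() flag (not the real HotSpot macro): a
// mutable global in debug builds, a compile-time constant in product builds,
// so product compilers can drop code guarded by `if (UseMallocOnly)`.
#ifdef ASSERT
bool UseMallocOnly = false;       // adjustable in debug VMs
#else
const bool UseMallocOnly = false; // constant-folded in product VMs
#endif

int main() {
  if (UseMallocOnly) { // dead code in product builds
    return 1;
  }
  return 0;
}
```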

src/hotspot/share/runtime/handles.hpp (-1 line)
```diff
@@ -196,7 +196,6 @@ class HandleArea: public Arena {
   // Handle allocation
  private:
   oop* real_allocate_handle(oop obj) {
-    // Ignore UseMallocOnly by allocating only in arena.
     oop* handle = (oop*)internal_amalloc(oopSize);
     *handle = obj;
     return handle;
```

test/hotspot/gtest/memory/test_arena.cpp (+23 -38 lines)
```diff
@@ -57,17 +57,13 @@ TEST_VM(Arena, alloc_size_0) {
   void* p = ar.Amalloc(0);
   ASSERT_NOT_NULL(p);
   ASSERT_ALIGN_AMALLOC(p);
-  if (!UseMallocOnly) {
-    // contains works differently for malloced mode (and there its broken anyway)
-    ASSERT_FALSE(ar.contains(p));
-  }
+
+  ASSERT_FALSE(ar.contains(p));
   // Allocate again. The new allocations should have the same position as the 0-sized
   // first one.
-  if (!UseMallocOnly) {
-    void* p2 = ar.Amalloc(1);
-    ASSERT_AMALLOC(ar, p2);
-    ASSERT_EQ(p2, p);
-  }
+  void* p2 = ar.Amalloc(1);
+  ASSERT_AMALLOC(ar, p2);
+  ASSERT_EQ(p2, p);
 }
 
 // Test behavior for Arealloc(p, 0)
@@ -81,10 +77,8 @@ TEST_VM(Arena, realloc_size_0) {
   ASSERT_NULL(p2);
 
   // a subsequent allocation should get the same pointer
-  if (!UseMallocOnly) {
-    void* p3 = ar.Amalloc(0x20);
-    ASSERT_EQ(p3, p1);
-  }
+  void* p3 = ar.Amalloc(0x20);
+  ASSERT_EQ(p3, p1);
 }
 
 // Realloc equal sizes is a noop
@@ -96,9 +90,7 @@ TEST_VM(Arena, realloc_same_size) {
 
   void* p2 = ar.Arealloc(p1, 0x200, 0x200);
 
-  if (!UseMallocOnly) {
-    ASSERT_EQ(p2, p1);
-  }
+  ASSERT_EQ(p2, p1);
   ASSERT_RANGE_IS_MARKED(p2, 0x200);
 }
 
@@ -157,29 +149,26 @@ TEST_VM(Arena, free_top) {
   DEBUG_ONLY(ASSERT_RANGE_IS_MARKED_WITH(p, 0x10, badResourceValue);)
 
   // a subsequent allocation should get the same pointer
-  if (!UseMallocOnly) {
-    void* p2 = ar.Amalloc(0x20);
-    ASSERT_EQ(p2, p);
-  }
+  void* p2 = ar.Amalloc(0x20);
+  ASSERT_EQ(p2, p);
 }
 
+
 // In-place shrinking.
 TEST_VM(Arena, realloc_top_shrink) {
-  if (!UseMallocOnly) {
-    Arena ar(mtTest);
+  Arena ar(mtTest);
 
-    void* p1 = ar.Amalloc(0x200);
-    ASSERT_AMALLOC(ar, p1);
-    GtestUtils::mark_range(p1, 0x200);
+  void* p1 = ar.Amalloc(0x200);
+  ASSERT_AMALLOC(ar, p1);
+  GtestUtils::mark_range(p1, 0x200);
 
-    void* p2 = ar.Arealloc(p1, 0x200, 0x100);
-    ASSERT_EQ(p1, p2);
-    ASSERT_RANGE_IS_MARKED(p2, 0x100); // realloc should preserve old content
+  void* p2 = ar.Arealloc(p1, 0x200, 0x100);
+  ASSERT_EQ(p1, p2);
+  ASSERT_RANGE_IS_MARKED(p2, 0x100); // realloc should preserve old content
 
-    // A subsequent allocation should be placed right after the end of the first, shrunk, allocation
-    void* p3 = ar.Amalloc(1);
-    ASSERT_EQ(p3, ((char*)p1) + 0x100);
-  }
+  // A subsequent allocation should be placed right after the end of the first, shrunk, allocation
+  void* p3 = ar.Amalloc(1);
+  ASSERT_EQ(p3, ((char*)p1) + 0x100);
 }
 
 // not-in-place shrinking.
@@ -193,9 +182,7 @@ TEST_VM(Arena, realloc_nontop_shrink) {
   void* p_other = ar.Amalloc(20); // new top, p1 not top anymore
 
   void* p2 = ar.Arealloc(p1, 200, 100);
-  if (!UseMallocOnly) {
-    ASSERT_EQ(p1, p2); // should still shrink in place
-  }
+  ASSERT_EQ(p1, p2); // should still shrink in place
   ASSERT_RANGE_IS_MARKED(p2, 100); // realloc should preserve old content
 }
 
@@ -208,9 +195,7 @@ TEST_VM(Arena, realloc_top_grow) {
   GtestUtils::mark_range(p1, 0x10);
 
   void* p2 = ar.Arealloc(p1, 0x10, 0x20);
-  if (!UseMallocOnly) {
-    ASSERT_EQ(p1, p2);
-  }
+  ASSERT_EQ(p1, p2);
   ASSERT_RANGE_IS_MARKED(p2, 0x10); // realloc should preserve old content
 }
 
```
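With the !UseMallocOnly guards dropped, these assertions run unconditionally and pin down the arena's bump-pointer placement guarantees: shrinking the top allocation happens in place and hands the tail back, so the next allocation lands right behind the shrunk block. A minimal model of those guarantees, assuming a single-chunk arena and illustrative names (the real Arealloc can also grow in place when the block is on top):

```cpp
#include <cassert>
#include <cstddef>
#include <cstring>

// Model of the placement rules the now-unconditional test assertions rely on
// (illustrative, not HotSpot code).
struct ArenaModel {
  char  _buf[4096];
  char* _hwm = _buf;
  void* amalloc(size_t size) { void* p = _hwm; _hwm += size; return p; }
  void* arealloc(void* old_p, size_t old_size, size_t new_size) {
    if (new_size <= old_size) { // shrink: always in place
      if (static_cast<char*>(old_p) + old_size == _hwm) {
        _hwm = static_cast<char*>(old_p) + new_size; // top block: return tail
      }
      return old_p;
    }
    void* p = amalloc(new_size); // grow (simplified): new block, copy content
    std::memcpy(p, old_p, old_size);
    return p;
  }
};

int main() {
  ArenaModel ar;
  void* p1 = ar.amalloc(0x200);
  void* p2 = ar.arealloc(p1, 0x200, 0x100);
  assert(p1 == p2); // in-place shrink, as TEST_VM(Arena, realloc_top_shrink) asserts
  assert(ar.amalloc(1) == static_cast<char*>(p1) + 0x100); // next alloc follows
  return 0;
}
```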

test/hotspot/jtreg/gtest/ArenaGtests.java (-40 lines)

This file was deleted.

test/hotspot/jtreg/runtime/8007475/StackMapFrameTest.java (-41 lines)

This file was deleted.
