Skip to content

Commit ab19348

Browse files
committed Nov 1, 2023
8318647: Serial: Refactor BlockOffsetTable
Reviewed-by: tschatzl, iwalulya
1 parent b4f5379 commit ab19348

9 files changed

+205
-826
lines changed
 

‎src/hotspot/share/gc/serial/serialBlockOffsetTable.cpp

+89-372
Large diffs are not rendered by default.

‎src/hotspot/share/gc/serial/serialBlockOffsetTable.hpp

+52-305
Large diffs are not rendered by default.

‎src/hotspot/share/gc/serial/serialBlockOffsetTable.inline.hpp

+2-25
Original file line numberDiff line numberDiff line change
@@ -27,24 +27,7 @@
2727

2828
#include "gc/serial/serialBlockOffsetTable.hpp"
2929

30-
#include "gc/shared/space.hpp"
31-
#include "runtime/safepoint.hpp"
32-
33-
//////////////////////////////////////////////////////////////////////////
34-
// BlockOffsetTable inlines
35-
//////////////////////////////////////////////////////////////////////////
36-
inline HeapWord* BlockOffsetTable::block_start(const void* addr) const {
37-
if (addr >= _bottom && addr < _end) {
38-
return block_start_unsafe(addr);
39-
} else {
40-
return nullptr;
41-
}
42-
}
43-
44-
//////////////////////////////////////////////////////////////////////////
45-
// BlockOffsetSharedArray inlines
46-
//////////////////////////////////////////////////////////////////////////
47-
inline size_t BlockOffsetSharedArray::index_for(const void* p) const {
30+
inline size_t SerialBlockOffsetSharedArray::index_for(const void* p) const {
4831
char* pc = (char*)p;
4932
assert(pc >= (char*)_reserved.start() &&
5033
pc < (char*)_reserved.end(),
@@ -55,18 +38,12 @@ inline size_t BlockOffsetSharedArray::index_for(const void* p) const {
5538
return result;
5639
}
5740

58-
inline HeapWord* BlockOffsetSharedArray::address_for_index(size_t index) const {
41+
inline HeapWord* SerialBlockOffsetSharedArray::address_for_index(size_t index) const {
5942
assert(index < _vs.committed_size(), "bad index");
6043
HeapWord* result = _reserved.start() + (index << BOTConstants::log_card_size_in_words());
6144
assert(result >= _reserved.start() && result < _reserved.end(),
6245
"bad address from index");
6346
return result;
6447
}
6548

66-
inline void BlockOffsetSharedArray::check_reducing_assertion(bool reducing) {
67-
assert(reducing || !SafepointSynchronize::is_at_safepoint() || init_to_zero() ||
68-
Thread::current()->is_VM_thread() ||
69-
Thread::current()->is_ConcurrentGC_thread(), "Crack");
70-
}
71-
7249
#endif // SHARE_GC_SERIAL_SERIALBLOCKOFFSETTABLE_INLINE_HPP

‎src/hotspot/share/gc/serial/tenuredGeneration.cpp

+3-4
Original file line numberDiff line numberDiff line change
@@ -295,8 +295,8 @@ TenuredGeneration::TenuredGeneration(ReservedSpace rs,
295295
assert((uintptr_t(start) & 3) == 0, "bad alignment");
296296
assert((reserved_byte_size & 3) == 0, "bad alignment");
297297
MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
298-
_bts = new BlockOffsetSharedArray(reserved_mr,
299-
heap_word_size(initial_byte_size));
298+
_bts = new SerialBlockOffsetSharedArray(reserved_mr,
299+
heap_word_size(initial_byte_size));
300300
MemRegion committed_mr(start, heap_word_size(initial_byte_size));
301301
_rs->resize_covered_region(committed_mr);
302302

@@ -474,11 +474,10 @@ void TenuredGeneration::object_iterate(ObjectClosure* blk) {
474474
void TenuredGeneration::complete_loaded_archive_space(MemRegion archive_space) {
475475
// Create the BOT for the archive space.
476476
TenuredSpace* space = _the_space;
477-
space->initialize_threshold();
478477
HeapWord* start = archive_space.start();
479478
while (start < archive_space.end()) {
480479
size_t word_size = cast_to_oop(start)->size();;
481-
space->alloc_block(start, start + word_size);
480+
space->update_for_block(start, start + word_size);
482481
start += word_size;
483482
}
484483
}

‎src/hotspot/share/gc/serial/tenuredGeneration.hpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@
3131
#include "gc/shared/generationCounters.hpp"
3232
#include "utilities/macros.hpp"
3333

34-
class BlockOffsetSharedArray;
34+
class SerialBlockOffsetSharedArray;
3535
class CardTableRS;
3636
class ContiguousSpace;
3737

@@ -50,7 +50,7 @@ class TenuredGeneration: public Generation {
5050
// This is shared with other generations.
5151
CardTableRS* _rs;
5252
// This is local to this generation.
53-
BlockOffsetSharedArray* _bts;
53+
SerialBlockOffsetSharedArray* _bts;
5454

5555
// Current shrinking effect: this damps shrinking when the heap gets empty.
5656
size_t _shrink_factor;

‎src/hotspot/share/gc/serial/vmStructs_serial.hpp

+27-37
Original file line numberDiff line numberDiff line change
@@ -29,38 +29,31 @@
2929
#include "gc/serial/serialHeap.hpp"
3030
#include "gc/serial/tenuredGeneration.hpp"
3131

32-
#define VM_STRUCTS_SERIALGC(nonstatic_field, \
33-
volatile_nonstatic_field, \
34-
static_field) \
35-
nonstatic_field(TenuredGeneration, _rs, CardTableRS*) \
36-
nonstatic_field(TenuredGeneration, _bts, BlockOffsetSharedArray*) \
37-
nonstatic_field(TenuredGeneration, _shrink_factor, size_t) \
38-
nonstatic_field(TenuredGeneration, _capacity_at_prologue, size_t) \
39-
nonstatic_field(TenuredGeneration, _used_at_prologue, size_t) \
40-
nonstatic_field(TenuredGeneration, _min_heap_delta_bytes, size_t) \
41-
nonstatic_field(TenuredGeneration, _the_space, TenuredSpace*) \
42-
\
43-
nonstatic_field(DefNewGeneration, _old_gen, Generation*) \
44-
nonstatic_field(DefNewGeneration, _tenuring_threshold, uint) \
45-
nonstatic_field(DefNewGeneration, _age_table, AgeTable) \
46-
nonstatic_field(DefNewGeneration, _eden_space, ContiguousSpace*) \
47-
nonstatic_field(DefNewGeneration, _from_space, ContiguousSpace*) \
48-
nonstatic_field(DefNewGeneration, _to_space, ContiguousSpace*) \
49-
\
50-
nonstatic_field(BlockOffsetTable, _bottom, HeapWord*) \
51-
nonstatic_field(BlockOffsetTable, _end, HeapWord*) \
52-
\
53-
nonstatic_field(BlockOffsetSharedArray, _reserved, MemRegion) \
54-
nonstatic_field(BlockOffsetSharedArray, _end, HeapWord*) \
55-
nonstatic_field(BlockOffsetSharedArray, _vs, VirtualSpace) \
56-
nonstatic_field(BlockOffsetSharedArray, _offset_array, u_char*) \
57-
\
58-
nonstatic_field(BlockOffsetArray, _array, BlockOffsetSharedArray*) \
59-
nonstatic_field(BlockOffsetArray, _sp, Space*) \
60-
nonstatic_field(BlockOffsetArrayContigSpace, _next_offset_threshold, HeapWord*) \
61-
nonstatic_field(BlockOffsetArrayContigSpace, _next_offset_index, size_t) \
62-
\
63-
nonstatic_field(TenuredSpace, _offsets, BlockOffsetArray)
32+
#define VM_STRUCTS_SERIALGC(nonstatic_field, \
33+
volatile_nonstatic_field, \
34+
static_field) \
35+
nonstatic_field(TenuredGeneration, _rs, CardTableRS*) \
36+
nonstatic_field(TenuredGeneration, _bts, SerialBlockOffsetSharedArray*) \
37+
nonstatic_field(TenuredGeneration, _shrink_factor, size_t) \
38+
nonstatic_field(TenuredGeneration, _capacity_at_prologue, size_t) \
39+
nonstatic_field(TenuredGeneration, _used_at_prologue, size_t) \
40+
nonstatic_field(TenuredGeneration, _min_heap_delta_bytes, size_t) \
41+
nonstatic_field(TenuredGeneration, _the_space, TenuredSpace*) \
42+
\
43+
nonstatic_field(DefNewGeneration, _old_gen, Generation*) \
44+
nonstatic_field(DefNewGeneration, _tenuring_threshold, uint) \
45+
nonstatic_field(DefNewGeneration, _age_table, AgeTable) \
46+
nonstatic_field(DefNewGeneration, _eden_space, ContiguousSpace*) \
47+
nonstatic_field(DefNewGeneration, _from_space, ContiguousSpace*) \
48+
nonstatic_field(DefNewGeneration, _to_space, ContiguousSpace*) \
49+
\
50+
nonstatic_field(SerialBlockOffsetTable, _array, SerialBlockOffsetSharedArray*) \
51+
\
52+
nonstatic_field(SerialBlockOffsetSharedArray, _reserved, MemRegion) \
53+
nonstatic_field(SerialBlockOffsetSharedArray, _vs, VirtualSpace) \
54+
nonstatic_field(SerialBlockOffsetSharedArray, _offset_array, u_char*) \
55+
\
56+
nonstatic_field(TenuredSpace, _offsets, SerialBlockOffsetTable)
6457

6558
#define VM_TYPES_SERIALGC(declare_type, \
6659
declare_toplevel_type, \
@@ -73,11 +66,8 @@
7366
declare_type(CardTableRS, CardTable) \
7467
\
7568
declare_toplevel_type(TenuredGeneration*) \
76-
declare_toplevel_type(BlockOffsetSharedArray) \
77-
declare_toplevel_type(BlockOffsetTable) \
78-
declare_type(BlockOffsetArray, BlockOffsetTable) \
79-
declare_type(BlockOffsetArrayContigSpace, BlockOffsetArray) \
80-
declare_toplevel_type(BlockOffsetSharedArray*)
69+
declare_toplevel_type(SerialBlockOffsetSharedArray) \
70+
declare_toplevel_type(SerialBlockOffsetTable)
8171

8272
#define VM_INT_CONSTANTS_SERIALGC(declare_constant, \
8373
declare_constant_with_value)

‎src/hotspot/share/gc/shared/space.cpp

+21-37
Original file line numberDiff line numberDiff line change
@@ -87,25 +87,6 @@ bool ContiguousSpace::is_free_block(const HeapWord* p) const {
8787
return p >= _top;
8888
}
8989

90-
#if INCLUDE_SERIALGC
91-
void TenuredSpace::clear(bool mangle_space) {
92-
ContiguousSpace::clear(mangle_space);
93-
_offsets.initialize_threshold();
94-
}
95-
96-
void TenuredSpace::set_bottom(HeapWord* new_bottom) {
97-
Space::set_bottom(new_bottom);
98-
_offsets.set_bottom(new_bottom);
99-
}
100-
101-
void TenuredSpace::set_end(HeapWord* new_end) {
102-
// Space should not advertise an increase in size
103-
// until after the underlying offset table has been enlarged.
104-
_offsets.resize(pointer_delta(new_end, bottom()));
105-
Space::set_end(new_end);
106-
}
107-
#endif // INCLUDE_SERIALGC
108-
10990
#ifndef PRODUCT
11091

11192
void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
@@ -152,7 +133,6 @@ HeapWord* ContiguousSpace::forward(oop q, size_t size,
152133
}
153134
compact_top = cp->space->bottom();
154135
cp->space->set_compaction_top(compact_top);
155-
cp->space->initialize_threshold();
156136
compaction_max_size = pointer_delta(cp->space->end(), compact_top);
157137
}
158138

@@ -172,7 +152,7 @@ HeapWord* ContiguousSpace::forward(oop q, size_t size,
172152
// We need to update the offset table so that the beginnings of objects can be
173153
// found during scavenge. Note that we are updating the offset table based on
174154
// where the object will be once the compaction phase finishes.
175-
cp->space->alloc_block(compact_top - size, compact_top);
155+
cp->space->update_for_block(compact_top - size, compact_top);
176156
return compact_top;
177157
}
178158

@@ -190,7 +170,6 @@ void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
190170
assert(cp->gen != nullptr, "need a generation");
191171
assert(cp->gen->first_compaction_space() == this, "just checking");
192172
cp->space = cp->gen->first_compaction_space();
193-
cp->space->initialize_threshold();
194173
cp->space->set_compaction_top(cp->space->bottom());
195174
}
196175

@@ -384,9 +363,8 @@ void ContiguousSpace::print_on(outputStream* st) const {
384363
#if INCLUDE_SERIALGC
385364
void TenuredSpace::print_on(outputStream* st) const {
386365
print_short_on(st);
387-
st->print_cr(" [" PTR_FORMAT ", " PTR_FORMAT ", "
388-
PTR_FORMAT ", " PTR_FORMAT ")",
389-
p2i(bottom()), p2i(top()), p2i(_offsets.threshold()), p2i(end()));
366+
st->print_cr(" [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ")",
367+
p2i(bottom()), p2i(top()), p2i(end()));
390368
}
391369
#endif
392370

@@ -510,20 +488,30 @@ HeapWord* ContiguousSpace::par_allocate(size_t size) {
510488
}
511489

512490
#if INCLUDE_SERIALGC
513-
void TenuredSpace::initialize_threshold() {
514-
_offsets.initialize_threshold();
491+
void TenuredSpace::update_for_block(HeapWord* start, HeapWord* end) {
492+
_offsets.update_for_block(start, end);
515493
}
516494

517-
void TenuredSpace::alloc_block(HeapWord* start, HeapWord* end) {
518-
_offsets.alloc_block(start, end);
495+
HeapWord* TenuredSpace::block_start_const(const void* addr) const {
496+
HeapWord* cur_block = _offsets.block_start_reaching_into_card(addr);
497+
498+
while (true) {
499+
HeapWord* next_block = cur_block + cast_to_oop(cur_block)->size();
500+
if (next_block > addr) {
501+
assert(cur_block <= addr, "postcondition");
502+
return cur_block;
503+
}
504+
cur_block = next_block;
505+
// Because the BOT is precise, we should never step into the next card
506+
// (i.e. crossing the card boundary).
507+
assert(!SerialBlockOffsetTable::is_crossing_card_boundary(cur_block, (HeapWord*)addr), "must be");
508+
}
519509
}
520510

521-
TenuredSpace::TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
511+
TenuredSpace::TenuredSpace(SerialBlockOffsetSharedArray* sharedOffsetArray,
522512
MemRegion mr) :
523-
_offsets(sharedOffsetArray, mr),
524-
_par_alloc_lock(Mutex::safepoint, "TenuredSpaceParAlloc_lock", true)
513+
_offsets(sharedOffsetArray)
525514
{
526-
_offsets.set_contig_space(this);
527515
initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
528516
}
529517

@@ -536,10 +524,6 @@ void TenuredSpace::verify() const {
536524
int objs = 0;
537525
int blocks = 0;
538526

539-
if (VerifyObjectStartArray) {
540-
_offsets.verify();
541-
}
542-
543527
while (p < top()) {
544528
size_t size = cast_to_oop(p)->size();
545529
// For a sampling of objects in the space, find it using the

‎src/hotspot/share/gc/shared/space.hpp

+7-25
Original file line numberDiff line numberDiff line change
@@ -47,11 +47,6 @@
4747
// Forward decls.
4848
class Space;
4949
class ContiguousSpace;
50-
#if INCLUDE_SERIALGC
51-
class BlockOffsetArray;
52-
class BlockOffsetArrayContigSpace;
53-
class BlockOffsetTable;
54-
#endif
5550
class Generation;
5651
class ContiguousSpace;
5752
class CardTableRS;
@@ -241,7 +236,7 @@ class ContiguousSpace: public Space {
241236

242237
// This the function to invoke when an allocation of an object covering
243238
// "start" to "end" occurs to update other internal data structures.
244-
virtual void alloc_block(HeapWord* start, HeapWord* the_end) { }
239+
virtual void update_for_block(HeapWord* start, HeapWord* the_end) { }
245240

246241
GenSpaceMangler* mangler() { return _mangler; }
247242

@@ -308,11 +303,6 @@ class ContiguousSpace: public Space {
308303
// live part of a compacted space ("deadwood" support.)
309304
virtual size_t allowed_dead_ratio() const { return 0; };
310305

311-
// Some contiguous spaces may maintain some data structures that should
312-
// be updated whenever an allocation crosses a boundary. This function
313-
// initializes these data structures for further updates.
314-
virtual void initialize_threshold() { }
315-
316306
// "q" is an object of the given "size" that should be forwarded;
317307
// "cp" names the generation ("gen") and containing "this" (which must
318308
// also equal "cp->space"). "compact_top" is where in "this" the
@@ -322,7 +312,7 @@ class ContiguousSpace: public Space {
322312
// be one, since compaction must succeed -- we go to the first space of
323313
// the previous generation if necessary, updating "cp"), reset compact_top
324314
// and then forward. In either case, returns the new value of "compact_top".
325-
// Invokes the "alloc_block" function of the then-current compaction
315+
// Invokes the "update_for_block" function of the then-current compaction
326316
// space.
327317
virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
328318
HeapWord* compact_top);
@@ -412,36 +402,28 @@ class ContiguousSpace: public Space {
412402
#if INCLUDE_SERIALGC
413403

414404
// Class TenuredSpace is used by TenuredGeneration; it supports an efficient
415-
// "block_start" operation via a BlockOffsetArray (whose BlockOffsetSharedArray
416-
// may be shared with other spaces.)
405+
// "block_start" operation via a SerialBlockOffsetTable.
417406

418407
class TenuredSpace: public ContiguousSpace {
419408
friend class VMStructs;
420409
protected:
421-
BlockOffsetArrayContigSpace _offsets;
422-
Mutex _par_alloc_lock;
410+
SerialBlockOffsetTable _offsets;
423411

424412
// Mark sweep support
425413
size_t allowed_dead_ratio() const override;
426414
public:
427415
// Constructor
428-
TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
416+
TenuredSpace(SerialBlockOffsetSharedArray* sharedOffsetArray,
429417
MemRegion mr);
430418

431-
void set_bottom(HeapWord* value) override;
432-
void set_end(HeapWord* value) override;
433-
434-
void clear(bool mangle_space) override;
435-
436-
inline HeapWord* block_start_const(const void* p) const override;
419+
HeapWord* block_start_const(const void* addr) const override;
437420

438421
// Add offset table update.
439422
inline HeapWord* allocate(size_t word_size) override;
440423
inline HeapWord* par_allocate(size_t word_size) override;
441424

442425
// MarkSweep support phase3
443-
void initialize_threshold() override;
444-
void alloc_block(HeapWord* start, HeapWord* end) override;
426+
void update_for_block(HeapWord* start, HeapWord* end) override;
445427

446428
void print_on(outputStream* st) const override;
447429

‎src/hotspot/share/gc/shared/space.inline.hpp

+2-19
Original file line numberDiff line numberDiff line change
@@ -47,36 +47,19 @@ inline HeapWord* Space::block_start(const void* p) {
4747
inline HeapWord* TenuredSpace::allocate(size_t size) {
4848
HeapWord* res = ContiguousSpace::allocate(size);
4949
if (res != nullptr) {
50-
_offsets.alloc_block(res, size);
50+
_offsets.update_for_block(res, res + size);
5151
}
5252
return res;
5353
}
5454

55-
// Because of the requirement of keeping "_offsets" up to date with the
56-
// allocations, we sequentialize these with a lock. Therefore, best if
57-
// this is used for larger LAB allocations only.
5855
inline HeapWord* TenuredSpace::par_allocate(size_t size) {
59-
MutexLocker x(&_par_alloc_lock);
60-
// This ought to be just "allocate", because of the lock above, but that
61-
// ContiguousSpace::allocate asserts that either the allocating thread
62-
// holds the heap lock or it is the VM thread and we're at a safepoint.
63-
// The best I (dld) could figure was to put a field in ContiguousSpace
64-
// meaning "locking at safepoint taken care of", and set/reset that
65-
// here. But this will do for now, especially in light of the comment
66-
// above. Perhaps in the future some lock-free manner of keeping the
67-
// coordination.
6856
HeapWord* res = ContiguousSpace::par_allocate(size);
6957
if (res != nullptr) {
70-
_offsets.alloc_block(res, size);
58+
_offsets.update_for_block(res, res + size);
7159
}
7260
return res;
7361
}
7462

75-
inline HeapWord*
76-
TenuredSpace::block_start_const(const void* p) const {
77-
return _offsets.block_start(p);
78-
}
79-
8063
class DeadSpacer : StackObj {
8164
size_t _allowed_deadspace_words;
8265
bool _active;

0 commit comments

Comments (0)
Please sign in to comment.