8210708: Use single mark bitmap in G1
Co-authored-by: Stefan Johansson <sjohanss@openjdk.org>
Co-authored-by: Ivan Walulya <iwalulya@openjdk.org>
Reviewed-by: iwalulya, ayang
3 people committed Jul 7, 2022
1 parent 8e7b45b commit 95e3190
Showing 48 changed files with 1,243 additions and 1,063 deletions.
13 changes: 0 additions & 13 deletions src/hotspot/share/gc/g1/g1BlockOffsetTable.cpp
@@ -79,19 +79,6 @@ G1BlockOffsetTablePart::G1BlockOffsetTablePart(G1BlockOffsetTable* array, HeapRegion* hr)
{
}

void G1BlockOffsetTablePart::update() {
HeapWord* next_addr = _hr->bottom();
HeapWord* const limit = _hr->top();

HeapWord* prev_addr;
while (next_addr < limit) {
prev_addr = next_addr;
next_addr = prev_addr + block_size(prev_addr);
update_for_block(prev_addr, next_addr);
}
assert(next_addr == limit, "Should stop the scan at the limit.");
}

// Write the backskip value for each region.
//
// offset
12 changes: 7 additions & 5 deletions src/hotspot/share/gc/g1/g1BlockOffsetTable.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -122,15 +122,18 @@ class G1BlockOffsetTablePart {
void set_remainder_to_point_to_start_incl(size_t start, size_t end);

inline size_t block_size(const HeapWord* p) const;
inline size_t block_size(const HeapWord* p, HeapWord* pb) const;

// Returns the address of a block whose start is at most "addr".
inline HeapWord* block_at_or_preceding(const void* addr) const;

// Return the address of the beginning of the block that contains "addr".
// "q" is a block boundary that is <= "addr"; "n" is the address of the
// next block (or the end of the space.)
// "pb" is the current value of the region's parsable_bottom.
inline HeapWord* forward_to_block_containing_addr(HeapWord* q, HeapWord* n,
const void* addr) const;
const void* addr,
HeapWord* pb) const;

// Update BOT entries corresponding to the mem range [blk_start, blk_end).
void update_for_block_work(HeapWord* blk_start, HeapWord* blk_end);
@@ -152,16 +155,15 @@ class G1BlockOffsetTablePart {
// The elements of the array are initialized to zero.
G1BlockOffsetTablePart(G1BlockOffsetTable* array, HeapRegion* hr);

void update();

void verify() const;

// Returns the address of the start of the block containing "addr", or
// else "null" if it is covered by no block. (May have side effects,
// namely updating of shared array entries that "point" too far
// backwards. This can occur, for example, when lab allocation is used
// in a space covered by the table.)
inline HeapWord* block_start(const void* addr);
// "pb" is the current value of the region's parsable_bottom.
inline HeapWord* block_start(const void* addr, HeapWord* pb);

void update_for_block(HeapWord* blk_start, HeapWord* blk_end) {
if (is_crossing_card_boundary(blk_start, blk_end)) {
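
The `update_for_block` fast path above only falls into `update_for_block_work` when a block actually spans a card boundary; blocks confined to a single card need no BOT entry. A minimal standalone sketch of that kind of check, assuming G1's default 512-byte card size (toy addresses, not the HotSpot helper, whose exact boundary handling lives in g1BlockOffsetTable.hpp):

```cpp
#include <cassert>
#include <cstdint>

// A block [blk_start, blk_end) needs a BOT update only if it spans more than
// one 512-byte card, i.e. its first and last bytes land on different cards.
constexpr uintptr_t kCardSizeBytes = 512;   // G1's default card size

constexpr bool crosses_card_boundary(uintptr_t blk_start, uintptr_t blk_end) {
  return (blk_start / kCardSizeBytes) != ((blk_end - 1) / kCardSizeBytes);
}

int main() {
  assert(!crosses_card_boundary(0, 512));    // fills card 0 exactly, no update
  assert(crosses_card_boundary(256, 600));   // spans cards 0 and 1, update BOT
  return 0;
}
```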
17 changes: 11 additions & 6 deletions src/hotspot/share/gc/g1/g1BlockOffsetTable.inline.hpp
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,11 +32,11 @@
#include "runtime/atomic.hpp"
#include "oops/oop.inline.hpp"

inline HeapWord* G1BlockOffsetTablePart::block_start(const void* addr) {
inline HeapWord* G1BlockOffsetTablePart::block_start(const void* addr, HeapWord* const pb) {
assert(addr >= _hr->bottom() && addr < _hr->top(), "invalid address");
HeapWord* q = block_at_or_preceding(addr);
HeapWord* n = q + block_size(q);
return forward_to_block_containing_addr(q, n, addr);
HeapWord* n = q + block_size(q, pb);
return forward_to_block_containing_addr(q, n, addr, pb);
}

u_char G1BlockOffsetTable::offset_array(size_t index) const {
@@ -99,6 +99,10 @@ inline size_t G1BlockOffsetTablePart::block_size(const HeapWord* p) const {
return _hr->block_size(p);
}

inline size_t G1BlockOffsetTablePart::block_size(const HeapWord* p, HeapWord* const pb) const {
return _hr->block_size(p, pb);
}

inline HeapWord* G1BlockOffsetTablePart::block_at_or_preceding(const void* addr) const {
#ifdef ASSERT
if (!_hr->is_continues_humongous()) {
@@ -126,7 +130,8 @@ inline HeapWord* G1BlockOffsetTablePart::block_at_or_preceding(const void* addr)
}

inline HeapWord* G1BlockOffsetTablePart::forward_to_block_containing_addr(HeapWord* q, HeapWord* n,
const void* addr) const {
const void* addr,
HeapWord* const pb) const {
while (n <= addr) {
// When addr is not covered by the block starting at q we need to
// step forward until we find the correct block. With the BOT
@@ -138,7 +143,7 @@ inline HeapWord* G1BlockOffsetTablePart::forward_to_block_containing_addr(HeapWord* q, HeapWord* n,
q = n;
assert(cast_to_oop(q)->klass_or_null() != nullptr,
"start of block must be an initialized object");
n += block_size(q);
n += block_size(q, pb);
}
assert(q <= addr, "wrong order for q and addr");
assert(addr < n, "wrong order for addr and n");
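
For orientation, the lookup above is a two-step pattern: `block_at_or_preceding` reads a BOT entry to get a block start at or before `addr`, then `forward_to_block_containing_addr` walks forward block by block until the block covering `addr` is reached. The new `pb` argument exists because sizes of blocks below the region's `parsable_bottom` can no longer be derived from object headers alone. A simplified, self-contained sketch of the forward scan (toy word addresses and a pluggable size function standing in for `block_size(q, pb)`):

```cpp
#include <cassert>
#include <cstddef>

using Addr = size_t;  // toy "address": a word index within a region

// Walk forward from candidate block start q until the block covering addr is
// found. block_size stands in for G1BlockOffsetTablePart::block_size(q, pb).
template <typename BlockSizeFn>
Addr forward_to_block_containing_addr(Addr q, Addr addr, BlockSizeFn block_size) {
  Addr n = q + block_size(q);   // exclusive end of the candidate block
  while (n <= addr) {           // addr not covered yet: step to the next block
    q = n;
    n += block_size(q);
  }
  assert(q <= addr && addr < n);
  return q;
}

int main() {
  // Three consecutive blocks: [0, 8), [8, 32), [32, 48).
  auto block_size = [](Addr q) -> size_t {
    return q == 0 ? 8 : (q == 8 ? 24 : 16);
  };
  // The BOT pointed us at the block starting at 0; we want the block holding word 20.
  assert(forward_to_block_containing_addr(0, 20, block_size) == 8);
  return 0;
}
```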
2 changes: 1 addition & 1 deletion src/hotspot/share/gc/g1/g1CodeBlobClosure.cpp
@@ -59,7 +59,7 @@ void G1CodeBlobClosure::MarkingOopClosure::do_oop_work(T* p) {
T oop_or_narrowoop = RawAccess<>::oop_load(p);
if (!CompressedOops::is_null(oop_or_narrowoop)) {
oop o = CompressedOops::decode_not_null(oop_or_narrowoop);
_cm->mark_in_next_bitmap(_worker_id, o);
_cm->mark_in_bitmap(_worker_id, o);
}
}

67 changes: 26 additions & 41 deletions src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -285,8 +285,6 @@ G1CollectedHeap::humongous_obj_allocate_initialize_regions(HeapRegion* first_hr,
assert(hr->bottom() < obj_top && obj_top <= hr->end(),
"obj_top should be in last region");

_verifier->check_bitmaps("Humongous Region Allocation", first_hr);

assert(words_not_fillable == 0 ||
first_hr->bottom() + word_size_sum - words_not_fillable == hr->top(),
"Miscalculation in humongous allocation");
@@ -436,7 +434,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {

if (should_try_gc) {
GCCause::Cause gc_cause = preventive_collection_required ? GCCause::_g1_preventive_collection
: GCCause::_g1_inc_collection_pause;
: GCCause::_g1_inc_collection_pause;
bool succeeded;
result = do_collection_pause(word_size, gc_count_before, &succeeded, gc_cause);
if (result != NULL) {
@@ -985,7 +983,7 @@ void G1CollectedHeap::print_heap_after_full_collection() {
}
}

void G1CollectedHeap::abort_concurrent_cycle() {
bool G1CollectedHeap::abort_concurrent_cycle() {
// If we start the compaction before the CM threads finish
// scanning the root regions we might trip them over as we'll
// be moving objects / updating references. So let's wait until
@@ -1002,7 +1000,7 @@ void G1CollectedHeap::abort_concurrent_cycle() {

// Abandon current iterations of concurrent marking and concurrent
// refinement, if any are in progress.
concurrent_mark()->concurrent_cycle_abort();
return concurrent_mark()->concurrent_cycle_abort();
}

void G1CollectedHeap::prepare_heap_for_full_collection() {
@@ -1027,7 +1025,7 @@ void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
}
_verifier->verify_region_sets_optional();
_verifier->verify_before_gc(G1HeapVerifier::G1VerifyFull);
_verifier->check_bitmaps("Full GC Start");
_verifier->verify_bitmap_clear(true /* above_tams_only */);
}

void G1CollectedHeap::prepare_heap_for_mutators() {
@@ -1076,9 +1074,7 @@ void G1CollectedHeap::verify_after_full_collection() {
_hrm.verify_optional();
_verifier->verify_region_sets_optional();
_verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);

// This call implicitly verifies that the next bitmap is clear after Full GC.
_verifier->check_bitmaps("Full GC End");
_verifier->verify_bitmap_clear(false /* above_tams_only */);

// At this point there should be no regions in the
// entire heap tagged as young.
@@ -1627,7 +1623,7 @@ jint G1CollectedHeap::initialize() {
heap_rs.size());
heap_storage->set_mapping_changed_listener(&_listener);

// Create storage for the BOT, card table, card counts table (hot card cache) and the bitmaps.
// Create storage for the BOT, card table, card counts table (hot card cache) and the bitmap.
G1RegionToSpaceMapper* bot_storage =
create_aux_memory_mapper("Block Offset Table",
G1BlockOffsetTable::compute_size(heap_rs.size() / HeapWordSize),
@@ -1644,12 +1640,10 @@ jint G1CollectedHeap::initialize() {
G1CardCounts::heap_map_factor());

size_t bitmap_size = G1CMBitMap::compute_size(heap_rs.size());
G1RegionToSpaceMapper* prev_bitmap_storage =
create_aux_memory_mapper("Prev Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
G1RegionToSpaceMapper* next_bitmap_storage =
create_aux_memory_mapper("Next Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
G1RegionToSpaceMapper* bitmap_storage =
create_aux_memory_mapper("Mark Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());

_hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
_hrm.initialize(heap_storage, bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
_card_table->initialize(cardtable_storage);

// Do later initialization work for concurrent refinement.
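
The replaced prev/next pair is the main footprint win of this change. As a rough sizing sketch (assuming the usual MarkBitMap layout of one mark bit per 8-byte heap word and ignoring the alignment G1CMBitMap::compute_size applies), a single bitmap costs about heap_size / 64 bytes of native memory, and dropping the second bitmap halves that:

```cpp
#include <cstddef>
#include <cstdio>

// Rough mark-bitmap footprint: one bit covers one 8-byte HeapWord, so a
// bitmap needs heap_bytes / 64 bytes (page alignment ignored).
constexpr size_t bitmap_bytes(size_t heap_bytes) {
  return heap_bytes / (8 /* bytes per HeapWord */ * 8 /* bits per byte */);
}

int main() {
  const size_t heap = 32ull * 1024 * 1024 * 1024;                               // 32 GB heap
  std::printf("single bitmap      : %zu MB\n", bitmap_bytes(heap) >> 20);       // 512 MB
  std::printf("prev + next bitmaps: %zu MB\n", (2 * bitmap_bytes(heap)) >> 20); // 1024 MB before this change
  return 0;
}
```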
@@ -1695,7 +1689,7 @@ jint G1CollectedHeap::initialize() {

// Create the G1ConcurrentMark data structure and thread.
// (Must do this late, so that "max_[reserved_]regions" is defined.)
_cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
_cm = new G1ConcurrentMark(this, bitmap_storage);
_cm_thread = _cm->cm_thread();

// Now expand into the initial heap size.
@@ -2352,12 +2346,12 @@ HeapWord* G1CollectedHeap::block_start(const void* addr) const {
if (addr >= hr->top()) {
return nullptr;
}
return hr->block_start(addr);
return hr->block_start(addr, hr->parsable_bottom_acquire());
}

bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
HeapRegion* hr = heap_region_containing(addr);
return hr->block_is_obj(addr);
return hr->block_is_obj(addr, hr->parsable_bottom_acquire());
}

size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
@@ -2412,19 +2406,19 @@ bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
const HeapRegion* hr,
const VerifyOption vo) const {
switch (vo) {
case VerifyOption::G1UsePrevMarking: return is_obj_dead(obj, hr);
case VerifyOption::G1UseConcMarking: return is_obj_dead(obj, hr);
case VerifyOption::G1UseFullMarking: return is_obj_dead_full(obj, hr);
default: ShouldNotReachHere();
default: ShouldNotReachHere();
}
return false; // keep some compilers happy
}

bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
const VerifyOption vo) const {
switch (vo) {
case VerifyOption::G1UsePrevMarking: return is_obj_dead(obj);
case VerifyOption::G1UseConcMarking: return is_obj_dead(obj);
case VerifyOption::G1UseFullMarking: return is_obj_dead_full(obj);
default: ShouldNotReachHere();
default: ShouldNotReachHere();
}
return false; // keep some compilers happy
}
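
With a single bitmap there is no prev/next distinction left to verify against, so the prev-marking case above collapses into `G1UseConcMarking`. The liveness rule the bitmap supports is: an object below the region's TAMS (top-at-mark-start) is live only if its start is marked; anything allocated at or above TAMS since marking began is implicitly live. A minimal standalone sketch of that rule (simplified types; the real `is_obj_dead` also special-cases archive and young regions, which this ignores):

```cpp
#include <cassert>
#include <cstddef>
#include <unordered_set>

// Simplified model: addresses are word indices within one region.
struct Region {
  size_t bottom;
  size_t tams;   // top-at-mark-start: objects at/above this were allocated
                 // after marking started and are implicitly live
  size_t top;
};

struct MarkBitmap {
  std::unordered_set<size_t> marked;   // stand-in for the real bit array
  bool is_marked(size_t addr) const { return marked.count(addr) != 0; }
};

// Dead iff the object predates the marking cycle (below TAMS) and was not marked.
bool is_obj_dead(size_t obj, const Region& r, const MarkBitmap& bm) {
  const bool allocated_since_mark_start = obj >= r.tams;
  return !allocated_since_mark_start && !bm.is_marked(obj);
}

int main() {
  Region r{/*bottom=*/0, /*tams=*/100, /*top=*/150};
  MarkBitmap bm;
  bm.marked.insert(40);                // object at 40 was marked live
  assert(!is_obj_dead(40, r, bm));     // below TAMS but marked -> live
  assert(is_obj_dead(60, r, bm));      // below TAMS, unmarked -> dead
  assert(!is_obj_dead(120, r, bm));    // above TAMS -> implicitly live
  return 0;
}
```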
@@ -2472,7 +2466,7 @@ void G1CollectedHeap::print_regions_on(outputStream* st) const {
"HS=humongous(starts), HC=humongous(continues), "
"CS=collection set, F=free, "
"OA=open archive, CA=closed archive, "
"TAMS=top-at-mark-start (previous, next)");
"TAMS=top-at-mark-start, "
"PB=parsable bottom");
PrintRegionClosure blk(st);
heap_region_iterate(&blk);
}
@@ -2756,7 +2751,6 @@ void G1CollectedHeap::verify_before_young_collection(G1HeapVerifier::G1VerifyType type) {
heap_region_iterate(&v_cl);
}
_verifier->verify_before_gc(type);
_verifier->check_bitmaps("GC Start");
verify_numa_regions("GC Start");
phase_times()->record_verify_before_time_ms((Ticks::now() - start).seconds() * MILLIUNITS);
}
@@ -2772,7 +2766,6 @@ void G1CollectedHeap::verify_after_young_collection(G1HeapVerifier::G1VerifyType type) {
heap_region_iterate(&v_cl);
}
_verifier->verify_after_gc(type);
_verifier->check_bitmaps("GC End");
verify_numa_regions("GC End");
_verifier->verify_region_sets_optional();
phase_times()->record_verify_after_time_ms((Ticks::now() - start).seconds() * MILLIUNITS);
@@ -2885,6 +2878,7 @@ void G1CollectedHeap::do_collection_pause_at_safepoint_helper(double target_paus
// without its logging output interfering with the logging output
// that came from the pause.
if (should_start_concurrent_mark_operation) {
verifier()->verify_bitmap_clear(true /* above_tams_only */);
// CAUTION: after the start_concurrent_cycle() call below, the concurrent marking
// thread(s) could be running concurrently with us. Make sure that anything
// after this point does not assume that we are the only GC thread running.
@@ -2916,7 +2910,7 @@ void G1CollectedHeap::make_pending_list_reachable() {
oop pll_head = Universe::reference_pending_list();
if (pll_head != NULL) {
// Any valid worker id is fine here as we are in the VM thread and single-threaded.
_cm->mark_in_next_bitmap(0 /* worker_id */, pll_head);
_cm->mark_in_bitmap(0 /* worker_id */, pll_head);
}
}
}
@@ -2947,20 +2941,15 @@ void G1CollectedHeap::record_obj_copy_mem_stats() {
create_g1_evac_summary(&_old_evac_stats));
}

void G1CollectedHeap::clear_prev_bitmap_for_region(HeapRegion* hr) {
MemRegion mr(hr->bottom(), hr->end());
concurrent_mark()->clear_range_in_prev_bitmap(mr);
void G1CollectedHeap::clear_bitmap_for_region(HeapRegion* hr) {
concurrent_mark()->clear_bitmap_for_region(hr);
}

void G1CollectedHeap::free_region(HeapRegion* hr, FreeRegionList* free_list) {
assert(!hr->is_free(), "the region should not be free");
assert(!hr->is_empty(), "the region should not be empty");
assert(_hrm.is_available(hr->hrm_index()), "region should be committed");

if (G1VerifyBitmaps) {
clear_prev_bitmap_for_region(hr);
}

// Clear the card counts for this region.
// Note: we only need to do this if the region is not young
// (since we don't refine cards in young regions).
@@ -3208,7 +3197,6 @@ HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
if (new_alloc_region != NULL) {
set_region_short_lived_locked(new_alloc_region);
_hr_printer.alloc(new_alloc_region, !should_allocate);
_verifier->check_bitmaps("Mutator Region Allocation", new_alloc_region);
_policy->remset_tracker()->update_at_allocate(new_alloc_region);
return new_alloc_region;
}
@@ -3265,11 +3253,9 @@ HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegionA
if (type.is_survivor()) {
new_alloc_region->set_survivor();
_survivor.add(new_alloc_region);
_verifier->check_bitmaps("Survivor Region Allocation", new_alloc_region);
register_new_survivor_region_with_region_attr(new_alloc_region);
} else {
new_alloc_region->set_old();
_verifier->check_bitmaps("Old Region Allocation", new_alloc_region);
}
_policy->remset_tracker()->update_at_allocate(new_alloc_region);
register_region_with_region_attr(new_alloc_region);
@@ -3292,7 +3278,7 @@ void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,

bool const during_im = collector_state()->in_concurrent_start_gc();
if (during_im && allocated_bytes > 0) {
_cm->root_regions()->add(alloc_region->next_top_at_mark_start(), alloc_region->top());
_cm->root_regions()->add(alloc_region->top_at_mark_start(), alloc_region->top());
}
_hr_printer.retire(alloc_region);
}
@@ -3313,13 +3299,12 @@ HeapRegion* G1CollectedHeap::alloc_highest_free_region() {

void G1CollectedHeap::mark_evac_failure_object(const oop obj) const {
// All objects failing evacuation are live. What we'll do is
// that we'll update the prev marking info so that they are
// all under PTAMS and explicitly marked.
_cm->par_mark_in_prev_bitmap(obj);
// that we'll update the marking info so that they are
// all below TAMS and explicitly marked.
_cm->raw_mark_in_bitmap(obj);
}

// Optimized nmethod scanning

class RegisterNMethodOopClosure: public OopClosure {
G1CollectedHeap* _g1h;
nmethod* _nm;
