@@ -129,7 +129,7 @@ size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
 // is done by clients of this interface.)
 
 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
-  HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
+  G1HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
 }
 
 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
@@ -162,7 +162,7 @@ G1HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
 // Private methods.
 
 G1HeapRegion* G1CollectedHeap::new_region(size_t word_size,
-                                          HeapRegionType type,
+                                          G1HeapRegionType type,
                                           bool do_expand,
                                           uint node_index) {
   assert(!is_humongous(word_size) || word_size <= G1HeapRegion::GrainWords,
@@ -710,7 +710,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
   ShouldNotReachHere();
 }
 
-class PostCompactionPrinterClosure : public HeapRegionClosure {
+class PostCompactionPrinterClosure : public G1HeapRegionClosure {
 public:
   bool do_heap_region(G1HeapRegion* hr) {
     assert(!hr->is_young(), "not expecting to find young regions");
@@ -1070,7 +1070,7 @@ void G1CollectedHeap::shrink(size_t shrink_bytes) {
   _verifier->verify_region_sets_optional();
 }
 
-class OldRegionSetChecker : public HeapRegionSetChecker {
+class OldRegionSetChecker : public G1HeapRegionSetChecker {
 public:
   void check_mt_safety() {
     // Master Old Set MT safety protocol:
@@ -1098,7 +1098,7 @@ class OldRegionSetChecker : public HeapRegionSetChecker {
   const char* get_description() { return "Old Regions"; }
 };
 
-class HumongousRegionSetChecker : public HeapRegionSetChecker {
+class HumongousRegionSetChecker : public G1HeapRegionSetChecker {
 public:
   void check_mt_safety() {
     // Humongous Set MT safety protocol:
@@ -1352,9 +1352,9 @@ jint G1CollectedHeap::initialize() {
   guarantee(G1HeapRegion::CardsPerRegion < max_cards_per_region,
             "too many cards per region");
 
-  HeapRegionRemSet::initialize(_reserved);
+  G1HeapRegionRemSet::initialize(_reserved);
 
-  FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
+  G1FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
 
   _bot = new G1BlockOffsetTable(reserved(), bot_storage);
 
@@ -1536,7 +1536,7 @@ size_t G1CollectedHeap::used_unlocked() const {
   return _summary_bytes_used;
 }
 
-class SumUsedClosure : public HeapRegionClosure {
+class SumUsedClosure : public G1HeapRegionClosure {
   size_t _used;
 public:
   SumUsedClosure() : _used(0) {}
@@ -1887,7 +1887,7 @@ bool G1CollectedHeap::is_in(const void* p) const {
 
 // Iterates an ObjectClosure over all objects within a G1HeapRegion.
 
-class IterateObjectClosureRegionClosure : public HeapRegionClosure {
+class IterateObjectClosureRegionClosure : public G1HeapRegionClosure {
   ObjectClosure* _cl;
 public:
   IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
@@ -1907,7 +1907,7 @@ void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
 class G1ParallelObjectIterator : public ParallelObjectIteratorImpl {
 private:
   G1CollectedHeap* _heap;
-  HeapRegionClaimer _claimer;
+  G1HeapRegionClaimer _claimer;
 
 public:
   G1ParallelObjectIterator(uint thread_num) :
@@ -1923,7 +1923,7 @@ ParallelObjectIteratorImpl* G1CollectedHeap::parallel_object_iterator(uint threa
   return new G1ParallelObjectIterator(thread_num);
 }
 
-void G1CollectedHeap::object_iterate_parallel(ObjectClosure* cl, uint worker_id, HeapRegionClaimer* claimer) {
+void G1CollectedHeap::object_iterate_parallel(ObjectClosure* cl, uint worker_id, G1HeapRegionClaimer* claimer) {
   IterateObjectClosureRegionClosure blk(cl);
   heap_region_par_iterate_from_worker_offset(&blk, claimer, worker_id);
 }
@@ -1932,43 +1932,43 @@ void G1CollectedHeap::keep_alive(oop obj) {
   G1BarrierSet::enqueue_preloaded(obj);
 }
 
-void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
+void G1CollectedHeap::heap_region_iterate(G1HeapRegionClosure* cl) const {
   _hrm.iterate(cl);
 }
 
-void G1CollectedHeap::heap_region_iterate(HeapRegionIndexClosure* cl) const {
+void G1CollectedHeap::heap_region_iterate(G1HeapRegionIndexClosure* cl) const {
   _hrm.iterate(cl);
 }
 
-void G1CollectedHeap::heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
-                                                                 HeapRegionClaimer* hrclaimer,
+void G1CollectedHeap::heap_region_par_iterate_from_worker_offset(G1HeapRegionClosure* cl,
+                                                                 G1HeapRegionClaimer* hrclaimer,
                                                                  uint worker_id) const {
   _hrm.par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));
 }
 
-void G1CollectedHeap::heap_region_par_iterate_from_start(HeapRegionClosure* cl,
-                                                         HeapRegionClaimer* hrclaimer) const {
+void G1CollectedHeap::heap_region_par_iterate_from_start(G1HeapRegionClosure* cl,
+                                                         G1HeapRegionClaimer* hrclaimer) const {
   _hrm.par_iterate(cl, hrclaimer, 0);
 }
 
-void G1CollectedHeap::collection_set_iterate_all(HeapRegionClosure* cl) {
+void G1CollectedHeap::collection_set_iterate_all(G1HeapRegionClosure* cl) {
   _collection_set.iterate(cl);
 }
 
-void G1CollectedHeap::collection_set_par_iterate_all(HeapRegionClosure* cl,
-                                                     HeapRegionClaimer* hr_claimer,
+void G1CollectedHeap::collection_set_par_iterate_all(G1HeapRegionClosure* cl,
+                                                     G1HeapRegionClaimer* hr_claimer,
                                                      uint worker_id) {
   _collection_set.par_iterate(cl, hr_claimer, worker_id);
 }
 
-void G1CollectedHeap::collection_set_iterate_increment_from(HeapRegionClosure* cl,
-                                                            HeapRegionClaimer* hr_claimer,
+void G1CollectedHeap::collection_set_iterate_increment_from(G1HeapRegionClosure* cl,
+                                                            G1HeapRegionClaimer* hr_claimer,
                                                             uint worker_id) {
   _collection_set.iterate_incremental_part_from(cl, hr_claimer, worker_id);
 }
 
-void G1CollectedHeap::par_iterate_regions_array(HeapRegionClosure* cl,
-                                                HeapRegionClaimer* hr_claimer,
+void G1CollectedHeap::par_iterate_regions_array(G1HeapRegionClosure* cl,
+                                                G1HeapRegionClaimer* hr_claimer,
                                                 const uint regions[],
                                                 size_t length,
                                                 uint worker_id) const {
@@ -2046,10 +2046,10 @@ bool G1CollectedHeap::supports_concurrent_gc_breakpoints() const {
   return true;
 }
 
-class PrintRegionClosure : public HeapRegionClosure {
+class G1PrintRegionClosure : public G1HeapRegionClosure {
   outputStream* _st;
 public:
-  PrintRegionClosure(outputStream* st) : _st(st) {}
+  G1PrintRegionClosure(outputStream* st) : _st(st) {}
   bool do_heap_region(G1HeapRegion* r) {
     r->print_on(_st);
     return false;
@@ -2121,7 +2121,7 @@ void G1CollectedHeap::print_regions_on(outputStream* st) const {
                "CS=collection set, F=free, "
                "TAMS=top-at-mark-start, "
                "PB=parsable bottom");
-  PrintRegionClosure blk(st);
+  G1PrintRegionClosure blk(st);
   heap_region_iterate(&blk);
 }
 
@@ -2281,14 +2281,14 @@ void G1CollectedHeap::start_concurrent_cycle(bool concurrent_operation_is_full_m
 bool G1CollectedHeap::is_potential_eager_reclaim_candidate(G1HeapRegion* r) const {
   // We don't nominate objects with many remembered set entries, on
   // the assumption that such objects are likely still live.
-  HeapRegionRemSet* rem_set = r->rem_set();
+  G1HeapRegionRemSet* rem_set = r->rem_set();
 
   return rem_set->occupancy_less_or_equal_than(G1EagerReclaimRemSetThreshold);
 }
 
 #ifndef PRODUCT
 void G1CollectedHeap::verify_region_attr_remset_is_tracked() {
-  class VerifyRegionAttrRemSet : public HeapRegionClosure {
+  class VerifyRegionAttrRemSet : public G1HeapRegionClosure {
   public:
     virtual bool do_heap_region(G1HeapRegion* r) {
       G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -2538,9 +2538,9 @@ void G1CollectedHeap::unload_classes_and_code(const char* description, BoolObjec
 }
 
 class G1BulkUnregisterNMethodTask : public WorkerTask {
-  HeapRegionClaimer _hrclaimer;
+  G1HeapRegionClaimer _hrclaimer;
 
-  class UnregisterNMethodsHeapRegionClosure : public HeapRegionClosure {
+  class UnregisterNMethodsHeapRegionClosure : public G1HeapRegionClosure {
   public:
 
     bool do_heap_region(G1HeapRegion* hr) {
@@ -2614,7 +2614,7 @@ void G1CollectedHeap::clear_bitmap_for_region(G1HeapRegion* hr) {
   concurrent_mark()->clear_bitmap_for_region(hr);
 }
 
-void G1CollectedHeap::free_region(G1HeapRegion* hr, FreeRegionList* free_list) {
+void G1CollectedHeap::free_region(G1HeapRegion* hr, G1FreeRegionList* free_list) {
   assert(!hr->is_free(), "the region should not be free");
   assert(!hr->is_empty(), "the region should not be empty");
   assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
@@ -2636,7 +2636,7 @@ void G1CollectedHeap::retain_region(G1HeapRegion* hr) {
 }
 
 void G1CollectedHeap::free_humongous_region(G1HeapRegion* hr,
-                                            FreeRegionList* free_list) {
+                                            G1FreeRegionList* free_list) {
   assert(hr->is_humongous(), "this is only for humongous regions");
   hr->clear_humongous();
   free_region(hr, free_list);
@@ -2652,7 +2652,7 @@ void G1CollectedHeap::remove_from_old_gen_sets(const uint old_regions_removed,
 
 }
 
-void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
+void G1CollectedHeap::prepend_to_freelist(G1FreeRegionList* list) {
   assert(list != nullptr, "list can't be null");
   if (!list->is_empty()) {
     MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag);
@@ -2678,7 +2678,7 @@ void G1CollectedHeap::rebuild_free_region_list() {
   phase_times()->record_total_rebuild_freelist_time_ms((Ticks::now() - start).seconds() * 1000.0);
 }
 
-class G1AbandonCollectionSetClosure : public HeapRegionClosure {
+class G1AbandonCollectionSetClosure : public G1HeapRegionClosure {
 public:
   virtual bool do_heap_region(G1HeapRegion* r) {
     assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index());
@@ -2707,7 +2707,7 @@ void G1CollectedHeap::set_region_short_lived_locked(G1HeapRegion* hr) {
 
 #ifdef ASSERT
 
-class NoYoungRegionsClosure : public HeapRegionClosure {
+class NoYoungRegionsClosure : public G1HeapRegionClosure {
 private:
   bool _success;
 public:
@@ -2768,22 +2768,22 @@ void G1CollectedHeap::set_used(size_t bytes) {
   _summary_bytes_used = bytes;
 }
 
-class RebuildRegionSetsClosure : public HeapRegionClosure {
+class RebuildRegionSetsClosure : public G1HeapRegionClosure {
 private:
   bool _free_list_only;
 
-  HeapRegionSet* _old_set;
-  HeapRegionSet* _humongous_set;
+  G1HeapRegionSet* _old_set;
+  G1HeapRegionSet* _humongous_set;
 
-  HeapRegionManager* _hrm;
+  G1HeapRegionManager* _hrm;
 
   size_t _total_used;
 
 public:
   RebuildRegionSetsClosure(bool free_list_only,
-                           HeapRegionSet* old_set,
-                           HeapRegionSet* humongous_set,
-                           HeapRegionManager* hrm) :
+                           G1HeapRegionSet* old_set,
+                           G1HeapRegionSet* humongous_set,
+                           G1HeapRegionManager* hrm) :
     _free_list_only(free_list_only), _old_set(old_set),
     _humongous_set(humongous_set), _hrm(hrm), _total_used(0) {
     assert(_hrm->num_free_regions() == 0, "pre-condition");
@@ -2849,7 +2849,7 @@ G1HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
   bool should_allocate = policy()->should_allocate_mutator_region();
   if (should_allocate) {
     G1HeapRegion* new_alloc_region = new_region(word_size,
-                                                HeapRegionType::Eden,
+                                                G1HeapRegionType::Eden,
                                                 false /* do_expand */,
                                                 node_index);
     if (new_alloc_region != nullptr) {
@@ -2895,11 +2895,11 @@ G1HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size, G1HeapRegio
     return nullptr;
   }
 
-  HeapRegionType type;
+  G1HeapRegionType type;
   if (dest.is_young()) {
-    type = HeapRegionType::Survivor;
+    type = G1HeapRegionType::Survivor;
   } else {
-    type = HeapRegionType::Old;
+    type = G1HeapRegionType::Old;
   }
 
   G1HeapRegion* new_alloc_region = new_region(word_size,