@@ -826,15 +826,21 @@ void PSParallelCompact::fill_dense_prefix_end(SpaceId id) {
   }
 }
 
-bool PSParallelCompact::reassess_maximum_compaction(bool maximum_compaction,
-                                                    size_t total_live_words,
-                                                    MutableSpace* const old_space,
-                                                    HeapWord* full_region_prefix_end) {
+bool PSParallelCompact::check_maximum_compaction(size_t total_live_words,
+                                                 MutableSpace* const old_space,
+                                                 HeapWord* full_region_prefix_end) {
+
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
+
+  // Check System.GC
+  bool is_max_on_system_gc = UseMaximumCompactionOnSystemGC
+                          && GCCause::is_user_requested_gc(heap->gc_cause());
+
   // Check if all live objs are larger than old-gen.
   const bool is_old_gen_overflowing = (total_live_words > old_space->capacity_in_words());
 
   // JVM flags
-  const uint total_invocations = ParallelScavengeHeap::heap()->total_full_collections();
+  const uint total_invocations = heap->total_full_collections();
   assert(total_invocations >= _maximum_compaction_gc_num, "sanity");
   const size_t gcs_since_max = total_invocations - _maximum_compaction_gc_num;
   const bool is_interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
@@ -843,15 +849,15 @@ bool PSParallelCompact::reassess_maximum_compaction(bool maximum_compaction,
   const bool is_region_full =
     full_region_prefix_end >= _summary_data.region_align_down(old_space->top());
 
-  if (maximum_compaction || is_old_gen_overflowing || is_interval_ended || is_region_full) {
+  if (is_max_on_system_gc || is_old_gen_overflowing || is_interval_ended || is_region_full) {
     _maximum_compaction_gc_num = total_invocations;
     return true;
   }
 
   return false;
 }
 
-void PSParallelCompact::summary_phase(bool maximum_compaction)
+void PSParallelCompact::summary_phase()
 {
   GCTraceTime(Info, gc, phases) tm("Summary Phase", &_gc_timer);
 
@@ -874,10 +880,9 @@ void PSParallelCompact::summary_phase(bool maximum_compaction)
     _space_info[i].set_dense_prefix(space->bottom());
   }
 
-  maximum_compaction = reassess_maximum_compaction(maximum_compaction,
-                                                   total_live_words,
-                                                   old_space,
-                                                   full_region_prefix_end);
+  bool maximum_compaction = check_maximum_compaction(total_live_words,
+                                                     old_space,
+                                                     full_region_prefix_end);
   HeapWord* dense_prefix_end =
     maximum_compaction ? full_region_prefix_end
                        : compute_dense_prefix_for_old_space(old_space,
@@ -958,26 +963,23 @@ void PSParallelCompact::summary_phase(bool maximum_compaction)
 // may be true because this method can be called without intervening
 // activity. For example when the heap space is tight and full measure
 // are being taken to free space.
-bool PSParallelCompact::invoke(bool maximum_heap_compaction) {
+bool PSParallelCompact::invoke(bool clear_all_soft_refs) {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(),
          "should be in vm thread");
 
-  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-  assert(!heap->is_stw_gc_active(), "not reentrant");
-
   IsSTWGCActiveMark mark;
 
-  const bool clear_all_soft_refs =
-    heap->soft_ref_policy()->should_clear_all_soft_refs();
+  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
+  clear_all_soft_refs = clear_all_soft_refs
+                     || heap->soft_ref_policy()->should_clear_all_soft_refs();
 
-  return PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
-                                             maximum_heap_compaction);
+  return PSParallelCompact::invoke_no_policy(clear_all_soft_refs);
 }
 
 // This method contains no policy. You should probably
 // be calling invoke() instead.
-bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
+bool PSParallelCompact::invoke_no_policy(bool clear_all_soft_refs) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
   assert(ref_processor() != nullptr, "Sanity");
 
@@ -998,7 +1000,7 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
 
   // The scope of casr should end after code that can change
   // SoftRefPolicy::_should_clear_all_soft_refs.
-  ClearedAllSoftRefs casr(maximum_heap_compaction,
+  ClearedAllSoftRefs casr(clear_all_soft_refs,
                           heap->soft_ref_policy());
 
   // Make sure data structures are sane, make the heap parsable, and do other
@@ -1033,17 +1035,15 @@ bool PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
   DerivedPointerTable::clear();
 #endif
 
-  ref_processor()->start_discovery(maximum_heap_compaction);
+  ref_processor()->start_discovery(clear_all_soft_refs);
 
   ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
                             false /* unregister_nmethods_during_purge */,
                             false /* lock_nmethod_free_separately */);
 
   marking_phase(&_gc_tracer);
 
-  bool max_on_system_gc = UseMaximumCompactionOnSystemGC
-                       && GCCause::is_user_requested_gc(gc_cause);
-  summary_phase(maximum_heap_compaction || max_on_system_gc);
+  summary_phase();
 
 #if COMPILER2_OR_JVMCI
   assert(DerivedPointerTable::is_active(), "Sanity");
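
As a side note, a minimal standalone C++ sketch of the decision that the new check_maximum_compaction() consolidates (the struct, field names, and threshold values below are illustrative assumptions, not HotSpot code): maximum compaction is chosen when the GC was user-requested with UseMaximumCompactionOnSystemGC set, when live data exceeds old-gen capacity, when the configured interval since the last maximum compaction has elapsed, or when the full-region prefix already reaches the top of the old generation.

// Illustrative stand-ins only: plain values replace the heap state that the real
// code reads from ParallelScavengeHeap, MutableSpace and ParallelCompactData.
#include <cstddef>
#include <cstdint>
#include <iostream>

struct CompactionInputs {
  bool     user_requested_gc;        // System.gc() with UseMaximumCompactionOnSystemGC
  size_t   total_live_words;         // live data found by the marking phase
  size_t   old_gen_capacity_words;   // capacity of the old generation
  uint32_t gcs_since_max;            // full GCs since the last maximum compaction
  uint32_t max_interval;             // stand-in for HeapMaximumCompactionInterval
  bool     full_prefix_reaches_top;  // full-region prefix covers all of old-gen
};

// OR of the four conditions that trigger maximum compaction.
bool should_do_maximum_compaction(const CompactionInputs& in) {
  const bool is_max_on_system_gc    = in.user_requested_gc;
  const bool is_old_gen_overflowing = in.total_live_words > in.old_gen_capacity_words;
  const bool is_interval_ended      = in.gcs_since_max > in.max_interval;
  const bool is_region_full         = in.full_prefix_reaches_top;
  return is_max_on_system_gc || is_old_gen_overflowing ||
         is_interval_ended || is_region_full;
}

int main() {
  CompactionInputs in{false, 800000, 1000000, 5, 20, false};
  std::cout << std::boolalpha
            << should_do_maximum_compaction(in) << '\n';   // false: no trigger fires
  in.gcs_since_max = 25;                                   // interval has elapsed
  std::cout << should_do_maximum_compaction(in) << '\n';   // true
  return 0;
}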