Commit b32e4a6

Xiaolong Peng authored and shipilev committed on Jul 11, 2024
8335356: Shenandoah: Improve concurrent cleanup locking
Reviewed-by: ysr, shade
1 parent 62cbf70 commit b32e4a6

2 files changed: +17 -4 lines changed
 

src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp

+16 -4

@@ -577,6 +577,7 @@ void ShenandoahRegionPartitions::assert_bounds() {
 ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
   _heap(heap),
   _partitions(max_regions, this),
+  _trash_regions(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, max_regions, mtGC)),
   _right_to_left_bias(false),
   _alloc_bias_weight(0)
 {
@@ -899,7 +900,7 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
   return _heap->get_region(beg)->bottom();
 }
 
-void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
+void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion* r) {
   if (r->is_trash()) {
     _heap->decrease_used(r->used());
     r->recycle();
@@ -910,13 +911,24 @@ void ShenandoahFreeSet::recycle_trash() {
   // lock is not reentrable, check we don't have it
   shenandoah_assert_not_heaplocked();
 
+  size_t count = 0;
   for (size_t i = 0; i < _heap->num_regions(); i++) {
     ShenandoahHeapRegion* r = _heap->get_region(i);
     if (r->is_trash()) {
-      ShenandoahHeapLocker locker(_heap->lock());
-      try_recycle_trashed(r);
+      _trash_regions[count++] = r;
+    }
+  }
+
+  // Relinquish the lock after this much time passed.
+  static constexpr jlong deadline_ns = 30000; // 30 us
+  size_t idx = 0;
+  while (idx < count) {
+    os::naked_yield(); // Yield to allow allocators to take the lock
+    ShenandoahHeapLocker locker(_heap->lock());
+    const jlong deadline = os::javaTimeNanos() + deadline_ns;
+    while (idx < count && os::javaTimeNanos() < deadline) {
+      try_recycle_trashed(_trash_regions[idx++]);
     }
-    SpinPause(); // allow allocators to take the lock
   }
 }
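The change above follows a lock-batching pattern: trash regions are first collected into the pre-sized _trash_regions array without holding the heap lock, and are then recycled in bounded time slices under the lock, with a yield between slices so that concurrent allocators can acquire it. Below is a minimal, self-contained sketch of that pattern using only the C++ standard library; the names Region, recycle_in_slices, and heap_lock are placeholders for illustration and are not part of this patch or of HotSpot.

// Minimal sketch (placeholder names): drain a pre-collected work list while
// holding a shared lock for at most one time slice per acquisition, yielding
// in between so contending threads (the allocators in the patch) can take it.
#include <chrono>
#include <cstddef>
#include <mutex>
#include <thread>
#include <vector>

struct Region {
  void recycle() { /* reclaim the region's memory */ }
};

void recycle_in_slices(std::vector<Region*>& trash, std::mutex& heap_lock) {
  using clock = std::chrono::steady_clock;
  constexpr auto slice = std::chrono::microseconds(30); // mirrors the 30 us deadline

  std::size_t idx = 0;
  while (idx < trash.size()) {
    std::this_thread::yield();                     // let waiting threads grab the lock
    std::lock_guard<std::mutex> guard(heap_lock);  // held only for one slice
    const auto deadline = clock::now() + slice;
    while (idx < trash.size() && clock::now() < deadline) {
      trash[idx++]->recycle();                     // one unit of work under the lock
    }
  }  // guard is destroyed at the end of each iteration, releasing the lock between slices
}

The patch itself uses HotSpot's ShenandoahHeapLocker, os::naked_yield(), and os::javaTimeNanos() rather than the standard-library primitives sketched here, but the control flow is the same: holding the lock for roughly 30 us at a time keeps allocator latency bounded, while yielding between slices avoids starving mutator threads waiting for the heap lock.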
src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp

+1

@@ -258,6 +258,7 @@ class ShenandoahFreeSet : public CHeapObj<mtGC> {
 private:
   ShenandoahHeap* const _heap;
   ShenandoahRegionPartitions _partitions;
+  ShenandoahHeapRegion** _trash_regions;
 
   // Mutator allocations are biased from left-to-right or from right-to-left based on which end of mutator range
   // is most likely to hold partially used regions. In general, we want to finish consuming partially used
