@@ -89,9 +89,7 @@ size_t ShenandoahGenerationalHeap::calculate_min_plab() {
size_t ShenandoahGenerationalHeap::calculate_max_plab() {
  size_t MaxTLABSizeWords = ShenandoahHeapRegion::max_tlab_size_words();
-  return ((ShenandoahMaxEvacLABRatio > 0)?
-          align_down(MIN2(MaxTLABSizeWords, PLAB::min_size() * ShenandoahMaxEvacLABRatio), CardTable::card_size_in_words()):
-          align_down(MaxTLABSizeWords, CardTable::card_size_in_words()));
+  return align_down(MaxTLABSizeWords, CardTable::card_size_in_words());
}
// Returns size in bytes
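
For a concrete sense of the rounding involved, here is a minimal standalone C++ sketch. It assumes 512-byte cards and 8-byte heap words (so 64 words per card) and a hypothetical TLAB cap; align_down/align_up are simplified stand-ins for HotSpot's alignment utilities, valid only for power-of-two alignments:

    #include <cstddef>
    #include <cstdio>

    // Simplified stand-ins for HotSpot's align_down/align_up (power-of-two alignment only).
    static size_t align_down(size_t n, size_t alignment) { return n & ~(alignment - 1); }
    static size_t align_up(size_t n, size_t alignment)   { return (n + alignment - 1) & ~(alignment - 1); }

    int main() {
      const size_t card_size_in_words   = 64;              // assumed: 512-byte cards / 8-byte words
      const size_t max_tlab_size_words  = 512 * 1024 / 8;  // hypothetical TLAB cap, for illustration

      // calculate_max_plab() after this change: round the TLAB cap down to a card boundary.
      size_t max_plab = align_down(max_tlab_size_words, card_size_in_words);

      // In allocate_from_plab_slow() (next hunk), oversized requests round *up* to a card boundary.
      size_t request  = 1000;  // words
      size_t min_size = align_up(request, card_size_in_words);

      printf("max_plab=%zu words, min_size=%zu words\n", max_plab, min_size);
      return 0;
    }

With these numbers, a 1000-word request rounds up to 1024 words (16 whole cards), matching the min_size computation in the next hunk.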
@@ -395,51 +393,50 @@ HeapWord* ShenandoahGenerationalHeap::allocate_from_plab_slow(Thread* thread, si
  assert(mode()->is_generational(), "PLABs only relevant to generational GC");
  const size_t plab_min_size = this->plab_min_size();
+  // PLABs are aligned to card boundaries to avoid synchronization with concurrent
+  // allocations in other PLABs.
  const size_t min_size = (size > plab_min_size)? align_up(size, CardTable::card_size_in_words()): plab_min_size;

-  // Figure out size of new PLAB, looking back at heuristics. Expand aggressively. PLABs must align on size
-  // of card table in order to avoid the need for synchronization when registering newly allocated objects within
-  // the card table.
+  // Figure out size of new PLAB, using the value determined at the last refill.
  size_t cur_size = ShenandoahThreadLocalData::plab_size(thread);
  if (cur_size == 0) {
    cur_size = plab_min_size;
  }

-  // Limit growth of PLABs to the smaller of ShenandoahMaxEvacLABRatio * the minimum size and ShenandoahHumongousThreshold.
-  // This minimum value is represented by generational_heap->plab_max_size(). Enforcing this limit enables more equitable
-  // distribution of available evacuation budget between the many threads that are coordinating in the evacuation effort.
+  // Expand aggressively, doubling at each refill in this epoch, with a ceiling of plab_max_size().
  size_t future_size = MIN2(cur_size * 2, plab_max_size());
-  assert(is_aligned(future_size, CardTable::card_size_in_words()), "Align by design, future_size: " SIZE_FORMAT
-         ", alignment: " SIZE_FORMAT ", cur_size: " SIZE_FORMAT ", max: " SIZE_FORMAT,
+  // Doubling, starting at a card-multiple, should give us a card-multiple. (Ceiling and floor
+  // are card multiples.)
+  assert(is_aligned(future_size, CardTable::card_size_in_words()), "Card multiple by construction, future_size: " SIZE_FORMAT
+         ", card_size: " SIZE_FORMAT ", cur_size: " SIZE_FORMAT ", max: " SIZE_FORMAT,
         future_size, (size_t) CardTable::card_size_in_words(), cur_size, plab_max_size());

  // Record new heuristic value even if we take any shortcut. This captures
  // the case when moderately-sized objects always take a shortcut. At some point,
  // heuristics should catch up with them. Note that the requested cur_size may
  // not be honored, but we remember that this is the preferred size.
+  log_debug(gc, free)("Set new PLAB size: " SIZE_FORMAT, future_size);
  ShenandoahThreadLocalData::set_plab_size(thread, future_size);
  if (cur_size < size) {
    // The PLAB to be allocated is still not large enough to hold the object. Fall back to shared allocation.
    // This avoids retiring perfectly good PLABs in order to represent a single large object allocation.
+    log_debug(gc, free)("Current PLAB size (" SIZE_FORMAT ") is too small for " SIZE_FORMAT, cur_size, size);
    return nullptr;
  }

  // Retire current PLAB, and allocate a new one.
  PLAB* plab = ShenandoahThreadLocalData::plab(thread);
  if (plab->words_remaining() < plab_min_size) {
-    // Retire current PLAB, and allocate a new one.
-    // CAUTION: retire_plab may register the remnant filler object with the remembered set scanner without a lock. This
-    // is safe iff it is assured that each PLAB is a whole-number multiple of card-mark memory size and each PLAB is
-    // aligned with the start of a card's memory range.
+    // Retire current PLAB. This takes care of any PLAB book-keeping.
+    // retire_plab() registers the remnant filler object with the remembered set scanner without a lock.
+    // Since PLABs are card-aligned, concurrent registrations in other PLABs don't interfere.
    retire_plab(plab, thread);

    size_t actual_size = 0;
-    // allocate_new_plab resets plab_evacuated and plab_promoted and disables promotions if old-gen available is
-    // less than the remaining evacuation need. It also adjusts plab_preallocated and expend_promoted if appropriate.
    HeapWord* plab_buf = allocate_new_plab(min_size, cur_size, &actual_size);
    if (plab_buf == nullptr) {
      if (min_size == plab_min_size) {
-        // Disable plab promotions for this thread because we cannot even allocate a plab of minimal size. This allows us
+        // Disable PLAB promotions for this thread because we cannot even allocate a minimal PLAB. This allows us
        // to fail faster on subsequent promotion attempts.
        ShenandoahThreadLocalData::disable_plab_promotions(thread);
      }
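
The growth policy above is easy to check in isolation. A minimal sketch, assuming a 64-word card size and hypothetical card-aligned bounds of 1024 and 65536 words (the real bounds come from plab_min_size() and calculate_max_plab()); it verifies the card-multiple invariant the new assert relies on:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t card_words    = 64;      // assumed: 512-byte cards, 8-byte words
      const size_t plab_min_size = 1024;    // hypothetical card-aligned floor (words)
      const size_t plab_max_size = 65536;   // hypothetical card-aligned ceiling (words)

      // Simulate successive refills in one evacuation epoch: double each time, cap at the ceiling.
      size_t cur_size = plab_min_size;
      while (cur_size < plab_max_size) {
        size_t future_size = std::min(cur_size * 2, plab_max_size);
        // "Card multiple by construction": doubling a card multiple, or clamping to the
        // card-aligned ceiling, keeps the size a card multiple.
        assert(future_size % card_words == 0);
        cur_size = future_size;
      }
      return 0;
    }

Because both the floor and the ceiling are card multiples and doubling preserves divisibility, no intermediate value can violate the alignment the remembered-set registration depends on.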
@@ -468,7 +465,7 @@ HeapWord* ShenandoahGenerationalHeap::allocate_from_plab_slow(Thread* thread, si
    }
    return plab->allocate(size);
  } else {
-    // If there's still at least min_size() words available within the current plab, don't retire it. Let's gnaw
+    // If there's still at least min_size() words available within the current plab, don't retire it. Let's nibble
    // away on this plab as long as we can. Meanwhile, return nullptr to force this particular allocation request
    // to be satisfied with a shared allocation. By packing more promotions into the previously allocated PLAB, we
    // reduce the likelihood of evacuation failures, and we reduce the need for downsizing our PLABs.
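
To summarize the retire-versus-nibble decision in this else branch, here is a simplified decision sketch. This is not the HotSpot code; decide(), SlowPathAction, and the numeric values are illustrative stand-ins for the fields used above:

    #include <cassert>
    #include <cstddef>

    enum class SlowPathAction { RetireAndRefill, FallBackToShared };

    // Simplified model of the slow path: retire and replace the PLAB only when
    // little usable space remains; otherwise keep nibbling at the current PLAB
    // and satisfy this one request from shared space.
    SlowPathAction decide(size_t words_remaining, size_t plab_min_size) {
      if (words_remaining < plab_min_size) {
        return SlowPathAction::RetireAndRefill;   // too little left to be useful
      }
      // Keeping the PLAB packs more promotions into it, reducing evacuation
      // failures and the need to downsize; this request goes to shared space.
      return SlowPathAction::FallBackToShared;
    }

    int main() {
      assert(decide(100, 1024)  == SlowPathAction::RetireAndRefill);
      assert(decide(4096, 1024) == SlowPathAction::FallBackToShared);
      return 0;
    }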