|
41 | 41 | #include "runtime/globals_extension.hpp"
|
42 | 42 | #include "utilities/quickSort.hpp"
|
43 | 43 |
|
|  | +// Debug-only sanity check, active only in ASSERT (debug) builds: walk every |
|  | +// heap region and verify that none still carries a saved top-before-promote |
|  | +// value — i.e. that no region remains flagged for in-place promotion from a |
|  | +// previous cycle before a new selection begins. |
| 44 | +inline void assert_no_in_place_promotions() { |
| 45 | +#ifdef ASSERT |
| 46 | + class ShenandoahNoInPlacePromotions : public ShenandoahHeapRegionClosure { |
| 47 | + public: |
| 48 | + void heap_region_do(ShenandoahHeapRegion *r) override { |
| 49 | + assert(r->get_top_before_promote() == nullptr, |
| 50 | + "Region " SIZE_FORMAT " should not be ready for in-place promotion", r->index()); |
| 51 | + } |
| 52 | + } cl; |
| 53 | + ShenandoahHeap::heap()->heap_region_iterate(&cl); |
| 54 | +#endif |
| 55 | +} |
| 56 | + |
| 57 | + |
44 | 58 | // sort by decreasing garbage (so most garbage comes first)
|
45 | 59 | int ShenandoahHeuristics::compare_by_garbage(RegionData a, RegionData b) {
|
46 | 60 | if (a._u._garbage > b._u._garbage)
|
@@ -125,6 +139,10 @@ static int compare_by_aged_live(AgedRegionData a, AgedRegionData b) {
|
125 | 139 | // Returns bytes of old-gen memory consumed by selected aged regions
|
126 | 140 | size_t ShenandoahHeuristics::select_aged_regions(size_t old_available, size_t num_regions,
|
127 | 141 | bool candidate_regions_for_promotion_by_copy[]) {
|
| 142 | + |
| 143 | + // There should be no regions configured for subsequent in-place-promotions carried over from the previous cycle. |
| 144 | + assert_no_in_place_promotions(); |
| 145 | + |
128 | 146 | ShenandoahHeap* heap = ShenandoahHeap::heap();
|
129 | 147 | assert(heap->mode()->is_generational(), "Only in generational mode");
|
130 | 148 | ShenandoahMarkingContext* const ctx = heap->marking_context();
|
@@ -153,22 +171,27 @@ size_t ShenandoahHeuristics::select_aged_regions(size_t old_available, size_t nu
|
153 | 171 | continue;
|
154 | 172 | }
|
155 | 173 | if (r->age() >= InitialTenuringThreshold) {
|
156 |
| - r->save_top_before_promote(); |
157 | 174 | if ((r->garbage() < old_garbage_threshold)) {
|
158 | 175 | HeapWord* tams = ctx->top_at_mark_start(r);
|
159 | 176 | HeapWord* original_top = r->top();
|
160 | 177 | if (tams == original_top) {
|
161 |
| - // Fill the remnant memory within this region to assure no allocations prior to promote in place. Otherwise, |
162 |
| - // newly allocated objects will not be parseable when promote in place tries to register them. Furthermore, any |
163 |
| - // new allocations would not necessarily be eligible for promotion. This addresses both issues. |
| 178 | + // No allocations from this region have been made during concurrent mark. It meets all the criteria |
| 179 | + // for in-place-promotion. Though we only need the value of top when we fill the end of the region, |
| 180 | + // we use this field to indicate that this region should be promoted in place during the evacuation |
| 181 | + // phase. |
| 182 | + r->save_top_before_promote(); |
| 183 | + |
164 | 184 | size_t remnant_size = r->free() / HeapWordSize;
|
165 | 185 | if (remnant_size > ShenandoahHeap::min_fill_size()) {
|
166 | 186 | ShenandoahHeap::fill_with_object(original_top, remnant_size);
|
| 187 | + // Fill the remnant memory within this region to assure no allocations prior to promote in place. Otherwise, |
| 188 | + // newly allocated objects will not be parseable when promote in place tries to register them. Furthermore, any |
| 189 | + // new allocations would not necessarily be eligible for promotion. This addresses both issues. |
167 | 190 | r->set_top(r->end());
|
168 | 191 | promote_in_place_pad += remnant_size * HeapWordSize;
|
169 | 192 | } else {
|
170 | 193 | // Since the remnant is so small that it cannot be filled, we don't have to worry about any accidental
|
171 |
| - // allocations occuring within this region before the region is promoted in place. |
| 194 | + // allocations occurring within this region before the region is promoted in place. |
172 | 195 | }
|
173 | 196 | promote_in_place_regions++;
|
174 | 197 | promote_in_place_live += r->get_live_data_bytes();
|
@@ -319,7 +342,7 @@ void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collec
|
319 | 342 | // ShenandoahOldGarbageThreshold so it will be promoted in place, or because there is not sufficient room
|
320 | 343 | // in old gen to hold the evacuated copies of this region's live data. In both cases, we choose not to
|
321 | 344 | // place this region into the collection set.
|
322 |
| - if (region->garbage_before_padded_for_promote() < old_garbage_threshold) { |
| 345 | + if (region->get_top_before_promote() != nullptr) { |
323 | 346 | regular_regions_promoted_in_place++;
|
324 | 347 | regular_regions_promoted_usage += region->used_before_promote();
|
325 | 348 | }
|
|
0 commit comments