diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp
index a6d2dc47f6b..aba8f693cf8 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp
@@ -30,6 +30,8 @@
 #include "gc/shenandoah/shenandoahFreeSet.hpp"
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 #include "gc/shenandoah/shenandoahGeneration.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
+#include "gc/shenandoah/shenandoahOldGeneration.hpp"
 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
 #include "logging/log.hpp"
 #include "logging/logTag.hpp"
@@ -56,9 +58,6 @@ const double ShenandoahAdaptiveHeuristics::HIGHEST_EXPECTED_AVAILABLE_AT_END = 0
 const double ShenandoahAdaptiveHeuristics::MINIMUM_CONFIDENCE = 0.319; // 25%
 const double ShenandoahAdaptiveHeuristics::MAXIMUM_CONFIDENCE = 3.291; // 99.9%
 
-// TODO: Provide comment here or remove if not used
-const uint ShenandoahAdaptiveHeuristics::MINIMUM_RESIZE_INTERVAL = 10;
-
 ShenandoahAdaptiveHeuristics::ShenandoahAdaptiveHeuristics(ShenandoahGeneration* generation) :
   ShenandoahHeuristics(generation),
   _margin_of_error_sd(ShenandoahAdaptiveInitialConfidence),
@@ -110,10 +109,27 @@ void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(Shenand
   QuickSort::sort<RegionData>(data, (int)size, compare_by_garbage, false);
 
   if (is_generational) {
+    for (size_t idx = 0; idx < size; idx++) {
+      ShenandoahHeapRegion* r = data[idx]._region;
+      if (cset->is_preselected(r->index())) {
+        assert(r->age() >= InitialTenuringThreshold, "Preselected regions must have tenure age");
+        // Entire region will be promoted.  This region does not impact young-gen or old-gen evacuation reserve.
+        // This region has been pre-selected and its impact on promotion reserve is already accounted for.
+
+        // r->used() is r->garbage() + r->get_live_data_bytes()
+        // Since all live data in this region is being evacuated from young-gen, it is as if this memory
+        // is garbage insofar as young-gen is concerned.  Counting this as garbage reduces the need to
+        // reclaim highly utilized young-gen regions just for the sake of finding min_garbage to reclaim
+        // within young-gen memory.
+
+        cur_young_garbage += r->garbage();
+        cset->add_region(r);
+      }
+    }
     if (is_global) {
       size_t max_young_cset    = (size_t) (heap->get_young_evac_reserve() / ShenandoahEvacWaste);
       size_t young_cur_cset = 0;
-      size_t max_old_cset    = (size_t) (heap->get_old_evac_reserve() / ShenandoahEvacWaste);
+      size_t max_old_cset    = (size_t) (heap->get_old_evac_reserve() / ShenandoahOldEvacWaste);
       size_t old_cur_cset = 0;
       size_t free_target = (capacity * ShenandoahMinFreeThreshold) / 100 + max_young_cset;
       size_t min_garbage = (free_target > actual_free) ? (free_target - actual_free) : 0;
@@ -126,6 +142,9 @@ void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(Shenand
 
       for (size_t idx = 0; idx < size; idx++) {
         ShenandoahHeapRegion* r = data[idx]._region;
+        if (cset->is_preselected(r->index())) {
+          continue;
+        }
         bool add_region = false;
         if (r->is_old()) {
           size_t new_cset = old_cur_cset + r->get_live_data_bytes();
@@ -133,17 +152,6 @@ void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(Shenand
             add_region = true;
             old_cur_cset = new_cset;
           }
-        } else if (cset->is_preselected(r->index())) {
-          assert(r->age() >= InitialTenuringThreshold, "Preselected regions must have tenure age");
-          // Entire region will be promoted, This region does not impact young-gen or old-gen evacuation reserve.
-          // This region has been pre-selected and its impact on promotion reserve is already accounted for.
-          add_region = true;
-          // r->used() is r->garbage() + r->get_live_data_bytes()
-          // Since all live data in this region is being evacuated from young-gen, it is as if this memory
-          // is garbage insofar as young-gen is concerned.  Counting this as garbage reduces the need to
-          // reclaim highly utilized young-gen regions just for the sake of finding min_garbage to reclaim
-          // within youn-gen memory.
-          cur_young_garbage += r->used();
         } else if (r->age() < InitialTenuringThreshold) {
           size_t new_cset = young_cur_cset + r->get_live_data_bytes();
           size_t region_garbage = r->garbage();
@@ -176,42 +184,29 @@ void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(Shenand
 
       for (size_t idx = 0; idx < size; idx++) {
         ShenandoahHeapRegion* r = data[idx]._region;
-        bool add_region = false;
-
-        if (!r->is_old()) {
-          if (cset->is_preselected(r->index())) {
-            assert(r->age() >= InitialTenuringThreshold, "Preselected regions must have tenure age");
-            // Entire region will be promoted, This region does not impact young-gen evacuation reserve.  Memory has already
-            // been set aside to hold evacuation results as advance_promotion_reserve.
-            add_region = true;
-            // Since all live data in this region is being evacuated from young-gen, it is as if this memory
-            // is garbage insofar as young-gen is concerned.  Counting this as garbage reduces the need to
-            // reclaim highly utilized young-gen regions just for the sake of finding min_garbage to reclaim
-            // within youn-gen memory
-            cur_young_garbage += r->get_live_data_bytes();
-          } else if  (r->age() < InitialTenuringThreshold) {
-            size_t new_cset = cur_cset + r->get_live_data_bytes();
-            size_t region_garbage = r->garbage();
-            size_t new_garbage = cur_young_garbage + region_garbage;
-            bool add_regardless = (region_garbage > ignore_threshold) && (new_garbage < min_garbage);
-            if ((new_cset <= max_cset) && (add_regardless || (region_garbage > garbage_threshold))) {
-              add_region = true;
-              cur_cset = new_cset;
-              cur_young_garbage = new_garbage;
-            }
-          }
-          // Note that we do not add aged regions if they were not pre-selected.  The reason they were not preselected
-          // is because there is not sufficient room in old-gen to hold their to-be-promoted live objects.
-
-          if (add_region) {
+        if (cset->is_preselected(r->index())) {
+          continue;
+        }
+        if (r->age() < InitialTenuringThreshold) {
+          size_t new_cset = cur_cset + r->get_live_data_bytes();
+          size_t region_garbage = r->garbage();
+          size_t new_garbage = cur_young_garbage + region_garbage;
+          bool add_regardless = (region_garbage > ignore_threshold) && (new_garbage < min_garbage);
+          assert(r->is_young(), "Only young candidates expected in the data array");
+          if ((new_cset <= max_cset) && (add_regardless || (region_garbage > garbage_threshold))) {
+            cur_cset = new_cset;
+            cur_young_garbage = new_garbage;
             cset->add_region(r);
           }
         }
+        // Note that we do not add aged regions if they were not pre-selected.  The reason they were not preselected
+        // is because there is not sufficient room in old-gen to hold their to-be-promoted live objects or because
+        // they are to be promoted in place.
       }
     }
   } else {
     // Traditional Shenandoah (non-generational)
-    size_t capacity    = ShenandoahHeap::heap()->soft_max_capacity();
+    size_t capacity    = ShenandoahHeap::heap()->max_capacity();
     size_t max_cset    = (size_t)((1.0 * capacity / 100 * ShenandoahEvacReserve) / ShenandoahEvacWaste);
     size_t free_target = (capacity * ShenandoahMinFreeThreshold) / 100 + max_cset;
     size_t min_garbage = (free_target > actual_free) ? (free_target - actual_free) : 0;
@@ -243,12 +238,21 @@ void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(Shenand
       }
     }
   }
+
+  size_t collected_old = cset->get_old_bytes_reserved_for_evacuation();
+  size_t collected_promoted = cset->get_young_bytes_to_be_promoted();
+  size_t collected_young = cset->get_young_bytes_reserved_for_evacuation();
+
+  log_info(gc, ergo)("Chosen CSet evacuates young: " SIZE_FORMAT "%s (of which at least: " SIZE_FORMAT "%s are to be promoted), "
+                     "old: " SIZE_FORMAT "%s",
+                     byte_size_in_proper_unit(collected_young),    proper_unit_for_byte_size(collected_young),
+                     byte_size_in_proper_unit(collected_promoted), proper_unit_for_byte_size(collected_promoted),
+                     byte_size_in_proper_unit(collected_old),      proper_unit_for_byte_size(collected_old));
 }
 
 void ShenandoahAdaptiveHeuristics::record_cycle_start() {
   ShenandoahHeuristics::record_cycle_start();
   _allocation_rate.allocation_counter_reset();
-  ++_cycles_since_last_resize;
 }
 
 void ShenandoahAdaptiveHeuristics::record_success_concurrent(bool abbreviated) {
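For readers unfamiliar with the logging idiom in the new cset summary above: byte_size_in_proper_unit()/proper_unit_for_byte_size() scale a byte count and supply the matching unit suffix. The following is a standalone approximation with a simplified scaling rule, not the real helpers (which live in HotSpot's utilities/globalDefinitions.hpp):

#include <cstdio>
#include <cstddef>

// Simplified analogues of byte_size_in_proper_unit() and proper_unit_for_byte_size():
// pick a unit, then scale the count to match it.
static const char* unit_for(size_t bytes) {
  return bytes >= ((size_t)1 << 20) ? "M" : bytes >= ((size_t)1 << 10) ? "K" : "B";
}
static size_t scaled(size_t bytes) {
  return bytes >= ((size_t)1 << 20) ? bytes >> 20 : bytes >= ((size_t)1 << 10) ? bytes >> 10 : bytes;
}

int main() {
  size_t young = 384u * 1024 * 1024, promoted = 64u * 1024 * 1024, old_bytes = 32u * 1024 * 1024;
  printf("Chosen CSet evacuates young: %zu%s (of which at least: %zu%s are to be promoted), old: %zu%s\n",
         scaled(young), unit_for(young), scaled(promoted), unit_for(promoted),
         scaled(old_bytes), unit_for(old_bytes));
  return 0;
}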
@@ -324,6 +328,84 @@ static double saturate(double value, double min, double max) {
   return MAX2(MIN2(value, max), min);
 }
 
+// Return a conservative estimate of how much memory can be allocated before we need to start GC. The estimate is based
+// on memory that is currently available within young generation plus all of the memory that will be added to the young
+// generation at the end of the current cycle (as represented by young_regions_to_be_reclaimed) and on the anticipated
+// amount of time required to perform a GC.
+size_t ShenandoahAdaptiveHeuristics::bytes_of_allocation_runway_before_gc_trigger(size_t young_regions_to_be_reclaimed) {
+  assert(_generation->is_young(), "Only meaningful for young-gen heuristic");
+
+  size_t max_capacity = _generation->max_capacity();
+  size_t capacity = _generation->soft_max_capacity();
+  size_t usage = _generation->used();
+  size_t available = (capacity > usage) ? capacity - usage : 0;
+  size_t allocated = _generation->bytes_allocated_since_gc_start();
+
+  size_t available_young_collected = ShenandoahHeap::heap()->collection_set()->get_young_available_bytes_collected();
+  size_t anticipated_available =
+    available + young_regions_to_be_reclaimed * ShenandoahHeapRegion::region_size_bytes() - available_young_collected;
+  size_t allocation_headroom = anticipated_available;
+  size_t spike_headroom = capacity * ShenandoahAllocSpikeFactor / 100;
+  size_t penalties      = capacity * _gc_time_penalties / 100;
+
+  double rate = _allocation_rate.sample(allocated);
+
+  // At what value of available, would avg and spike triggers occur?
+  //  if allocation_headroom < avg_cycle_time * avg_alloc_rate, then we experience avg trigger
+  //  if allocation_headroom < avg_cycle_time * rate, then we experience spike trigger if is_spiking
+  //
+  // allocation_headroom =
+  //     0, if penalties > available or if penalties + spike_headroom > available
+  //     available - penalties - spike_headroom, otherwise
+  //
+  // so we trigger if available - penalties - spike_headroom < avg_cycle_time * avg_alloc_rate, which is to say
+  //                  available < avg_cycle_time * avg_alloc_rate + penalties + spike_headroom
+  //            or if available < penalties + spike_headroom
+  //
+  // since avg_cycle_time * avg_alloc_rate > 0, the first test is sufficient to test both conditions
+  //
+  // thus, evac_slack_avg is MAX2(0, available - (avg_cycle_time * avg_alloc_rate + penalties + spike_headroom))
+  //
+  // similarly, evac_slack_spiking is MAX2(0, available - (avg_cycle_time * rate + penalties + spike_headroom))
+  // but evac_slack_spiking is only relevant if is_spiking, as defined below.
+
+  double avg_cycle_time = _gc_cycle_time_history->davg() + (_margin_of_error_sd * _gc_cycle_time_history->dsd());
+
+  // TODO: Consider making conservative adjustments to avg_cycle_time, such as: (avg_cycle_time *= 2) in cases where
+  // we expect a longer-than-normal GC duration.  This includes mixed evacuations, evacuations that perform promotion
+  // (including promotion in place), and OLD GC bootstrap cycles.  It has been observed that these cycles sometimes
+  // require twice or more the duration of "normal" GC cycles.  We have experimented with this approach.  While it
+  // does appear to reduce the frequency of degenerated cycles due to late triggers, it also has the effect of reducing
+  // evacuation slack so that there is less memory available to be transferred to OLD.  The result is that we
+  // throttle promotion and it takes too long to move old objects out of the young generation.
+
+  double avg_alloc_rate = _allocation_rate.upper_bound(_margin_of_error_sd);
+  size_t evac_slack_avg;
+  if (anticipated_available > avg_cycle_time * avg_alloc_rate + penalties + spike_headroom) {
+    evac_slack_avg = anticipated_available - (avg_cycle_time * avg_alloc_rate + penalties + spike_headroom);
+  } else {
+    // we have no slack because it's already time to trigger
+    evac_slack_avg = 0;
+  }
+
+  bool is_spiking = _allocation_rate.is_spiking(rate, _spike_threshold_sd);
+  size_t evac_slack_spiking;
+  if (is_spiking) {
+    if (anticipated_available > avg_cycle_time * rate + penalties + spike_headroom) {
+      evac_slack_spiking = anticipated_available - (avg_cycle_time * rate + penalties + spike_headroom);
+    } else {
+      // we have no slack because it's already time to trigger
+      evac_slack_spiking = 0;
+    }
+  } else {
+    evac_slack_spiking = evac_slack_avg;
+  }
+
+  size_t threshold = min_free_threshold();
+  size_t evac_min_threshold = (anticipated_available > threshold) ? anticipated_available - threshold : 0;
+  return MIN3(evac_slack_spiking, evac_slack_avg, evac_min_threshold);
+}
+
 bool ShenandoahAdaptiveHeuristics::should_start_gc() {
   size_t capacity = _generation->soft_max_capacity();
   size_t available = _generation->soft_available();
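A minimal sketch of the clamped-slack computation in the new runway function above, restated as one standalone C++ function to make the clamping explicit. Parameter names are stand-ins for the heuristic's fields, and std::min stands in for HotSpot's MIN3; this is not the patch's API:

#include <algorithm>
#include <cstddef>

// Returns bytes that may still be allocated before one of the three triggers
// (average rate, spike rate, minimum free) would fire; zero once a trigger is due.
size_t allocation_runway(size_t anticipated_available,  // free after anticipated reclaim
                         double avg_cycle_time,         // seconds, incl. margin of error
                         double avg_alloc_rate,         // bytes/sec, upper bound
                         double current_rate,           // bytes/sec, latest sample
                         bool   is_spiking,
                         size_t penalties,              // degen/full GC penalties, bytes
                         size_t spike_headroom,         // alloc-spike reserve, bytes
                         size_t min_free_threshold) {
  size_t avg_demand = (size_t)(avg_cycle_time * avg_alloc_rate) + penalties + spike_headroom;
  size_t evac_slack_avg = (anticipated_available > avg_demand) ? anticipated_available - avg_demand : 0;

  size_t evac_slack_spiking = evac_slack_avg;           // spike trigger inactive when not spiking
  if (is_spiking) {
    size_t spike_demand = (size_t)(avg_cycle_time * current_rate) + penalties + spike_headroom;
    evac_slack_spiking = (anticipated_available > spike_demand) ? anticipated_available - spike_demand : 0;
  }

  size_t evac_min_threshold =
      (anticipated_available > min_free_threshold) ? anticipated_available - min_free_threshold : 0;
  return std::min({evac_slack_spiking, evac_slack_avg, evac_min_threshold});
}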
@@ -347,137 +429,144 @@ bool ShenandoahAdaptiveHeuristics::should_start_gc() {
   double rate = _allocation_rate.sample(allocated);
   _last_trigger = OTHER;
 
-  size_t min_threshold = min_free_threshold();
-
-  if (available < min_threshold) {
-    log_info(gc)("Trigger (%s): Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)",
-                 _generation->name(),
-                 byte_size_in_proper_unit(available),     proper_unit_for_byte_size(available),
-                 byte_size_in_proper_unit(min_threshold), proper_unit_for_byte_size(min_threshold));
-    return resize_and_evaluate();
-  }
-
-  // Check if we need to learn a bit about the application
-  const size_t max_learn = ShenandoahLearningSteps;
-  if (_gc_times_learned < max_learn) {
-    size_t init_threshold = capacity / 100 * ShenandoahInitFreeThreshold;
-    if (available < init_threshold) {
-      log_info(gc)("Trigger (%s): Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "%s) is below initial threshold (" SIZE_FORMAT "%s)",
-                   _generation->name(), _gc_times_learned + 1, max_learn,
-                   byte_size_in_proper_unit(available),       proper_unit_for_byte_size(available),
-                   byte_size_in_proper_unit(init_threshold),  proper_unit_for_byte_size(init_threshold));
+  // OLD generation is maintained to be as small as possible.  Depletion-of-free-pool triggers do not apply to old generation.
+  if (!_generation->is_old()) {
+    size_t min_threshold = min_free_threshold();
+    if (available < min_threshold) {
+      log_info(gc)("Trigger (%s): Free (" SIZE_FORMAT "%s) is below minimum threshold (" SIZE_FORMAT "%s)",
+                   _generation->name(),
+                   byte_size_in_proper_unit(available), proper_unit_for_byte_size(available),
+                   byte_size_in_proper_unit(min_threshold),       proper_unit_for_byte_size(min_threshold));
       return true;
     }
-  }
 
-  //  Rationale:
-  //    The idea is that there is an average allocation rate and there are occasional abnormal bursts (or spikes) of
-  //    allocations that exceed the average allocation rate.  What do these spikes look like?
-  //
-  //    1. At certain phase changes, we may discard large amounts of data and replace it with large numbers of newly
-  //       allocated objects.  This "spike" looks more like a phase change.  We were in steady state at M bytes/sec
-  //       allocation rate and now we're in a "reinitialization phase" that looks like N bytes/sec.  We need the "spike"
-  //       accommodation to give us enough runway to recalibrate our "average allocation rate".
-  //
-  //   2. The typical workload changes.  "Suddenly", our typical workload of N TPS increases to N+delta TPS.  This means
-  //       our average allocation rate needs to be adjusted.  Once again, we need the "spike" accomodation to give us
-  //       enough runway to recalibrate our "average allocation rate".
-  //
-  //    3. Though there is an "average" allocation rate, a given workload's demand for allocation may be very bursty.  We
-  //       allocate a bunch of LABs during the 5 ms that follow completion of a GC, then we perform no more allocations for
-  //       the next 150 ms.  It seems we want the "spike" to represent the maximum divergence from average within the
-  //       period of time between consecutive evaluation of the should_start_gc() service.  Here's the thinking:
-  //
-  //       a) Between now and the next time I ask whether should_start_gc(), we might experience a spike representing
-  //          the anticipated burst of allocations.  If that would put us over budget, then we should start GC immediately.
-  //       b) Between now and the anticipated depletion of allocation pool, there may be two or more bursts of allocations.
-  //          If there are more than one of these bursts, we can "approximate" that these will be separated by spans of
-  //          time with very little or no allocations so the "average" allocation rate should be a suitable approximation
-  //          of how this will behave.
-  //
-  //    For cases 1 and 2, we need to "quickly" recalibrate the average allocation rate whenever we detect a change
-  //    in operation mode.  We want some way to decide that the average rate has changed.  Make average allocation rate
-  //    computations an independent effort.
-
-
-  // TODO: Account for inherent delays in responding to GC triggers
-  //  1. It has been observed that delays of 200 ms or greater are common between the moment we return true from should_start_gc()
-  //     and the moment at which we begin execution of the concurrent reset phase.  Add this time into the calculation of
-  //     avg_cycle_time below.  (What is "this time"?  Perhaps we should remember recent history of this delay for the
-  //     running workload and use the maximum delay recently seen for "this time".)
-  //  2. The frequency of inquiries to should_start_gc() is adaptive, ranging between ShenandoahControlIntervalMin and
-  //     ShenandoahControlIntervalMax.  The current control interval (or the max control interval) should also be added into
-  //     the calculation of avg_cycle_time below.
-
-  // Check if allocation headroom is still okay. This also factors in:
-  //   1. Some space to absorb allocation spikes (ShenandoahAllocSpikeFactor)
-  //   2. Accumulated penalties from Degenerated and Full GC
-  size_t allocation_headroom = available;
-  size_t spike_headroom = capacity / 100 * ShenandoahAllocSpikeFactor;
-  size_t penalties      = capacity / 100 * _gc_time_penalties;
-
-  allocation_headroom -= MIN2(allocation_headroom, penalties);
-  allocation_headroom -= MIN2(allocation_headroom, spike_headroom);
+    // Check if we need to learn a bit about the application
+    const size_t max_learn = ShenandoahLearningSteps;
+    if (_gc_times_learned < max_learn) {
+      size_t init_threshold = capacity / 100 * ShenandoahInitFreeThreshold;
+      if (available < init_threshold) {
+        log_info(gc)("Trigger (%s): Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free ("
+                     SIZE_FORMAT "%s) is below initial threshold (" SIZE_FORMAT "%s)",
+                     _generation->name(), _gc_times_learned + 1, max_learn,
+                     byte_size_in_proper_unit(available), proper_unit_for_byte_size(available),
+                     byte_size_in_proper_unit(init_threshold),      proper_unit_for_byte_size(init_threshold));
+        return true;
+      }
+    }
 
-  double avg_cycle_time = _gc_cycle_time_history->davg() + (_margin_of_error_sd * _gc_cycle_time_history->dsd());
+    //  Rationale:
+    //    The idea is that there is an average allocation rate and there are occasional abnormal bursts (or spikes) of
+    //    allocations that exceed the average allocation rate.  What do these spikes look like?
+    //
+    //    1. At certain phase changes, we may discard large amounts of data and replace it with large numbers of newly
+    //       allocated objects.  This "spike" looks more like a phase change.  We were in steady state at M bytes/sec
+    //       allocation rate and now we're in a "reinitialization phase" that looks like N bytes/sec.  We need the "spike"
+    //       accommodation to give us enough runway to recalibrate our "average allocation rate".
+    //
+    //   2. The typical workload changes.  "Suddenly", our typical workload of N TPS increases to N+delta TPS.  This means
+    //       our average allocation rate needs to be adjusted.  Once again, we need the "spike" accommodation to give us
+    //       enough runway to recalibrate our "average allocation rate".
+    //
+    //    3. Though there is an "average" allocation rate, a given workload's demand for allocation may be very bursty.  We
+    //       allocate a bunch of LABs during the 5 ms that follow completion of a GC, then we perform no more allocations for
+    //       the next 150 ms.  It seems we want the "spike" to represent the maximum divergence from average within the
+    //       period of time between consecutive evaluation of the should_start_gc() service.  Here's the thinking:
+    //
+    //       a) Between now and the next time I ask whether should_start_gc(), we might experience a spike representing
+    //          the anticipated burst of allocations.  If that would put us over budget, then we should start GC immediately.
+    //       b) Between now and the anticipated depletion of allocation pool, there may be two or more bursts of allocations.
+    //          If there are more than one of these bursts, we can "approximate" that these will be separated by spans of
+    //          time with very little or no allocations so the "average" allocation rate should be a suitable approximation
+    //          of how this will behave.
+    //
+    //    For cases 1 and 2, we need to "quickly" recalibrate the average allocation rate whenever we detect a change
+    //    in operation mode.  We want some way to decide that the average rate has changed.  Make average allocation rate
+    //    computations an independent effort.
 
-  double avg_alloc_rate = _allocation_rate.upper_bound(_margin_of_error_sd);
-  log_debug(gc)("%s: average GC time: %.2f ms, allocation rate: %.0f %s/s",
-          _generation->name(), avg_cycle_time * 1000,
-          byte_size_in_proper_unit(avg_alloc_rate), proper_unit_for_byte_size(avg_alloc_rate));
-
-  if (avg_cycle_time > allocation_headroom / avg_alloc_rate) {
-    log_info(gc)("Trigger (%s): Average GC time (%.2f ms) is above the time for average allocation rate (%.0f %sB/s) to deplete free headroom (" SIZE_FORMAT "%s) (margin of error = %.2f)",
-                 _generation->name(), avg_cycle_time * 1000,
-                 byte_size_in_proper_unit(avg_alloc_rate),      proper_unit_for_byte_size(avg_alloc_rate),
-                 byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom),
-                 _margin_of_error_sd);
-
-    log_info(gc, ergo)("Free headroom: " SIZE_FORMAT "%s (free) - " SIZE_FORMAT "%s (spike) - " SIZE_FORMAT "%s (penalties) = " SIZE_FORMAT "%s",
-                       byte_size_in_proper_unit(available),           proper_unit_for_byte_size(available),
-                       byte_size_in_proper_unit(spike_headroom),      proper_unit_for_byte_size(spike_headroom),
-                       byte_size_in_proper_unit(penalties),           proper_unit_for_byte_size(penalties),
-                       byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom));
-
-    _last_trigger = RATE;
-    return resize_and_evaluate();
-  }
 
-  bool is_spiking = _allocation_rate.is_spiking(rate, _spike_threshold_sd);
-  if (is_spiking && avg_cycle_time > allocation_headroom / rate) {
-    log_info(gc)("Trigger (%s): Average GC time (%.2f ms) is above the time for instantaneous allocation rate (%.0f %sB/s) to deplete free headroom (" SIZE_FORMAT "%s) (spike threshold = %.2f)",
-                 _generation->name(), avg_cycle_time * 1000,
-                 byte_size_in_proper_unit(rate), proper_unit_for_byte_size(rate),
-                 byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom),
-                 _spike_threshold_sd);
-    _last_trigger = SPIKE;
-    return resize_and_evaluate();
-  }
+    // Check if allocation headroom is still okay. This also factors in:
+    //   1. Some space to absorb allocation spikes (ShenandoahAllocSpikeFactor)
+    //   2. Accumulated penalties from Degenerated and Full GC
 
-  return ShenandoahHeuristics::should_start_gc();
-}
+    size_t allocation_headroom = available;
+    size_t spike_headroom = capacity / 100 * ShenandoahAllocSpikeFactor;
+    size_t penalties      = capacity / 100 * _gc_time_penalties;
 
-bool ShenandoahAdaptiveHeuristics::resize_and_evaluate() {
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-  if (!heap->mode()->is_generational()) {
-    // We only attempt to resize the generations in generational mode.
-    return true;
-  }
+    allocation_headroom -= MIN2(allocation_headroom, penalties);
+    allocation_headroom -= MIN2(allocation_headroom, spike_headroom);
 
-  if (_cycles_since_last_resize <= MINIMUM_RESIZE_INTERVAL) {
-    log_info(gc, ergo)("Not resizing %s for another " UINT32_FORMAT " cycles",
-            _generation->name(), _cycles_since_last_resize);
-    return true;
-  }
+    double avg_cycle_time = _gc_cycle_time_history->davg() + (_margin_of_error_sd * _gc_cycle_time_history->dsd());
+    double avg_alloc_rate = _allocation_rate.upper_bound(_margin_of_error_sd);
+    log_debug(gc)("%s: average GC time: %.2f ms, allocation rate: %.0f %s/s",
+                  _generation->name(),
+                  avg_cycle_time * 1000, byte_size_in_proper_unit(avg_alloc_rate), proper_unit_for_byte_size(avg_alloc_rate));
 
-  if (!heap->generation_sizer()->transfer_capacity(_generation)) {
-    // We could not enlarge our generation, so we must start a gc cycle.
-    log_info(gc, ergo)("Could not increase size of %s, begin gc cycle", _generation->name());
-    return true;
-  }
+    if (avg_cycle_time > allocation_headroom / avg_alloc_rate) {
+
+      log_info(gc)("Trigger (%s): Average GC time (%.2f ms) is above the time for average allocation rate (%.0f %sB/s)"
+                   " to deplete free headroom (" SIZE_FORMAT "%s) (margin of error = %.2f)",
+                   _generation->name(), avg_cycle_time * 1000,
+                   byte_size_in_proper_unit(avg_alloc_rate), proper_unit_for_byte_size(avg_alloc_rate),
+                   byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom),
+                   _margin_of_error_sd);
+
+      log_info(gc, ergo)("Free headroom: " SIZE_FORMAT "%s (free) - " SIZE_FORMAT "%s (spike) - "
+                         SIZE_FORMAT "%s (penalties) = " SIZE_FORMAT "%s",
+                         byte_size_in_proper_unit(available),           proper_unit_for_byte_size(available),
+                         byte_size_in_proper_unit(spike_headroom),      proper_unit_for_byte_size(spike_headroom),
+                         byte_size_in_proper_unit(penalties),           proper_unit_for_byte_size(penalties),
+                         byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom));
 
-  log_info(gc)("Increased size of %s generation, re-evaluate trigger criteria", _generation->name());
-  return should_start_gc();
+      _last_trigger = RATE;
+      return true;
+    }
+
+    bool is_spiking = _allocation_rate.is_spiking(rate, _spike_threshold_sd);
+    if (is_spiking && avg_cycle_time > allocation_headroom / rate) {
+      log_info(gc)("Trigger (%s): Average GC time (%.2f ms) is above the time for instantaneous allocation rate (%.0f %sB/s)"
+                   " to deplete free headroom (" SIZE_FORMAT "%s) (spike threshold = %.2f)",
+                   _generation->name(), avg_cycle_time * 1000,
+                   byte_size_in_proper_unit(rate), proper_unit_for_byte_size(rate),
+                   byte_size_in_proper_unit(allocation_headroom), proper_unit_for_byte_size(allocation_headroom),
+                   _spike_threshold_sd);
+      _last_trigger = SPIKE;
+      return true;
+    }
+
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    if (heap->mode()->is_generational()) {
+      // Get through promotions and mixed evacuations as quickly as possible.  These cycles sometimes require significantly
+      // more time than traditional young-generation cycles so start them up as soon as possible.  This is a "mitigation"
+      // for the reality that old-gen and young-gen activities are not truly "concurrent".  If there is old-gen work to
+      // be done, we start up the young-gen GC threads so they can do some of this old-gen work.  As implemented, promotion
+      // gets priority over old-gen marking.
+
+      size_t promo_potential = heap->get_promotion_potential();
+      size_t promo_in_place_potential = heap->get_promotion_in_place_potential();
+      ShenandoahOldHeuristics* old_heuristics = (ShenandoahOldHeuristics*) heap->old_generation()->heuristics();
+      size_t mixed_candidates = old_heuristics->unprocessed_old_collection_candidates();
+      if (promo_potential > 0) {
+        // Detect unsigned arithmetic underflow
+        assert(promo_potential < heap->capacity(), "Sanity");
+        log_info(gc)("Trigger (%s): expedite promotion of " SIZE_FORMAT "%s",
+                     _generation->name(), byte_size_in_proper_unit(promo_potential), proper_unit_for_byte_size(promo_potential));
+        return true;
+      } else if (promo_in_place_potential > 0) {
+        // Detect unsigned arithmetic underflow
+        assert(promo_in_place_potential < heap->capacity(), "Sanity");
+        log_info(gc)("Trigger (%s): expedite promotion in place of " SIZE_FORMAT "%s", _generation->name(),
+                     byte_size_in_proper_unit(promo_in_place_potential),
+                     proper_unit_for_byte_size(promo_in_place_potential));
+        return true;
+      } else if (mixed_candidates > 0) {
+        // We need to run young GC in order to open up some free heap regions so we can finish mixed evacuations.
+        log_info(gc)("Trigger (%s): expedite mixed evacuation of " SIZE_FORMAT " regions",
+                     _generation->name(), mixed_candidates);
+        return true;
+      }
+    }
+  }
+  return ShenandoahHeuristics::should_start_gc();
 }
 
 void ShenandoahAdaptiveHeuristics::adjust_last_trigger_parameters(double amount) {
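To make the RATE trigger concrete, here is a worked example with invented numbers (a hypothetical 1 GiB young generation, ShenandoahAllocSpikeFactor assumed at 5, no accumulated penalties). It evaluates the same comparison, avg_cycle_time > allocation_headroom / avg_alloc_rate, as the code above:

#include <cstdio>

int main() {
  double capacity       = 1024.0 * 1024 * 1024; // 1 GiB young generation (hypothetical)
  double available      = 200.0 * 1024 * 1024;  // 200 MiB free
  double spike_headroom = capacity * 5 / 100;   // ShenandoahAllocSpikeFactor assumed 5
  double penalties      = 0;                    // no degen/full GC penalties yet
  double headroom       = available - penalties - spike_headroom; // ~148.8 MiB
  double avg_alloc_rate = 500.0 * 1024 * 1024;  // 500 MiB/s upper-bound rate
  double avg_cycle_time = 0.350;                // 350 ms, incl. margin of error

  // Trigger fires when the GC cannot finish before headroom is exhausted:
  double time_to_deplete = headroom / avg_alloc_rate; // ~0.298 s < 0.350 s -> trigger
  printf("depletion in %.3f s, cycle needs %.3f s -> %s\n",
         time_to_deplete, avg_cycle_time,
         avg_cycle_time > time_to_deplete ? "trigger GC" : "keep running");
  return 0;
}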
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp
index 85b563f146d..494173751ea 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp
@@ -39,7 +39,6 @@ class ShenandoahAllocationRate : public CHeapObj<mtGC> {
 
   double upper_bound(double sds) const;
   bool is_spiking(double rate, double threshold) const;
-
  private:
 
   double instantaneous_rate(double time, size_t allocated) const;
@@ -72,6 +71,8 @@ class ShenandoahAdaptiveHeuristics : public ShenandoahHeuristics {
   virtual bool is_diagnostic()   { return false; }
   virtual bool is_experimental() { return false; }
 
+  virtual size_t bytes_of_allocation_runway_before_gc_trigger(size_t young_regions_to_be_reclaimed);
+
  private:
   // These are used to adjust the margin of error and the spike threshold
   // in response to GC cycle outcomes. These values are shared, but the
@@ -85,13 +86,6 @@ class ShenandoahAdaptiveHeuristics : public ShenandoahHeuristics {
   const static double LOWEST_EXPECTED_AVAILABLE_AT_END;
   const static double HIGHEST_EXPECTED_AVAILABLE_AT_END;
 
-  // At least this many cycles must execute before the heuristic will attempt
-  // to resize its generation. This is to prevent the heuristic from rapidly
-  // maxing out the generation size (which only forces the collector for the
-  // other generation to run more frequently, defeating the purpose of improving
-  // MMU).
-  const static uint MINIMUM_RESIZE_INTERVAL;
-
   friend class ShenandoahAllocationRate;
 
   // Used to record the last trigger that signaled to start a GC.
@@ -106,8 +100,6 @@ class ShenandoahAdaptiveHeuristics : public ShenandoahHeuristics {
   void adjust_margin_of_error(double amount);
   void adjust_spike_threshold(double amount);
 
-  bool resize_and_evaluate();
-
   ShenandoahAllocationRate _allocation_rate;
 
   // The margin of error expressed in standard deviations to add to our
@@ -135,10 +127,6 @@ class ShenandoahAdaptiveHeuristics : public ShenandoahHeuristics {
   // establishes what is 'normal' for the application and is used as a
   // source of feedback to adjust trigger parameters.
   TruncatedSeq _available;
-
-  // Do not attempt to resize the generation for this heuristic until this
-  // value is greater than MINIMUM_RESIZE_INTERVAL.
-  uint _cycles_since_last_resize;
 };
 
 #endif // SHARE_GC_SHENANDOAH_HEURISTICS_SHENANDOAHADAPTIVEHEURISTICS_HPP
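The new virtual bytes_of_allocation_runway_before_gc_trigger() is presumably consumed by control logic deciding whether old-gen work can be scheduled before the next young trigger. A hypothetical caller follows; every name outside the declaration is invented for illustration and is not the patch's API:

#include <cstddef>

struct YoungHeuristic {   // stand-in for the heuristic class above
  virtual size_t bytes_of_allocation_runway_before_gc_trigger(size_t young_regions_to_be_reclaimed) = 0;
  virtual ~YoungHeuristic() {}
};

// Decide whether estimated old-gen work fits within the reported runway.
bool can_afford_old_work(YoungHeuristic* young, size_t regions_to_be_reclaimed,
                         size_t estimated_old_work_bytes) {
  size_t runway = young->bytes_of_allocation_runway_before_gc_trigger(regions_to_be_reclaimed);
  return runway > estimated_old_work_bytes;
}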
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp
index a84c23b678c..250d1317306 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahCompactHeuristics.cpp
@@ -49,7 +49,8 @@ ShenandoahCompactHeuristics::ShenandoahCompactHeuristics(ShenandoahGeneration* g
 bool ShenandoahCompactHeuristics::should_start_gc() {
   size_t max_capacity = _generation->max_capacity();
   size_t capacity     = _generation->soft_max_capacity();
-  size_t available    = _generation->available();
+  size_t usage        = _generation->used();
+  size_t available    = (capacity > usage) ? capacity - usage : 0;
 
   // Make sure the code below treats available without the soft tail.
   size_t soft_tail = max_capacity - capacity;
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp
index ec0c365b873..93a4f414325 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.cpp
@@ -39,11 +39,22 @@
 #include "logging/log.hpp"
 #include "logging/logTag.hpp"
 #include "runtime/globals_extension.hpp"
+#include "utilities/quickSort.hpp"
 
+// sort by decreasing garbage (so most garbage comes first)
 int ShenandoahHeuristics::compare_by_garbage(RegionData a, RegionData b) {
-  if (a._garbage > b._garbage)
+  if (a._u._garbage > b._u._garbage)
     return -1;
-  else if (a._garbage < b._garbage)
+  else if (a._u._garbage < b._u._garbage)
+    return 1;
+  else return 0;
+}
+
+// sort by increasing live (so least live comes first)
+int ShenandoahHeuristics::compare_by_live(RegionData a, RegionData b) {
+  if (a._u._live_data < b._u._live_data)
+    return -1;
+  else if (a._u._live_data > b._u._live_data)
     return 1;
   else return 0;
 }
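The two sort orders serve different goals: compare_by_garbage reclaims the cheapest space first for young collections, while compare_by_live lets old-gen compaction evacuate the least-live regions first. A standalone demonstration using qsort-style three-way comparators of the same shape as the ones above (RegionData simplified here):

#include <cstdlib>
#include <cstdio>

struct Region { size_t garbage; size_t live; };

// Most garbage first: young heuristics reclaim the cheapest space early.
static int by_garbage_desc(const void* a, const void* b) {
  size_t ga = ((const Region*)a)->garbage, gb = ((const Region*)b)->garbage;
  return (ga > gb) ? -1 : (ga < gb) ? 1 : 0;
}

// Least live first: old-gen compaction evacuates the cheapest regions early.
static int by_live_asc(const void* a, const void* b) {
  size_t la = ((const Region*)a)->live, lb = ((const Region*)b)->live;
  return (la < lb) ? -1 : (la > lb) ? 1 : 0;
}

int main() {
  Region r[3] = {{10, 90}, {70, 30}, {40, 60}};
  qsort(r, 3, sizeof(Region), by_garbage_desc);
  printf("garbage order: %zu %zu %zu\n", r[0].garbage, r[1].garbage, r[2].garbage); // 70 40 10
  qsort(r, 3, sizeof(Region), by_live_asc);
  printf("live order: %zu %zu %zu\n", r[0].live, r[1].live, r[2].live);             // 30 60 90
  return 0;
}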
@@ -76,29 +87,170 @@ ShenandoahHeuristics::~ShenandoahHeuristics() {
   FREE_C_HEAP_ARRAY(RegionGarbage, _region_data);
 }
 
-size_t ShenandoahHeuristics::select_aged_regions(size_t old_available, size_t num_regions, bool* preselected_regions) {
+typedef struct {
+  ShenandoahHeapRegion* _region;
+  size_t _live_data;
+} AgedRegionData;
+
+static int compare_by_aged_live(AgedRegionData a, AgedRegionData b) {
+  if (a._live_data < b._live_data)
+    return -1;
+  else if (a._live_data > b._live_data)
+    return 1;
+  else return 0;
+}
+
+// Preselect for inclusion into the collection set regions whose age is at or above tenure age which contain more than
+// ShenandoahOldGarbageThreshold amounts of garbage.  We identify these regions by setting the appropriate entry of
+// candidate_regions_for_promotion_by_copy[] to true.  All entries are initialized to false before calling this
+// function.
+//
+// During the subsequent selection of the collection set, we give priority to these promotion set candidates.
+// Without this prioritization, we found that the aged regions tend to be ignored because they typically have
+// much less garbage and much more live data than the recently allocated "eden" regions.  When aged regions are
+// repeatedly excluded from the collection set, the amount of live memory within the young generation tends to
+// accumulate and this has the undesirable side effect of causing young-generation collections to require much more
+// CPU and wall-clock time.
+//
+// A second benefit of treating aged regions differently than other regions during collection set selection is
+// that this allows us to more accurately budget memory to hold the results of evacuation.  Memory for evacuation
+// of aged regions must be reserved in the old generation.  Memory for evacuation of all other regions must be
+// reserved in the young generation.
+//
+// A side effect performed by this function is to tally up the number of regions and the number of live bytes
+// that we plan to promote-in-place during the current GC cycle.  This information, which is stored with
+// an invocation of heap->set_promotion_in_place_potential(), feeds into subsequent decisions about when to
+// trigger the next GC and may identify special work to be done during this GC cycle if we choose to abbreviate it.
+//
+// Returns bytes of old-gen memory consumed by selected aged regions
+size_t ShenandoahHeuristics::select_aged_regions(size_t old_available, size_t num_regions,
+                                                 bool candidate_regions_for_promotion_by_copy[]) {
   ShenandoahHeap* heap = ShenandoahHeap::heap();
   assert(heap->mode()->is_generational(), "Only in generational mode");
-
+  ShenandoahMarkingContext* const ctx = heap->marking_context();
   size_t old_consumed = 0;
+  size_t promo_potential = 0;
+  size_t anticipated_promote_in_place_live = 0;
+
+  heap->clear_promotion_in_place_potential();
+  heap->clear_promotion_potential();
+  size_t candidates = 0;
+  size_t candidates_live = 0;
+  size_t old_garbage_threshold = (ShenandoahHeapRegion::region_size_bytes() * ShenandoahOldGarbageThreshold) / 100;
+  size_t promote_in_place_regions = 0;
+  size_t promote_in_place_live = 0;
+  size_t promote_in_place_pad = 0;
+  size_t anticipated_candidates = 0;
+  size_t anticipated_promote_in_place_regions = 0;
+
+  // Sort the promotion-eligible regions according to live-data-bytes so that we can first reclaim regions that require
+  // less evacuation effort.  This prioritizes garbage first, expanding the allocation pool before we begin the work of
+  // reclaiming regions that require more effort.
+  AgedRegionData* sorted_regions = (AgedRegionData*) alloca(num_regions * sizeof(AgedRegionData));
   for (size_t i = 0; i < num_regions; i++) {
-    ShenandoahHeapRegion* region = heap->get_region(i);
-    if (in_generation(region) && !region->is_empty() && region->is_regular() && (region->age() >= InitialTenuringThreshold)) {
-      size_t promotion_need = (size_t) (region->get_live_data_bytes() * ShenandoahEvacWaste);
-      if (old_consumed + promotion_need < old_available) {
+    ShenandoahHeapRegion* r = heap->get_region(i);
+    if (r->is_empty() || !r->has_live() || !r->is_young() || !r->is_regular()) {
+      continue;
+    }
+    if (r->age() >= InitialTenuringThreshold) {
+      r->save_top_before_promote();
+      if (r->garbage() < old_garbage_threshold) {
+        HeapWord* tams = ctx->top_at_mark_start(r);
+        HeapWord* original_top = r->top();
+        if (tams == original_top) {
+          // Fill the remnant memory within this region to assure no allocations prior to promote in place.  Otherwise,
+          // newly allocated objects will not be parseable when promote in place tries to register them.  Furthermore, any
+          // new allocations would not necessarily be eligible for promotion.  This addresses both issues.
+          size_t remnant_size = r->free() / HeapWordSize;
+          if (remnant_size > ShenandoahHeap::min_fill_size()) {
+            ShenandoahHeap::fill_with_object(original_top, remnant_size);
+            r->set_top(r->end());
+            promote_in_place_pad += remnant_size * HeapWordSize;
+          } else {
+            // Since the remnant is so small that it cannot be filled, we don't have to worry about any accidental
+            // allocations occuring within this region before the region is promoted in place.
+          }
+          promote_in_place_regions++;
+          promote_in_place_live += r->get_live_data_bytes();
+        }
+        // Else, we do not promote this region (either in place or by copy) because it has received new allocations.
+
+        // During evacuation, we exclude from promotion regions for which age >= tenure threshold, garbage < garbage-threshold,
+        //  and get_top_before_promote() != tams
+      } else {
+        // After sorting and selecting best candidates below, we may decide to exclude this promotion-eligible region
+        // from the current collection sets.  If this happens, we will consider this region as part of the anticipated
+        // promotion potential for the next GC pass.
+        size_t live_data = r->get_live_data_bytes();
+        candidates_live += live_data;
+        sorted_regions[candidates]._region = r;
+        sorted_regions[candidates++]._live_data = live_data;
+      }
+    } else {
+      // We only anticipate to promote regular regions if garbage() is above threshold.  Tenure-aged regions with less
+      // garbage are promoted in place.  These take a different path to old-gen.  Note that certain regions that are
+      // excluded from anticipated promotion because their garbage content is too low (causing us to anticipate that
+      // the region would be promoted in place) may be eligible for evacuation promotion by the time promotion takes
+      // place during a subsequent GC pass because more garbage is found within the region between now and then.  This
+      // should not happen if we are properly adapting the tenure age.  The theory behind adaptive tenuring threshold
+      // is to choose the youngest age that demonstrates no "significant" further loss of population since the previous
+      // age.  If not this, we expect the tenure age to demonstrate linear population decay for at least two population
+      // samples, whereas we expect to observe exponential population decay for ages younger than the tenure age.
+      //
+      // In the case that certain regions which were anticipated to be promoted in place need to be promoted by
+      // evacuation, it may be the case that there is not sufficient reserve within old-gen to hold evacuation of
+      // these regions.  The likely outcome is that these regions will not be selected for evacuation or promotion
+      // in the current cycle and we will anticipate that they will be promoted in the next cycle.  This will cause
+      // us to reserve more old-gen memory so that these objects can be promoted in the subsequent cycle.
+      //
+      // TODO:
+      //   If we are auto-tuning the tenure age and regions that were anticipated to be promoted in place end up
+      //   being promoted by evacuation, this event should feed into the tenure-age-selection heuristic so that
+      //   the tenure age can be increased.
+      if (heap->is_aging_cycle() && (r->age() + 1 == InitialTenuringThreshold)) {
+        if (r->garbage() >= old_garbage_threshold) {
+          anticipated_candidates++;
+          promo_potential += r->get_live_data_bytes();
+        }
+        else {
+          anticipated_promote_in_place_regions++;
+          anticipated_promote_in_place_live += r->get_live_data_bytes();
+        }
+      }
+    }
+    // Note that we keep going even if one region is excluded from selection.
+    // Subsequent regions may be selected if they have smaller live data.
+  }
+  // Sort in increasing order according to live data bytes.  Note that candidates represents the number of regions
+  // that qualify to be promoted by evacuation.
+  if (candidates > 0) {
+    QuickSort::sort<AgedRegionData>(sorted_regions, candidates, compare_by_aged_live, false);
+    for (size_t i = 0; i < candidates; i++) {
+      size_t region_live_data = sorted_regions[i]._live_data;
+      size_t promotion_need = (size_t) (region_live_data * ShenandoahPromoEvacWaste);
+      if (old_consumed + promotion_need <= old_available) {
+        ShenandoahHeapRegion* region = sorted_regions[i]._region;
         old_consumed += promotion_need;
-        preselected_regions[i] = true;
+        candidate_regions_for_promotion_by_copy[region->index()] = true;
+      } else {
+        // We rejected this promotable region from the collection set because we had no room to hold its copy.
+        // Add this region to promo potential for next GC.
+        promo_potential += region_live_data;
       }
-      // Note that we keep going even if one region is excluded from selection.
-      // Subsequent regions may be selected if they have smaller live data.
+      // We keep going even if one region is excluded from selection because we need to accumulate all eligible
+      // regions that are not preselected into promo_potential
     }
   }
+  heap->set_pad_for_promote_in_place(promote_in_place_pad);
+  heap->set_promotion_potential(promo_potential);
+  heap->set_promotion_in_place_potential(anticipated_promote_in_place_live);
   return old_consumed;
 }
 
 void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set, ShenandoahOldHeuristics* old_heuristics) {
   ShenandoahHeap* heap = ShenandoahHeap::heap();
   bool is_generational = heap->mode()->is_generational();
+  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 
   assert(collection_set->count() == 0, "Must be empty");
   assert(!is_generational || !_generation->is_old(), "Old GC invokes ShenandoahOldHeuristics::choose_collection_set()");
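The core of the preselection loop in select_aged_regions() above, reduced to a standalone sketch: candidates arrive sorted by ascending live bytes, each is accepted while its waste-inflated copy still fits the old-gen budget, and rejected candidates are tallied as promotion potential for the next cycle. Names here are stand-ins, not the patch's API:

#include <cstddef>

struct Candidate { size_t index; size_t live_bytes; };

// Returns old-gen bytes consumed by accepted candidates; marks accepted regions
// in selected[] (indexed by region index) and reports deferred live bytes.
size_t preselect(const Candidate* sorted, size_t n, size_t old_available,
                 double promo_evac_waste,            // e.g. ShenandoahPromoEvacWaste
                 bool* selected,
                 size_t* promo_potential_out) {
  size_t old_consumed = 0;
  size_t promo_potential = 0;
  for (size_t i = 0; i < n; i++) {
    size_t need = (size_t)(sorted[i].live_bytes * promo_evac_waste);
    if (old_consumed + need <= old_available) {
      old_consumed += need;
      selected[sorted[i].index] = true;
    } else {
      promo_potential += sorted[i].live_bytes;  // rejected: remember for the next cycle
    }
  }
  *promo_potential_out = promo_potential;
  return old_consumed;
}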
@@ -113,6 +265,7 @@ void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collec
   RegionData* candidates = _region_data;
 
   size_t cand_idx = 0;
+  size_t preselected_candidates = 0;
 
   size_t total_garbage = 0;
 
@@ -122,12 +275,21 @@ void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collec
   size_t free = 0;
   size_t free_regions = 0;
 
+  size_t old_garbage_threshold = (region_size_bytes * ShenandoahOldGarbageThreshold) / 100;
+  // This counts number of humongous regions that we intend to promote in this cycle.
+  size_t humongous_regions_promoted = 0;
+  // This counts bytes of memory used by humongous regions to be promoted in place.
+  size_t humongous_bytes_promoted = 0;
+  // This counts number of regular regions that will be promoted in place.
+  size_t regular_regions_promoted_in_place = 0;
+  // This counts bytes of memory used by regular regions to be promoted in place.
+  size_t regular_regions_promoted_usage = 0;
+
   for (size_t i = 0; i < num_regions; i++) {
     ShenandoahHeapRegion* region = heap->get_region(i);
     if (is_generational && !in_generation(region)) {
       continue;
     }
-
     size_t garbage = region->garbage();
     total_garbage += garbage;
     if (region->is_empty()) {
@@ -141,17 +303,37 @@ void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collec
         region->make_trash_immediate();
       } else {
         assert(!_generation->is_old(), "OLD is handled elsewhere");
+        bool is_candidate;
         // This is our candidate for later consideration.
-        candidates[cand_idx]._region = region;
         if (is_generational && collection_set->is_preselected(i)) {
-          // If region is preselected, we know mode()->is_generational() and region->age() >= InitialTenuringThreshold)
-          garbage = ShenandoahHeapRegion::region_size_bytes();
+          // If !is_generational, we cannot ask if is_preselected.  If is_preselected, we know
+          //   region->age() >= InitialTenuringThreshold.
+          is_candidate = true;
+          preselected_candidates++;
+          // Set garbage value to the maximum so this region sorts to the front of the candidate list.
+          garbage = region_size_bytes;
+        } else if (is_generational && region->is_young() && (region->age() >= InitialTenuringThreshold)) {
+          // Note that for GLOBAL GC, region may be OLD, and OLD regions do not qualify for pre-selection
+
+          // This region is old enough to be promoted but it was not preselected, either because its garbage is below
+          // ShenandoahOldGarbageThreshold so it will be promoted in place, or because there is not sufficient room
+          // in old gen to hold the evacuated copies of this region's live data.  In both cases, we choose not to
+          // place this region into the collection set.
+          if (region->garbage_before_padded_for_promote() < old_garbage_threshold) {
+            regular_regions_promoted_in_place++;
+            regular_regions_promoted_usage += region->used_before_promote();
+          }
+          is_candidate = false;
+        } else {
+          is_candidate = true;
+        }
+        if (is_candidate) {
+          candidates[cand_idx]._region = region;
+          candidates[cand_idx]._u._garbage = garbage;
+          cand_idx++;
         }
-        candidates[cand_idx]._garbage = garbage;
-        cand_idx++;
       }
     } else if (region->is_humongous_start()) {
-
       // Reclaim humongous regions here, and count them as the immediate garbage
 #ifdef ASSERT
       bool reg_live = region->has_live();
@@ -166,6 +348,13 @@ void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collec
         // Count only the start. Continuations would be counted on "trash" path
         immediate_regions++;
         immediate_garbage += garbage;
+      } else {
+        if (region->is_young() && region->age() >= InitialTenuringThreshold) {
+          oop obj = cast_to_oop(region->bottom());
+          size_t humongous_regions = ShenandoahHeapRegion::required_regions(obj->size() * HeapWordSize);
+          humongous_regions_promoted += humongous_regions;
+          humongous_bytes_promoted += obj->size() * HeapWordSize;
+        }
       }
     } else if (region->is_trash()) {
       // Count in just trashed collection set, during coalesced CM-with-UR
@@ -173,6 +362,14 @@ void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collec
       immediate_garbage += garbage;
     }
   }
+  heap->reserve_promotable_humongous_regions(humongous_regions_promoted);
+  heap->reserve_promotable_humongous_usage(humongous_bytes_promoted);
+  heap->reserve_promotable_regular_regions(regular_regions_promoted_in_place);
+  heap->reserve_promotable_regular_usage(regular_regions_promoted_usage);
+  log_info(gc, ergo)("Planning to promote in place " SIZE_FORMAT " humongous regions and " SIZE_FORMAT
+                     " regular regions, spanning a total of " SIZE_FORMAT " used bytes",
+                     humongous_regions_promoted, regular_regions_promoted_in_place,
+                     humongous_regions_promoted * ShenandoahHeapRegion::region_size_bytes() + regular_regions_promoted_usage);
 
   // Step 2. Look back at garbage statistics, and decide if we want to collect anything,
   // given the amount of immediately reclaimable garbage. If we do, figure out the collection set.
@@ -185,7 +382,9 @@ void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collec
   size_t immediate_percent = (total_garbage == 0) ? 0 : (immediate_garbage * 100 / total_garbage);
   collection_set->set_immediate_trash(immediate_garbage);
 
-  if (immediate_percent <= ShenandoahImmediateThreshold) {
+  ShenandoahGeneration* young_gen = heap->young_generation();
+  bool doing_promote_in_place = (humongous_regions_promoted + regular_regions_promoted_in_place > 0);
+  if (doing_promote_in_place || (preselected_candidates > 0) || (immediate_percent <= ShenandoahImmediateThreshold)) {
     if (old_heuristics != nullptr) {
       old_heuristics->prime_collection_set(collection_set);
     } else {
@@ -359,6 +558,12 @@ void ShenandoahHeuristics::initialize() {
   // Nothing to do by default.
 }
 
+size_t ShenandoahHeuristics::bytes_of_allocation_runway_before_gc_trigger(size_t young_regions_to_be_reclaimed) {
+  assert(false, "Only implemented for young Adaptive Heuristics");
+  return 0;
+}
+
 double ShenandoahHeuristics::elapsed_cycle_time() const {
   return os::elapsedTime() - _cycle_start;
 }
@@ -370,6 +575,10 @@ bool ShenandoahHeuristics::in_generation(ShenandoahHeapRegion* region) {
 }
 
 size_t ShenandoahHeuristics::min_free_threshold() {
-  size_t min_free_threshold = _generation->is_old() ? ShenandoahOldMinFreeThreshold : ShenandoahMinFreeThreshold;
-  return _generation->soft_max_capacity() / 100 * min_free_threshold;
+  assert(!_generation->is_old(), "min_free_threshold is only relevant to young GC");
+  size_t min_free_threshold = ShenandoahMinFreeThreshold;
+  // Note that soft_max_capacity() / 100 * min_free_threshold is smaller than max_capacity() / 100 * min_free_threshold.
+  // We want to behave conservatively here, so use max_capacity().  By returning a larger value, we cause the GC to
+  // trigger when the remaining amount of free shrinks below the larger threshold.
+  return _generation->max_capacity() / 100 * min_free_threshold;
 }
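A worked example of the conservatism argument in min_free_threshold(), with invented sizes: once soft-max has shrunk the generation, thresholding against max_capacity() fires the trigger earlier, i.e. at a higher free watermark:

#include <cstdio>
#include <cstddef>

int main() {
  size_t max_capacity      = 2048u * 1024 * 1024;  // 2 GiB (hypothetical)
  size_t soft_max_capacity = 1536u * 1024 * 1024;  // 1.5 GiB after soft-max shrink
  size_t pct               = 10;                   // ShenandoahMinFreeThreshold assumed 10

  size_t soft_threshold = soft_max_capacity / 100 * pct;  // ~153.6 MiB
  size_t hard_threshold = max_capacity / 100 * pct;       // ~204.8 MiB
  printf("trigger when free < %zu bytes (was %zu bytes)\n", hard_threshold, soft_threshold);
  return 0;
}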
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp
index aba842c2455..6b33025dfa8 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp
@@ -70,7 +70,10 @@ class ShenandoahHeuristics : public CHeapObj<mtGC> {
 
   typedef struct {
     ShenandoahHeapRegion* _region;
-    size_t _garbage;
+    union {
+      size_t _garbage;          // Not used by old-gen heuristics.
+      size_t _live_data;        // Only used by old-gen heuristics, which prioritize retention of _live_data over reclaiming garbage.
+    } _u;
   } RegionData;
 
   ShenandoahGeneration* _generation;
@@ -106,6 +109,14 @@ class ShenandoahHeuristics : public CHeapObj<mtGC> {
 
   static int compare_by_garbage(RegionData a, RegionData b);
 
+  // Compare by live is used to prioritize compaction of old-gen regions.  With old-gen compaction, the goal is
+  // to tightly pack long-lived objects into available regions.  In most cases, there has not been an accumulation
+  // of garbage within old-gen regions.  The more likely opportunity will be to combine multiple sparsely populated
+  // old-gen regions which may have been promoted in place into a smaller number of densely packed old-gen regions.
+  // This improves subsequent allocation efficiency and reduces the likelihood of allocation failure (including
+  // humongous allocation failure) due to fragmentation of the available old-gen allocation pool.
+  static int compare_by_live(RegionData a, RegionData b);
+
   // TODO: We need to enhance this API to give visibility to accompanying old-gen evacuation effort.
   // In the case that the old-gen evacuation effort is small or zero, the young-gen heuristics
   // should feel free to dedicate increased efforts to young-gen evacuation.
@@ -156,7 +167,7 @@ class ShenandoahHeuristics : public CHeapObj<mtGC> {
 
   virtual void reset_gc_learning();
 
-  virtual size_t select_aged_regions(size_t old_available, size_t num_regions, bool* preselected_regions);
+  virtual size_t select_aged_regions(size_t old_available, size_t num_regions, bool candidate_regions_for_promotion_by_copy[]);
 
   virtual void choose_collection_set(ShenandoahCollectionSet* collection_set, ShenandoahOldHeuristics* old_heuristics);
 
@@ -169,6 +180,8 @@ class ShenandoahHeuristics : public CHeapObj<mtGC> {
   virtual bool is_experimental() = 0;
   virtual void initialize();
 
+  virtual size_t bytes_of_allocation_runway_before_gc_trigger(size_t young_regions_to_be_recycled);
+
   double elapsed_cycle_time() const;
 };
 
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp
index ad8535ebe33..5722103d73a 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp
@@ -43,20 +43,23 @@ ShenandoahOldHeuristics::ShenandoahOldHeuristics(ShenandoahOldGeneration* genera
   _next_old_collection_candidate(0),
   _last_old_region(0),
   _trigger_heuristic(trigger_heuristic),
+  _old_generation(generation),
   _promotion_failed(false),
-  _old_generation(generation)
+  _cannot_expand_trigger(false),
+  _fragmentation_trigger(false),
+  _growth_trigger(false)
 {
   assert(_generation->is_old(), "This service only available for old-gc heuristics");
 }
 
 bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* collection_set) {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
   if (unprocessed_old_collection_candidates() == 0) {
     return false;
   }
 
   _first_pinned_candidate = NOT_FOUND;
 
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
   uint included_old_regions = 0;
   size_t evacuated_old_bytes = 0;
   size_t collected_old_bytes = 0;
@@ -66,15 +69,36 @@ bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* coll
   // of memory that can still be evacuated.  We address this by reducing the evacuation budget by the amount
   // of live memory in that region and by the amount of unallocated memory in that region if the evacuation
   // budget is constrained by availability of free memory.
-  size_t old_evacuation_budget = (size_t) ((double) heap->get_old_evac_reserve() / ShenandoahEvacWaste);
+  size_t old_evacuation_budget = (size_t) ((double) heap->get_old_evac_reserve() / ShenandoahOldEvacWaste);
+  size_t unfragmented_available = _old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
+  size_t fragmented_available = 0;
+  size_t excess_fragmented_available = 0;
+
+  if (unfragmented_available > old_evacuation_budget) {
+    unfragmented_available = old_evacuation_budget;
+    fragmented_available = 0;
+    excess_fragmented_available = 0;
+  } else {
+    assert(_old_generation->available() >= old_evacuation_budget, "Cannot budget more than is available");
+    fragmented_available = _old_generation->available() - unfragmented_available;
+    assert(fragmented_available + unfragmented_available >= old_evacuation_budget, "Budgets do not add up");
+    if (fragmented_available + unfragmented_available > old_evacuation_budget) {
+      excess_fragmented_available = (fragmented_available + unfragmented_available) - old_evacuation_budget;
+      fragmented_available -= excess_fragmented_available;
+    }
+  }
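
A minimal standalone sketch of the budget partition above, with plain integer inputs standing in for the generation accessors (the struct and function names are illustrative):

#include <cassert>
#include <cstddef>

struct OldEvacBudget {
  std::size_t unfragmented;  // capped at the evacuation budget
  std::size_t fragmented;    // budgeted free memory inside partially used regions
  std::size_t excess;        // fragmented free memory beyond what the budget needs
};

// Prefer empty (unaffiliated) regions; dip into fragmented free memory only when
// the empty regions do not cover the old evacuation budget.
OldEvacBudget partition(std::size_t budget, std::size_t unfragmented_available, std::size_t old_available) {
  if (unfragmented_available > budget) {
    return { budget, 0, 0 };
  }
  assert(old_available >= budget);
  std::size_t fragmented = old_available - unfragmented_available;
  std::size_t excess = 0;
  if (fragmented + unfragmented_available > budget) {
    excess = (fragmented + unfragmented_available) - budget;
    fragmented -= excess;
  }
  return { unfragmented_available, fragmented, excess };
}
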
+
   size_t remaining_old_evacuation_budget = old_evacuation_budget;
-  size_t lost_evacuation_capacity = 0;
   log_info(gc)("Choose old regions for mixed collection: old evacuation budget: " SIZE_FORMAT "%s, candidates: %u",
                byte_size_in_proper_unit(old_evacuation_budget), proper_unit_for_byte_size(old_evacuation_budget),
                unprocessed_old_collection_candidates());
 
+  size_t lost_evacuation_capacity = 0;
+
   // The number of old-gen regions that were selected as candidates for collection at the end of the most recent old-gen
-  // concurrent marking phase and have not yet been collected is represented by unprocessed_old_collection_candidates()
+  // concurrent marking phase and have not yet been collected is represented by unprocessed_old_collection_candidates().
+  // Candidate regions are ordered according to increasing amount of live data.  If there is not sufficient room to
+  // evacuate region N, then there is no need to even consider evacuating region N+1.
   while (unprocessed_old_collection_candidates() > 0) {
-    // Old collection candidates are sorted in order of decreasing garbage contained therein.
+    // Old collection candidates are sorted in order of increasing live data contained therein.
     ShenandoahHeapRegion* r = next_old_collection_candidate();
@@ -82,31 +106,74 @@ bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* coll
       break;
     }
 
-    // If we choose region r to be collected, then we need to decrease the capacity to hold other evacuations by
-    // the size of r's free memory.
-
-    // It's probably overkill to compensate with lost_evacuation_capacity.
-    // But it's the safe thing to do and has minimal impact on content of primed collection set.
-    size_t live = r->get_live_data_bytes();
-    if (live + lost_evacuation_capacity <= remaining_old_evacuation_budget) {
-      // Decrement remaining evacuation budget by bytes that will be copied.
-      lost_evacuation_capacity += r->free();
-      remaining_old_evacuation_budget -= live;
-      collection_set->add_region(r);
-      included_old_regions++;
-      evacuated_old_bytes += live;
-      collected_old_bytes += r->garbage();
-      consume_old_collection_candidate();
+    // If region r is evacuated to fragmented memory (to free memory within a partially used region), then we need
+    // to decrease the capacity of the fragmented memory by the scaled loss.
+
+    size_t live_data_for_evacuation = r->get_live_data_bytes();
+    size_t lost_available = r->free();
+
+    if ((lost_available > 0) && (excess_fragmented_available > 0)) {
+      if (lost_available < excess_fragmented_available) {
+        excess_fragmented_available -= lost_available;
+        lost_evacuation_capacity -= lost_available;
+        lost_available  = 0;
+      } else {
+        lost_available -= excess_fragmented_available;
+        lost_evacuation_capacity -= excess_fragmented_available;
+        excess_fragmented_available = 0;
+      }
+    }
+    size_t scaled_loss = (size_t) ((double) lost_available / ShenandoahOldEvacWaste);
+    if ((lost_available > 0) && (fragmented_available > 0)) {
+      if (scaled_loss + live_data_for_evacuation < fragmented_available) {
+        fragmented_available -= scaled_loss;
+        scaled_loss = 0;
+      } else {
+        // We will have to allocate this region's evacuation memory from unfragmented memory, so don't bother
+        // to decrement scaled_loss
+      }
+    }
+    if (scaled_loss > 0) {
+      // We were not able to account for the lost free memory within fragmented memory, so we need to take this
+      // allocation out of unfragmented memory.  Unfragmented memory does not need to account for loss of free.
+      if (live_data_for_evacuation > unfragmented_available) {
+        // There is not room to evacuate this region or any that come after it within the candidates array.
+        break;
+      } else {
+        unfragmented_available -= live_data_for_evacuation;
+      }
     } else {
-      break;
+      // Since scaled_loss == 0, we have accounted for the loss of free memory, so we can allocate from either
+      // fragmented or unfragmented available memory.  Use up the fragmented memory budget first.
+      size_t evacuation_need = live_data_for_evacuation;
+
+      if (evacuation_need > fragmented_available) {
+        evacuation_need -= fragmented_available;
+        fragmented_available = 0;
+      } else {
+        fragmented_available -= evacuation_need;
+        evacuation_need = 0;
+      }
+      if (evacuation_need > unfragmented_available) {
+        // There is not room to evacuate this region or any that come after it within the candidates array.
+        break;
+      } else {
+        unfragmented_available -= evacuation_need;
+        // No need to zero evacuation_need here; it is not read again.
+      }
     }
+    collection_set->add_region(r);
+    included_old_regions++;
+    evacuated_old_bytes += live_data_for_evacuation;
+    collected_old_bytes += r->garbage();
+    consume_old_collection_candidate();
   }
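
The accounting loop above reduces to the following sketch (plain integers in place of the region and heap accessors; try_consume is an illustrative name; the lost_evacuation_capacity bookkeeping, which is never read in the code shown, is omitted):

#include <cstddef>

struct Budget {
  std::size_t unfragmented, fragmented, excess;
  double old_evac_waste;  // stand-in for ShenandoahOldEvacWaste
};

// Returns false when the candidate's live data no longer fits; since candidates
// are sorted by increasing live data, the caller then stops considering candidates.
bool try_consume(std::size_t live, std::size_t lost_free, Budget& b) {
  // First charge the region's unusable free memory against the excess pool.
  if (lost_free > 0 && b.excess > 0) {
    std::size_t charge = lost_free < b.excess ? lost_free : b.excess;
    b.excess -= charge;
    lost_free -= charge;
  }
  std::size_t scaled_loss = (std::size_t)((double)lost_free / b.old_evac_waste);
  if (lost_free > 0 && b.fragmented > 0 && scaled_loss + live < b.fragmented) {
    b.fragmented -= scaled_loss;  // loss absorbed by the fragmented pool
    scaled_loss = 0;
  }
  if (scaled_loss > 0) {
    // Loss could not be absorbed: this region must evacuate into unfragmented memory.
    if (live > b.unfragmented) return false;
    b.unfragmented -= live;
    return true;
  }
  // Loss fully accounted for: spend fragmented memory first, then unfragmented.
  std::size_t from_frag = live < b.fragmented ? live : b.fragmented;
  b.fragmented -= from_frag;
  std::size_t need = live - from_frag;
  if (need > b.unfragmented) return false;
  b.unfragmented -= need;
  return true;
}
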
 
   if (_first_pinned_candidate != NOT_FOUND) {
     // Need to deal with pinned regions
     slide_pinned_regions_to_front();
   }
-
+  decrease_unprocessed_old_collection_candidates_live_memory(evacuated_old_bytes);
   if (included_old_regions > 0) {
     log_info(gc)("Old-gen piggyback evac (" UINT32_FORMAT " regions, evacuating " SIZE_FORMAT "%s, reclaiming: " SIZE_FORMAT "%s)",
                  included_old_regions,
@@ -116,6 +183,8 @@ bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* coll
 
   if (unprocessed_old_collection_candidates() == 0) {
     // We have added the last of our collection candidates to a mixed collection.
+    // Any triggers that occurred during mixed evacuations may no longer be valid.  They can retrigger if appropriate.
+    clear_triggers();
     _old_generation->transition_to(ShenandoahOldGeneration::IDLE);
   } else if (included_old_regions == 0) {
     // We have candidates, but none were included for evacuation - are they all pinned?
@@ -202,7 +271,7 @@ void ShenandoahOldHeuristics::slide_pinned_regions_to_front() {
     if (skipped._region->is_pinned()) {
       RegionData& available_slot = _region_data[write_index];
       available_slot._region = skipped._region;
-      available_slot._garbage = skipped._garbage;
+      available_slot._u._live_data = skipped._u._live_data;
       --write_index;
     }
   }
@@ -232,6 +301,7 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() {
   size_t num_regions = heap->num_regions();
   size_t immediate_garbage = 0;
   size_t immediate_regions = 0;
+  size_t live_data = 0;
 
   RegionData* candidates = _region_data;
   for (size_t i = 0; i < num_regions; i++) {
@@ -241,7 +311,9 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() {
     }
 
     size_t garbage = region->garbage();
+    size_t live_bytes = region->get_live_data_bytes();
     total_garbage += garbage;
+    live_data += live_bytes;
 
     if (region->is_regular() || region->is_pinned()) {
       if (!region->has_live()) {
@@ -252,7 +324,7 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() {
       } else {
         region->begin_preemptible_coalesce_and_fill();
         candidates[cand_idx]._region = region;
-        candidates[cand_idx]._garbage = garbage;
+        candidates[cand_idx]._u._live_data = live_bytes;
         cand_idx++;
       }
     } else if (region->is_humongous_start()) {
@@ -273,42 +345,63 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() {
     }
   }
 
+  _old_generation->set_live_bytes_after_last_mark(live_data);
+
   // TODO: Consider not running mixed collects if we recovered some threshold percentage of memory from immediate garbage.
   // This would be similar to young and global collections shortcutting evacuation, though we'd probably want a separate
   // threshold for the old generation.
 
-  // Prioritize regions to select garbage-first regions
-  QuickSort::sort<RegionData>(candidates, cand_idx, compare_by_garbage, false);
+  // Unlike young, we are more interested in efficiently packing OLD-gen than in reclaiming garbage first.  We sort by live-data.
+  // Some regular regions may have been promoted in place with no garbage but also with very little live data.  When we "compact"
+  // old-gen, we want to pack these underutilized regions together so we can have more unaffiliated (unfragmented) free regions
+  // in old-gen.
+  QuickSort::sort<RegionData>(candidates, cand_idx, compare_by_live, false);
 
-  // Any old-gen region that contains (ShenandoahOldGarbageThreshold (default value 25))% garbage or more is to
-  // be evacuated.
+  // Any old-gen region that contains (ShenandoahOldGarbageThreshold (default value 25))% garbage or more is to be
+  // added to the list of candidates for subsequent mixed evacuations.
   //
   // TODO: allow ShenandoahOldGarbageThreshold to be determined adaptively, by heuristics.
 
+  const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+
+  // The convention is to collect regions that have more than this amount of garbage.
+  const size_t garbage_threshold = region_size_bytes * ShenandoahOldGarbageThreshold / 100;
+
+  // Enlightened interpretation: collect regions that have less than this amount of live.
+  const size_t live_threshold = region_size_bytes - garbage_threshold;
 
-  const size_t garbage_threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahOldGarbageThreshold / 100;
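
A worked example of the threshold inversion, assuming a hypothetical 32 MiB region size and the stated 25% default.  For a fully used region the two tests select the same regions; sorting and filtering by live data additionally catches sparsely allocated regions (e.g. those promoted in place), which is the stated goal:

#include <cstddef>
#include <cstdio>

int main() {
  const std::size_t region_size_bytes = 32u * 1024 * 1024;                     // hypothetical region size
  const std::size_t garbage_threshold = region_size_bytes * 25 / 100;          // 8 MiB
  const std::size_t live_threshold    = region_size_bytes - garbage_threshold; // 24 MiB
  // For a fully used region: garbage > 8 MiB  <=>  live < 24 MiB.
  std::printf("collect when garbage > %zu, i.e. live < %zu\n", garbage_threshold, live_threshold);
}
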
   size_t candidates_garbage = 0;
   _last_old_region = (uint)cand_idx;
   _last_old_collection_candidate = (uint)cand_idx;
   _next_old_collection_candidate = 0;
 
+  size_t unfragmented = 0;
+
   for (size_t i = 0; i < cand_idx; i++) {
-    if (candidates[i]._garbage < garbage_threshold) {
-      // Candidates are sorted in decreasing order of garbage, so no regions after this will be above the threshold
+    size_t live = candidates[i]._u._live_data;
+    if (live > live_threshold) {
+      // Candidates are sorted in increasing order of live data, so no regions after this will be below the threshold.
       _last_old_collection_candidate = (uint)i;
       break;
     }
-    candidates_garbage += candidates[i]._garbage;
+    size_t region_garbage = candidates[i]._region->garbage();
+    size_t region_free = candidates[i]._region->free();
+    candidates_garbage += region_garbage;
+    unfragmented += region_free;
   }
 
   // Note that we do not coalesce and fill occupied humongous regions
   // HR: humongous regions, RR: regular regions, CF: coalesce and fill regions
   size_t collectable_garbage = immediate_garbage + candidates_garbage;
-  log_info(gc)("Old-Gen Collectable Garbage: " SIZE_FORMAT "%s over " UINT32_FORMAT " regions, "
+  size_t old_candidates = _last_old_collection_candidate;
+  log_info(gc)("Old-Gen Collectable Garbage: " SIZE_FORMAT "%s "
+               "consolidated with free: " SIZE_FORMAT "%s, over " SIZE_FORMAT " regions, "
                "Old-Gen Immediate Garbage: " SIZE_FORMAT "%s over " SIZE_FORMAT " regions.",
-               byte_size_in_proper_unit(collectable_garbage), proper_unit_for_byte_size(collectable_garbage), _last_old_collection_candidate,
-               byte_size_in_proper_unit(immediate_garbage),   proper_unit_for_byte_size(immediate_garbage),   immediate_regions);
-
+               byte_size_in_proper_unit(collectable_garbage), proper_unit_for_byte_size(collectable_garbage),
+               byte_size_in_proper_unit(unfragmented),        proper_unit_for_byte_size(unfragmented), old_candidates,
+               byte_size_in_proper_unit(immediate_garbage),   proper_unit_for_byte_size(immediate_garbage), immediate_regions);
+  size_t mixed_evac_live = old_candidates * region_size_bytes - (candidates_garbage + unfragmented);
+  set_unprocessed_old_collection_candidates_live_memory(mixed_evac_live);
   if (unprocessed_old_collection_candidates() == 0) {
     _old_generation->transition_to(ShenandoahOldGeneration::IDLE);
   } else {
@@ -316,6 +409,20 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() {
   }
 }
 
+size_t ShenandoahOldHeuristics::unprocessed_old_collection_candidates_live_memory() const {
+  return _live_bytes_in_unprocessed_candidates;
+}
+
+void ShenandoahOldHeuristics::set_unprocessed_old_collection_candidates_live_memory(size_t initial_live) {
+  _live_bytes_in_unprocessed_candidates = initial_live;
+}
+
+void ShenandoahOldHeuristics::decrease_unprocessed_old_collection_candidates_live_memory(size_t evacuated_live) {
+  assert(evacuated_live <= _live_bytes_in_unprocessed_candidates, "Cannot evacuate more than was present");
+  _live_bytes_in_unprocessed_candidates -= evacuated_live;
+}
+
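
A minimal model of this bookkeeping (the class name is illustrative; the real counter lives on ShenandoahOldHeuristics, seeded once per old marking cycle, then drained as mixed evacuations consume candidates):

#include <cassert>
#include <cstddef>

class CandidateLiveBytes {
  std::size_t _live = 0;
public:
  // Seeded once, after candidate selection at the end of old marking.
  void set(std::size_t initial_live) { _live = initial_live; }

  // Drained as each mixed evacuation consumes candidates.
  void decrease(std::size_t evacuated) {
    assert(evacuated <= _live);  // cannot evacuate more than is present
    _live -= evacuated;
  }

  std::size_t remaining() const { return _live; }
};
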
 // TODO: Unused?
 uint ShenandoahOldHeuristics::last_old_collection_candidate_index() {
   return _last_old_collection_candidate;
@@ -356,7 +463,7 @@ unsigned int ShenandoahOldHeuristics::get_coalesce_and_fill_candidates(Shenandoa
   while (index < end) {
     *buffer++ = _region_data[index++]._region;
   }
-  return _last_old_region - _next_old_collection_candidate;
+  return (_last_old_region - _next_old_collection_candidate);
 }
 
 void ShenandoahOldHeuristics::abandon_collection_candidates() {
@@ -366,24 +473,31 @@ void ShenandoahOldHeuristics::abandon_collection_candidates() {
 }
 
 void ShenandoahOldHeuristics::handle_promotion_failure() {
-  if (!_promotion_failed) {
-    if (ShenandoahHeap::heap()->generation_sizer()->transfer_capacity(_old_generation)) {
-      log_info(gc)("Increased size of old generation due to promotion failure.");
-    }
-    // TODO: Increase tenuring threshold to push back on promotions.
-  }
   _promotion_failed = true;
 }
 
 void ShenandoahOldHeuristics::record_cycle_start() {
-  _promotion_failed = false;
   _trigger_heuristic->record_cycle_start();
 }
 
 void ShenandoahOldHeuristics::record_cycle_end() {
   _trigger_heuristic->record_cycle_end();
+  clear_triggers();
+}
+
+void ShenandoahOldHeuristics::trigger_old_has_grown() {
+  _growth_trigger = true;
 }
 
+void ShenandoahOldHeuristics::clear_triggers() {
+  // Clear any triggers that were set during mixed evacuations.  Conditions may be different now that this phase has finished.
+  _promotion_failed = false;
+  _cannot_expand_trigger = false;
+  _fragmentation_trigger = false;
+  _growth_trigger = false;
+}
+
 bool ShenandoahOldHeuristics::should_start_gc() {
   // Cannot start a new old-gen GC until previous one has finished.
   //
@@ -393,13 +507,49 @@ bool ShenandoahOldHeuristics::should_start_gc() {
     return false;
   }
 
-  // If there's been a promotion failure (and we don't have regions already scheduled for evacuation),
-  // start a new old generation collection.
-  if (_promotion_failed) {
-    log_info(gc)("Trigger: Promotion Failure");
+  if (_cannot_expand_trigger) {
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    size_t old_gen_capacity = _old_generation->max_capacity();
+    size_t heap_capacity = heap->capacity();
+    double percent = 100.0 * ((double) old_gen_capacity) / heap_capacity;
+    log_info(gc)("Trigger (OLD): Expansion failure, current size: " SIZE_FORMAT "%s which is %.1f%% of total heap size",
+                 byte_size_in_proper_unit(old_gen_capacity), proper_unit_for_byte_size(old_gen_capacity), percent);
     return true;
   }
 
+  if (_fragmentation_trigger) {
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    size_t used = _old_generation->used();
+    size_t used_regions_size = _old_generation->used_regions_size();
+    size_t used_regions = _old_generation->used_regions();
+    assert(used_regions_size >= used, "Cannot have more used bytes than the size of used regions");
+    size_t fragmented_free = used_regions_size - used;
+    double percent = 100.0 * ((double) fragmented_free) / used_regions_size;
+    log_info(gc)("Trigger (OLD): Old has become fragmented: "
+                 SIZE_FORMAT "%s available bytes spread between " SIZE_FORMAT " regions (%.1f%% free)",
+                 byte_size_in_proper_unit(fragmented_free), proper_unit_for_byte_size(fragmented_free), used_regions, percent);
+    return true;
+  }
+
+  if (_growth_trigger) {
+    // Growth may be falsely triggered during mixed evacuations, before the mixed-evacuation candidates have been
+    // evacuated.  Before acting on a false trigger, we check to confirm the trigger condition is still satisfied.
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
+    size_t current_usage = _old_generation->used();
+    size_t trigger_threshold = _old_generation->usage_trigger_threshold();
+    if (current_usage > trigger_threshold) {
+      size_t live_at_previous_old = _old_generation->get_live_bytes_after_last_mark();
+      double percent_growth = 100.0 * ((double) current_usage - live_at_previous_old) / live_at_previous_old;
+      log_info(gc)("Trigger (OLD): Old has overgrown, live at end of previous OLD marking: "
+                   SIZE_FORMAT "%s, current usage: " SIZE_FORMAT "%s, percent growth: %.1f%%",
+                   byte_size_in_proper_unit(live_at_previous_old), proper_unit_for_byte_size(live_at_previous_old),
+                   byte_size_in_proper_unit(current_usage), proper_unit_for_byte_size(current_usage), percent_growth);
+      return true;
+    } else {
+      _growth_trigger = false;
+    }
+  }
+
   // Otherwise, defer to configured heuristic for gc trigger.
   return _trigger_heuristic->should_start_gc();
 }
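
The three flags above follow a set-asynchronously, re-validate-before-acting pattern: a trigger raised during mixed evacuations may be stale by the time the control thread looks at it.  A compact sketch under those assumptions (illustrative names, not the HotSpot API):

#include <cstddef>

struct OldTriggers {
  bool cannot_expand = false;
  bool fragmentation = false;
  bool growth        = false;

  // Mirrors should_start_gc(): the growth trigger is re-validated against current
  // usage and dropped if the condition no longer holds.
  bool should_start(std::size_t used, std::size_t growth_threshold) {
    if (cannot_expand || fragmentation) return true;
    if (growth) {
      if (used > growth_threshold) return true;
      growth = false;  // stale trigger: clear instead of forcing a redundant old GC
    }
    return false;
  }

  void clear() { cannot_expand = fragmentation = growth = false; }
};
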
@@ -409,14 +559,20 @@ bool ShenandoahOldHeuristics::should_degenerate_cycle() {
 }
 
 void ShenandoahOldHeuristics::record_success_concurrent(bool abbreviated) {
+  // Forget any triggers that occurred while OLD GC was ongoing.  If we really need to start another, it will retrigger.
+  clear_triggers();
   _trigger_heuristic->record_success_concurrent(abbreviated);
 }
 
 void ShenandoahOldHeuristics::record_success_degenerated() {
+  // Forget any triggers that occurred while OLD GC was ongoing.  If we really need to start another, it will retrigger.
+  clear_triggers();
   _trigger_heuristic->record_success_degenerated();
 }
 
 void ShenandoahOldHeuristics::record_success_full() {
+  // Forget any triggers that occurred while OLD GC was ongoing.  If we really need to start another, it will retrigger.
+  clear_triggers();
   _trigger_heuristic->record_success_full();
 }
 
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp
index f8581c2cd01..d6789cb87ec 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp
@@ -69,15 +69,24 @@ class ShenandoahOldHeuristics : public ShenandoahHeuristics {
   // the end of old final mark.
   uint _last_old_region;
 
+  // How much live data must be evacuated from within the unprocessed mixed evacuation candidates?
+  size_t _live_bytes_in_unprocessed_candidates;
+
   // This can be the 'static' or 'adaptive' heuristic.
   ShenandoahHeuristics* _trigger_heuristic;
 
+  // Keep a pointer to our generation that we can use without down casting a protected member from the base class.
+  ShenandoahOldGeneration* _old_generation;
+
   // Flag is set when promotion failure is detected (by gc thread), and cleared when
   // old generation collection begins (by control thread).
   volatile bool _promotion_failed;
 
-  // Keep a pointer to our generation that we can use without down casting a protected member from the base class.
-  ShenandoahOldGeneration* _old_generation;
+  // Flags are set when a trigger condition is detected (by gc thread), and cleared when
+  // the old generation collection begins (by control thread).  Flags are set and cleared at safepoints.
+  bool _cannot_expand_trigger;
+  bool _fragmentation_trigger;
+  bool _growth_trigger;
 
  protected:
   virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set, RegionData* data, size_t data_size,
@@ -97,6 +106,13 @@ class ShenandoahOldHeuristics : public ShenandoahHeuristics {
   // How many old-collection candidates have not yet been processed?
   uint unprocessed_old_collection_candidates();
 
+  // How much live memory must be evacuated from within old-collection candidates that have not yet been processed?
+  size_t unprocessed_old_collection_candidates_live_memory() const;
+
+  void set_unprocessed_old_collection_candidates_live_memory(size_t initial_live);
+
+  void decrease_unprocessed_old_collection_candidates_live_memory(size_t evacuated_live);
+
   // How many old or hidden collection candidates have not yet been processed?
   uint last_old_collection_candidate_index();
 
@@ -122,11 +138,17 @@ class ShenandoahOldHeuristics : public ShenandoahHeuristics {
   // held by this heuristic for supplying mixed collections.
   void abandon_collection_candidates();
 
-  // Notify the heuristic of promotion failures. The promotion attempt will be skipped and the object will
-  // be evacuated into the young generation. The collection should complete normally, but we want to schedule
-  // an old collection as soon as possible.
+  // Promotion failure does not currently trigger old-gen collections.  Often, promotion failures occur because
+  // old-gen is sized too small rather than because it is necessary to collect old gen.  We keep the method
+  // here in case we decide to feed this signal to sizing or triggering heuristics in the future.
   void handle_promotion_failure();
 
+  void trigger_cannot_expand() { _cannot_expand_trigger = true; }
+  void trigger_old_is_fragmented() { _fragmentation_trigger = true; }
+  void trigger_old_has_grown();
+
+  void clear_triggers();
+
   virtual void record_cycle_start() override;
 
   virtual void record_cycle_end() override;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp
index c797ad21a7d..736354e9282 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp
@@ -90,13 +90,13 @@ void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) {
   assert(!r->is_humongous(), "Only add regular regions to the collection set");
 
   _cset_map[r->index()] = 1;
-
-  size_t live = r->get_live_data_bytes();
+  size_t live    = r->get_live_data_bytes();
   size_t garbage = r->garbage();
-
+  size_t free    = r->free();
   if (r->is_young()) {
     _young_region_count++;
     _young_bytes_to_evacuate += live;
+    _young_available_bytes_collected += free;
     if (r->age() >= InitialTenuringThreshold) {
       _young_bytes_to_promote += live;
     }
@@ -104,6 +104,7 @@ void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) {
     _old_region_count++;
     _old_bytes_to_evacuate += live;
     _old_garbage += garbage;
+    _old_available_bytes_collected += free;
   }
 
   _region_count++;
@@ -117,6 +118,7 @@ void ShenandoahCollectionSet::add_region(ShenandoahHeapRegion* r) {
 
 void ShenandoahCollectionSet::clear() {
   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
+
   Copy::zero_to_bytes(_cset_map, _map_size);
 
 #ifdef ASSERT
@@ -140,6 +142,9 @@ void ShenandoahCollectionSet::clear() {
   _old_region_count = 0;
   _old_bytes_to_evacuate = 0;
 
+  _young_available_bytes_collected = 0;
+  _old_available_bytes_collected = 0;
+
   _has_old_regions = false;
 }
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp
index 112fff383ba..80a5e9dc028 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.hpp
@@ -66,6 +66,11 @@ class ShenandoahCollectionSet : public CHeapObj<mtGC> {
   // spans of time while collection set is being constructed.
   bool*                 _preselected_regions;
 
+  // When a region that still has available (unallocated) memory is added to the collection set, that free memory
+  // should be subtracted from the generation's available total, since it cannot be allocated until the region is recycled.
+  size_t                _young_available_bytes_collected;
+  size_t                _old_available_bytes_collected;
+
   shenandoah_padding(0);
   volatile size_t       _current_index;
   shenandoah_padding(1);
@@ -111,6 +116,10 @@ class ShenandoahCollectionSet : public CHeapObj<mtGC> {
 
   inline size_t get_young_bytes_to_be_promoted();
 
+  size_t get_young_available_bytes_collected() { return _young_available_bytes_collected; }
+
+  size_t get_old_available_bytes_collected() { return _old_available_bytes_collected; }
+
   inline size_t get_old_region_count();
   inline size_t get_young_region_count();
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp
index 73e1d5dbf2c..918aff26add 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp
@@ -128,6 +128,12 @@ bool ShenandoahCollectorPolicy::is_at_shutdown() {
   return _in_shutdown.is_set();
 }
 
+// This may be called by mutator threads.  We declare _success_full_gcs volatile to force the value not to be cached
+// in a local register or variable by a mutator thread that is checking this value in a loop.
+size_t ShenandoahCollectorPolicy::get_fullgc_count() {
+  return _success_full_gcs + _alloc_failure_degenerated_upgrade_to_full;
+}
+
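
HotSpot has its own conventions around volatile; in portable C++ the same mutator-visible counter would be written with std::atomic.  A sketch of the polling pattern the comment describes (illustrative names):

#include <atomic>
#include <cstddef>

std::atomic<std::size_t> success_full_gcs{0};
std::atomic<std::size_t> degen_upgraded_to_full{0};

std::size_t get_fullgc_count() {
  return success_full_gcs.load(std::memory_order_relaxed) +
         degen_upgraded_to_full.load(std::memory_order_relaxed);
}

// A mutator thread can poll for completion of a full GC without the compiler
// caching the counters in registers across iterations:
void wait_for_full_gc(std::size_t count_before) {
  while (get_fullgc_count() == count_before) { /* yield or park */ }
}
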
 void ShenandoahCollectorPolicy::print_gc_stats(outputStream* out) const {
   out->print_cr("Under allocation pressure, concurrent cycles may cancel, and either continue cycle");
   out->print_cr("under stop-the-world pause or result in stop-the-world Full GC. Increase heap size,");
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp
index 3ca9965dbda..244663e6ae3 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp
@@ -45,9 +45,9 @@ class ShenandoahCollectorPolicy : public CHeapObj<mtGC> {
   size_t _success_old_gcs;
   size_t _interrupted_old_gcs;
   size_t _success_degenerated_gcs;
-  size_t _success_full_gcs;
+  volatile size_t _success_full_gcs;
   size_t _alloc_failure_degenerated;
-  size_t _alloc_failure_degenerated_upgrade_to_full;
+  volatile size_t _alloc_failure_degenerated_upgrade_to_full;
   size_t _alloc_failure_full;
   size_t _explicit_concurrent;
   size_t _explicit_full;
@@ -88,6 +88,8 @@ class ShenandoahCollectorPolicy : public CHeapObj<mtGC> {
 
   size_t cycle_counter() const;
 
+  size_t get_fullgc_count();
+
   void print_gc_stats(outputStream* out) const;
 };
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
index 6c165ade4e3..b09b736ce6e 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
@@ -217,28 +217,56 @@ bool ShenandoahConcurrentGC::collect(GCCause::Cause cause) {
     _abbreviated = true;
   }
 
+  // We defer generation resizing actions until after cset regions have been recycled.  We do this even following an
+  // abbreviated cycle.
   if (heap->mode()->is_generational()) {
+    bool success;
+    size_t region_xfer;
+    const char* region_destination;
+    ShenandoahYoungGeneration* young_gen = heap->young_generation();
+    ShenandoahGeneration* old_gen = heap->old_generation();
     {
-      ShenandoahYoungGeneration* young_gen = heap->young_generation();
-      ShenandoahGeneration* old_gen = heap->old_generation();
       ShenandoahHeapLocker locker(heap->lock());
 
+      size_t old_region_surplus = heap->get_old_region_surplus();
+      size_t old_region_deficit = heap->get_old_region_deficit();
+      if (old_region_surplus) {
+        success = heap->generation_sizer()->transfer_to_young(old_region_surplus);
+        region_destination = "young";
+        region_xfer = old_region_surplus;
+      } else if (old_region_deficit) {
+        success = heap->generation_sizer()->transfer_to_old(old_region_deficit);
+        region_destination = "old";
+        region_xfer = old_region_deficit;
+        if (!success) {
+          ((ShenandoahOldHeuristics *) old_gen->heuristics())->trigger_cannot_expand();
+        }
+      } else {
+        region_destination = "none";
+        region_xfer = 0;
+        success = true;
+      }
+      heap->set_old_region_surplus(0);
+      heap->set_old_region_deficit(0);
+
       size_t old_usage_before_evac = heap->capture_old_usage(0);
       size_t old_usage_now = old_gen->used();
       size_t promoted_bytes = old_usage_now - old_usage_before_evac;
       heap->set_previous_promotion(promoted_bytes);
-
-      young_gen->unadjust_available();
-      old_gen->unadjust_available();
-      // No need to old_gen->increase_used().
-      // That was done when plabs were allocated, accounting for both old evacs and promotions.
-
-      heap->set_alloc_supplement_reserve(0);
       heap->set_young_evac_reserve(0);
       heap->set_old_evac_reserve(0);
       heap->reset_old_evac_expended();
       heap->set_promoted_reserve(0);
     }
+
+    // Report outside the heap lock
+    size_t young_available = young_gen->available();
+    size_t old_available = old_gen->available();
+    log_info(gc, ergo)("After cleanup, %s " SIZE_FORMAT " regions to %s to prepare for next gc, old available: "
+                       SIZE_FORMAT "%s, young available: " SIZE_FORMAT "%s",
+                       success ? "successfully transferred" : "failed to transfer", region_xfer, region_destination,
+                       byte_size_in_proper_unit(old_available), proper_unit_for_byte_size(old_available),
+                       byte_size_in_proper_unit(young_available), proper_unit_for_byte_size(young_available));
   }
   return true;
 }
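
A sketch of the resizing decision above, with placeholder transfer functions standing in for the generation sizer (illustrative names; at most one of surplus/deficit is expected to be non-zero):

#include <cstddef>
#include <cstdio>

bool transfer_to_young(std::size_t) { return true; }  // placeholder for the generation sizer
bool transfer_to_old(std::size_t)   { return true; }  // placeholder for the generation sizer

void rebalance(std::size_t old_region_surplus, std::size_t old_region_deficit) {
  bool success = true;
  std::size_t xfer = 0;
  const char* dest = "none";
  if (old_region_surplus > 0) {          // surplus flows old -> young
    success = transfer_to_young(old_region_surplus);
    dest = "young"; xfer = old_region_surplus;
  } else if (old_region_deficit > 0) {   // deficit pulls regions young -> old
    success = transfer_to_old(old_region_deficit);
    dest = "old"; xfer = old_region_deficit;
    // On failure the real code raises the cannot-expand trigger for old GC.
  }
  std::printf("%s %zu regions to %s\n",
              success ? "transferred" : "failed to transfer", xfer, dest);
}
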
@@ -733,64 +761,95 @@ void ShenandoahConcurrentGC::op_final_mark() {
     //  been set aside to hold objects evacuated from the young-gen collection set.  Conservatively, this value
     //  equals the entire amount of live young-gen memory within the collection set, even though some of this memory
     //  will likely be promoted.
-    //
-    // heap->get_alloc_supplement_reserve() represents the amount of old-gen memory that can be allocated during evacuation
-    // and update-refs phases of gc.  The young evacuation reserve has already been removed from this quantity.
 
     // Has to be done after cset selection
     heap->prepare_concurrent_roots();
 
-    if (!heap->collection_set()->is_empty()) {
-      LogTarget(Debug, gc, cset) lt;
-      if (lt.is_enabled()) {
-        ResourceMark rm;
-        LogStream ls(lt);
-        heap->collection_set()->print_on(&ls);
-      }
+    if (heap->mode()->is_generational()) {
+      ShenandoahGeneration* young_gen = heap->young_generation();
+      size_t humongous_regions_promoted = heap->get_promotable_humongous_regions();
+      size_t regular_regions_promoted_in_place = heap->get_regular_regions_promoted_in_place();
+      if (!heap->collection_set()->is_empty() || (humongous_regions_promoted + regular_regions_promoted_in_place > 0)) {
+        // Even if the collection set is empty, we need to do evacuation if there are regions to be promoted in place.
+        // Concurrent evacuation takes responsibility for registering objects and setting the remembered set cards to dirty.
+
+        LogTarget(Debug, gc, cset) lt;
+        if (lt.is_enabled()) {
+          ResourceMark rm;
+          LogStream ls(lt);
+          heap->collection_set()->print_on(&ls);
+        }
 
-      if (ShenandoahVerify) {
-        heap->verifier()->verify_before_evacuation();
-      }
+        if (ShenandoahVerify) {
+          heap->verifier()->verify_before_evacuation();
+        }
+        // TODO: we do not need to run update-references following evacuation if collection_set->is_empty().
 
-      heap->set_evacuation_in_progress(true);
-      // From here on, we need to update references.
-      heap->set_has_forwarded_objects(true);
+        heap->set_evacuation_in_progress(true);
+        // From here on, we need to update references.
+        heap->set_has_forwarded_objects(true);
 
-      // Verify before arming for concurrent processing.
-      // Otherwise, verification can trigger stack processing.
-      if (ShenandoahVerify) {
-        heap->verifier()->verify_during_evacuation();
-      }
+        // Verify before arming for concurrent processing.
+        // Otherwise, verification can trigger stack processing.
+        if (ShenandoahVerify) {
+          heap->verifier()->verify_during_evacuation();
+        }
 
-      // Arm nmethods/stack for concurrent processing
-      ShenandoahCodeRoots::arm_nmethods();
-      ShenandoahStackWatermark::change_epoch_id();
-
-      if (heap->mode()->is_generational()) {
-        // Calculate the temporary evacuation allowance supplement to young-gen memory capacity (for allocations
-        // and young-gen evacuations).
-        intptr_t adjustment = heap->get_alloc_supplement_reserve();
-        size_t young_available = heap->young_generation()->adjust_available(adjustment);
-        // old_available is memory that can hold promotions and evacuations.  Subtract out the memory that is being
-        // loaned for young-gen allocations or evacuations.
-        size_t old_available = heap->old_generation()->adjust_available(-adjustment);
-
-        log_info(gc, ergo)("After generational memory budget adjustments, old available: " SIZE_FORMAT
-                           "%s, young_available: " SIZE_FORMAT "%s",
-                           byte_size_in_proper_unit(old_available),   proper_unit_for_byte_size(old_available),
-                           byte_size_in_proper_unit(young_available), proper_unit_for_byte_size(young_available));
-      }
+        // Arm nmethods/stack for concurrent processing
+        ShenandoahCodeRoots::arm_nmethods();
+        ShenandoahStackWatermark::change_epoch_id();
 
-      if (ShenandoahPacing) {
-        heap->pacer()->setup_for_evac();
+        if (ShenandoahPacing) {
+          heap->pacer()->setup_for_evac();
+        }
+      } else {
+        if (ShenandoahVerify) {
+          heap->verifier()->verify_after_concmark();
+        }
+
+        if (VerifyAfterGC) {
+          Universe::verify();
+        }
       }
     } else {
-      if (ShenandoahVerify) {
-        heap->verifier()->verify_after_concmark();
-      }
+      // Not is_generational()
+      if (!heap->collection_set()->is_empty()) {
+        LogTarget(Info, gc, ergo) lt;
+        if (lt.is_enabled()) {
+          ResourceMark rm;
+          LogStream ls(lt);
+          heap->collection_set()->print_on(&ls);
+        }
 
-      if (VerifyAfterGC) {
-        Universe::verify();
+        if (ShenandoahVerify) {
+          heap->verifier()->verify_before_evacuation();
+        }
+
+        heap->set_evacuation_in_progress(true);
+        // From here on, we need to update references.
+        heap->set_has_forwarded_objects(true);
+
+        // Verify before arming for concurrent processing.
+        // Otherwise, verification can trigger stack processing.
+        if (ShenandoahVerify) {
+          heap->verifier()->verify_during_evacuation();
+        }
+
+        // Arm nmethods/stack for concurrent processing
+        ShenandoahCodeRoots::arm_nmethods();
+        ShenandoahStackWatermark::change_epoch_id();
+
+        if (ShenandoahPacing) {
+          heap->pacer()->setup_for_evac();
+        }
+      } else {
+        if (ShenandoahVerify) {
+          heap->verifier()->verify_after_concmark();
+        }
+
+        if (VerifyAfterGC) {
+          Universe::verify();
+        }
       }
     }
   }
@@ -812,6 +871,7 @@ ShenandoahConcurrentEvacThreadClosure::ShenandoahConcurrentEvacThreadClosure(Oop
 void ShenandoahConcurrentEvacThreadClosure::do_thread(Thread* thread) {
   JavaThread* const jt = JavaThread::cast(thread);
   StackWatermarkSet::finish_processing(jt, _oops, StackWatermarkKind::gc);
+  ShenandoahThreadLocalData::enable_plab_promotions(thread);
 }
 
 class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
@@ -825,6 +885,9 @@ class ShenandoahConcurrentEvacUpdateThreadTask : public WorkerTask {
   }
 
   void work(uint worker_id) {
+    Thread* worker_thread = Thread::current();
+    ShenandoahThreadLocalData::enable_plab_promotions(worker_thread);
+
     // ShenandoahEvacOOMScope has to be setup by ShenandoahContextEvacuateUpdateRootsClosure.
     // Otherwise, may deadlock with watermark lock
     ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
@@ -1200,7 +1263,6 @@ void ShenandoahConcurrentGC::op_final_updaterefs() {
     Universe::verify();
   }
 
-  heap->adjust_generation_sizes();
   heap->rebuild_free_set(true /*concurrent*/);
 }
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp
index 144d9095b0b..09e15a09568 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp
@@ -113,6 +113,7 @@ void ShenandoahControlThread::run_service() {
   // degenerated cycle should be 'promoted' to a full cycle. The decision to
   // trigger a cycle or not is evaluated on the regulator thread.
   ShenandoahHeuristics* global_heuristics = heap->global_generation()->heuristics();
+  bool old_bootstrap_requested = false;
   while (!in_graceful_shutdown() && !should_terminate()) {
     // Figure out if we have pending requests.
     bool alloc_failure_pending = _alloc_failure_gc.is_set();
@@ -207,10 +208,19 @@ void ShenandoahControlThread::run_service() {
           // the heuristic to run a young collection so that we can evacuate some old regions.
           assert(!heap->is_concurrent_old_mark_in_progress(), "Should not be running mixed collections and concurrent marking");
           generation = YOUNG;
+        } else if (_requested_generation == OLD && !old_bootstrap_requested) {
+          // Arrange to perform a young GC immediately followed by a bootstrap OLD GC.  OLD GC typically requires more
+          // than twice the time required for YOUNG GC, so we run a YOUNG GC to replenish the YOUNG allocation pool before
+          // we start the longer OLD GC effort.
+          old_bootstrap_requested = true;
+          generation = YOUNG;
         } else {
+          // if (old_bootstrap_requested && (_requested_generation == OLD)), this starts the bootstrap GC that
+          // immediately follows the preparatory young GC.
+          // But we will abandon the planned bootstrap GC if a GLOBAL GC has now been requested.
           generation = _requested_generation;
+          old_bootstrap_requested = false;
         }
-
         // preemption was requested or this is a regular cycle
         cause = GCCause::_shenandoah_concurrent_gc;
         set_gc_mode(default_mode);
@@ -391,10 +401,18 @@ void ShenandoahControlThread::run_service() {
 
     // Don't wait around if there was an allocation failure - start the next cycle immediately.
     if (!is_alloc_failure_gc()) {
-      // The timed wait is necessary because this thread has a responsibility to send
-      // 'alloc_words' to the pacer when it does not perform a GC.
-      MonitorLocker lock(&_control_lock, Mutex::_no_safepoint_check_flag);
-      lock.wait(ShenandoahControlIntervalMax);
+      if (old_bootstrap_requested) {
+        _requested_generation = OLD;
+        _requested_gc_cause = GCCause::_shenandoah_concurrent_gc;
+      } else {
+        // The timed wait is necessary because this thread has a responsibility to send
+        // 'alloc_words' to the pacer when it does not perform a GC.
+        MonitorLocker lock(&_control_lock, Mutex::_no_safepoint_check_flag);
+        lock.wait(ShenandoahControlIntervalMax);
+      }
+    } else {
+      // In case of allocation failure, abandon any plan to do an immediate OLD bootstrap.
+      old_bootstrap_requested = false;
     }
   }
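
The bootstrap sequencing reduces to a small state machine: the first OLD request is converted into a YOUNG cycle, and the control loop then re-requests OLD without waiting.  A sketch with illustrative names (the real code coordinates through _requested_generation and the control lock):

#include <cstdio>

enum class Gen { YOUNG, OLD };

int main() {
  Gen requested = Gen::OLD;              // regulator asked for an OLD cycle
  bool old_bootstrap_requested = false;
  for (int cycle = 0; cycle < 2; cycle++) {
    Gen generation;
    if (requested == Gen::OLD && !old_bootstrap_requested) {
      old_bootstrap_requested = true;    // replenish the young allocation pool first
      generation = Gen::YOUNG;
    } else {
      generation = requested;            // now run the bootstrap OLD cycle
      old_bootstrap_requested = false;
    }
    std::printf("cycle %d runs %s\n", cycle, generation == Gen::YOUNG ? "YOUNG" : "OLD");
    if (old_bootstrap_requested) {
      requested = Gen::OLD;              // re-request immediately, skipping the timed wait
    }
  }
}
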
 
@@ -457,10 +475,11 @@ void ShenandoahControlThread::process_phase_timings(const ShenandoahHeap* heap)
 //      |        v                                   v       |
 //      +--->  Global Degen +--------------------> Full <----+
 //
-void ShenandoahControlThread::service_concurrent_normal_cycle(const ShenandoahHeap* heap,
+void ShenandoahControlThread::service_concurrent_normal_cycle(ShenandoahHeap* heap,
                                                               const ShenandoahGenerationType generation,
                                                               GCCause::Cause cause) {
   GCIdMark gc_id_mark;
+  ShenandoahGeneration* the_generation = nullptr;
   switch (generation) {
     case YOUNG: {
       // Run a young cycle. This might or might not, have interrupted an ongoing
@@ -469,44 +488,34 @@ void ShenandoahControlThread::service_concurrent_normal_cycle(const ShenandoahHe
       // they end up in, but we have to be sure we don't promote into any regions
       // that are in the cset.
       log_info(gc, ergo)("Start GC cycle (YOUNG)");
-      service_concurrent_cycle(heap->young_generation(), cause, false);
+      the_generation = heap->young_generation();
+      service_concurrent_cycle(the_generation, cause, false);
       break;
     }
     case OLD: {
       log_info(gc, ergo)("Start GC cycle (OLD)");
+      the_generation = heap->old_generation();
       service_concurrent_old_cycle(heap, cause);
       break;
     }
     case GLOBAL_GEN: {
       log_info(gc, ergo)("Start GC cycle (GLOBAL)");
-      service_concurrent_cycle(heap->global_generation(), cause, false);
+      the_generation = heap->global_generation();
+      service_concurrent_cycle(the_generation, cause, false);
       break;
     }
     case GLOBAL_NON_GEN: {
       log_info(gc, ergo)("Start GC cycle");
-      service_concurrent_cycle(heap->global_generation(), cause, false);
+      the_generation = heap->global_generation();
+      service_concurrent_cycle(the_generation, cause, false);
       break;
     }
     default:
       ShouldNotReachHere();
   }
-  const char* msg;
-  if (heap->mode()->is_generational()) {
-    if (heap->cancelled_gc()) {
-      msg = (generation == YOUNG) ? "At end of Interrupted Concurrent Young GC" :
-            "At end of Interrupted Concurrent Bootstrap GC";
-    } else {
-      msg = (generation == YOUNG) ? "At end of Concurrent Young GC" :
-            "At end of Concurrent Bootstrap GC";
-    }
-  } else {
-    msg = heap->cancelled_gc() ? "At end of cancelled GC" :
-                                 "At end of GC";
-  }
-  heap->log_heap_status(msg);
 }
 
-void ShenandoahControlThread::service_concurrent_old_cycle(const ShenandoahHeap* heap, GCCause::Cause &cause) {
+void ShenandoahControlThread::service_concurrent_old_cycle(ShenandoahHeap* heap, GCCause::Cause &cause) {
   ShenandoahOldGeneration* old_generation = heap->old_generation();
   ShenandoahYoungGeneration* young_generation = heap->young_generation();
   ShenandoahOldGeneration::State original_state = old_generation->state();
@@ -562,7 +571,7 @@ void ShenandoahControlThread::service_concurrent_old_cycle(const ShenandoahHeap*
       set_gc_mode(bootstrapping_old);
       young_generation->set_old_gen_task_queues(old_generation->task_queues());
       ShenandoahGCSession session(cause, young_generation);
-      service_concurrent_cycle(heap,young_generation, cause, true);
+      service_concurrent_cycle(heap, young_generation, cause, true);
       process_phase_timings(heap);
       if (heap->cancelled_gc()) {
         // Young generation bootstrap cycle has failed. Concurrent mark for old generation
@@ -588,9 +597,13 @@ void ShenandoahControlThread::service_concurrent_old_cycle(const ShenandoahHeap*
       if (marking_complete) {
         assert(old_generation->state() != ShenandoahOldGeneration::MARKING, "Should not still be marking");
         if (original_state == ShenandoahOldGeneration::MARKING) {
+          heap->mmu_tracker()->record_old_marking_increment(old_generation, GCId::current(), true,
+                                                            heap->collection_set()->has_old_regions());
           heap->log_heap_status("At end of Concurrent Old Marking finishing increment");
         }
       } else if (original_state == ShenandoahOldGeneration::MARKING) {
+        heap->mmu_tracker()->record_old_marking_increment(old_generation, GCId::current(), false,
+                                                          heap->collection_set()->has_old_regions());
         heap->log_heap_status("At end of Concurrent Old Marking increment");
       }
       break;
@@ -697,7 +710,7 @@ void ShenandoahControlThread::service_concurrent_cycle(ShenandoahGeneration* gen
   service_concurrent_cycle(heap, generation, cause, do_old_gc_bootstrap);
 }
 
-void ShenandoahControlThread::service_concurrent_cycle(const ShenandoahHeap* heap,
+void ShenandoahControlThread::service_concurrent_cycle(ShenandoahHeap* heap,
                                                        ShenandoahGeneration* generation,
                                                        GCCause::Cause& cause,
                                                        bool do_old_gc_bootstrap) {
@@ -713,6 +726,32 @@ void ShenandoahControlThread::service_concurrent_cycle(const ShenandoahHeap* hea
     // collection.  Same for global collections.
     _degen_generation = generation;
   }
+  const char* msg;
+  if (heap->mode()->is_generational()) {
+    if (heap->cancelled_gc()) {
+      msg = (generation->is_young()) ? "At end of Interrupted Concurrent Young GC" :
+                                       "At end of Interrupted Concurrent Bootstrap GC";
+    } else {
+      msg = (generation->is_young()) ? "At end of Concurrent Young GC" :
+                                       "At end of Concurrent Bootstrap GC";
+      // We only record GC results if GC was successful
+      ShenandoahMmuTracker* mmu_tracker = heap->mmu_tracker();
+      if (generation->is_young()) {
+        if (heap->collection_set()->has_old_regions()) {
+          bool mixed_is_done = (heap->old_heuristics()->unprocessed_old_collection_candidates() == 0);
+          mmu_tracker->record_mixed(generation, get_gc_id(), mixed_is_done);
+        } else {
+          mmu_tracker->record_young(generation, get_gc_id());
+        }
+      } else {
+        mmu_tracker->record_bootstrap(generation, get_gc_id(), heap->collection_set()->has_old_regions());
+      }
+    }
+  } else {
+    msg = heap->cancelled_gc() ? "At end of cancelled GC" :
+                                 "At end of GC";
+  }
+  heap->log_heap_status(msg);
 }
 
 bool ShenandoahControlThread::check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point) {
@@ -928,7 +967,6 @@ void ShenandoahControlThread::handle_alloc_failure(ShenandoahAllocRequest& req)
     log_info(gc)("Failed to allocate %s, " SIZE_FORMAT "%s",
                  req.type_string(),
                  byte_size_in_proper_unit(req.size() * HeapWordSize), proper_unit_for_byte_size(req.size() * HeapWordSize));
-
     // Now that alloc failure GC is scheduled, we can abort everything else
     heap->cancel_gc(GCCause::_allocation_failure);
   }
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.hpp b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.hpp
index f9596feeeb7..339cfa7d5de 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.hpp
@@ -88,6 +88,7 @@ class ShenandoahControlThread: public ConcurrentGCThread {
   ShenandoahSharedFlag _preemption_requested;
   ShenandoahSharedFlag _gc_requested;
   ShenandoahSharedFlag _alloc_failure_gc;
+  ShenandoahSharedFlag _humongous_alloc_failure_gc;
   ShenandoahSharedFlag _graceful_shutdown;
   ShenandoahSharedFlag _do_counters_update;
   ShenandoahSharedFlag _force_counters_update;
@@ -173,11 +174,11 @@ class ShenandoahControlThread: public ConcurrentGCThread {
   void prepare_for_graceful_shutdown();
   bool in_graceful_shutdown();
 
-  void service_concurrent_normal_cycle(const ShenandoahHeap* heap,
+  void service_concurrent_normal_cycle(ShenandoahHeap* heap,
                                        const ShenandoahGenerationType generation,
                                        GCCause::Cause cause);
 
-  void service_concurrent_old_cycle(const ShenandoahHeap* heap,
+  void service_concurrent_old_cycle(ShenandoahHeap* heap,
                                     GCCause::Cause &cause);
 
   void set_gc_mode(GCMode new_mode);
@@ -191,7 +192,7 @@ class ShenandoahControlThread: public ConcurrentGCThread {
   static const char* gc_mode_name(GCMode mode);
   void notify_control_thread();
 
-  void service_concurrent_cycle(const ShenandoahHeap* heap,
+  void service_concurrent_cycle(ShenandoahHeap* heap,
                                 ShenandoahGeneration* generation,
                                 GCCause::Cause &cause,
                                 bool do_old_gc_bootstrap);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp
index fa62f7a9fe8..0a7354b17be 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahDegeneratedGC.cpp
@@ -57,7 +57,11 @@ bool ShenandoahDegenGC::collect(GCCause::Cause cause) {
   vmop_degenerated();
   ShenandoahHeap* heap = ShenandoahHeap::heap();
   if (heap->mode()->is_generational()) {
-    heap->log_heap_status("At end of Degenerated GC");
+    bool is_bootstrap_gc = heap->is_concurrent_old_mark_in_progress() && _generation->is_young();
+    heap->mmu_tracker()->record_degenerated(_generation, GCId::current(), is_bootstrap_gc,
+                                            !heap->collection_set()->has_old_regions());
+    const char* msg = is_bootstrap_gc ? "At end of Degenerated Bootstrap Old GC" : "At end of Degenerated GC";
+    heap->log_heap_status(msg);
   }
   return true;
 }
@@ -272,27 +276,53 @@ void ShenandoahDegenGC::op_degenerated() {
       }
 
       op_cleanup_complete();
+      // We defer generation resizing actions until after cset regions have been recycled.
+      if (heap->mode()->is_generational()) {
+        size_t old_region_surplus = heap->get_old_region_surplus();
+        size_t old_region_deficit = heap->get_old_region_deficit();
+        bool success;
+        size_t region_xfer;
+        const char* region_destination;
+        if (old_region_surplus) {
+          region_xfer = old_region_surplus;
+          region_destination = "young";
+          success = heap->generation_sizer()->transfer_to_young(old_region_surplus);
+        } else if (old_region_deficit) {
+          region_xfer = old_region_deficit;
+          region_destination = "old";
+          success = heap->generation_sizer()->transfer_to_old(old_region_deficit);
+          if (!success) {
+            ((ShenandoahOldHeuristics *) heap->old_generation()->heuristics())->trigger_cannot_expand();
+          }
+        } else {
+          region_destination = "none";
+          region_xfer = 0;
+          success = true;
+        }
+
+        size_t young_available = heap->young_generation()->available();
+        size_t old_available = heap->old_generation()->available();
+        log_info(gc, ergo)("After cleanup, %s " SIZE_FORMAT " regions to %s to prepare for next gc, old available: "
+                           SIZE_FORMAT "%s, young available: " SIZE_FORMAT "%s",
+                           success ? "successfully transferred" : "failed to transfer", region_xfer, region_destination,
+                           byte_size_in_proper_unit(old_available), proper_unit_for_byte_size(old_available),
+                           byte_size_in_proper_unit(young_available), proper_unit_for_byte_size(young_available));
+
+        heap->set_old_region_surplus(0);
+        heap->set_old_region_deficit(0);
+      }
       break;
     default:
       ShouldNotReachHere();
   }
 
   if (heap->mode()->is_generational()) {
-    // In case degeneration interrupted concurrent evacuation or update references,
-    // we need to clean up transient state. Otherwise, these actions have no effect.
-
-    heap->young_generation()->unadjust_available();
-    heap->old_generation()->unadjust_available();
-    // No need to old_gen->increase_used(). That was done when plabs were allocated,
-    // accounting for both old evacs and promotions.
-
-    heap->set_alloc_supplement_reserve(0);
+    // In case degeneration interrupted concurrent evacuation or update references, we need to clean up transient state.
+    // Otherwise, these actions have no effect.
     heap->set_young_evac_reserve(0);
     heap->set_old_evac_reserve(0);
     heap->reset_old_evac_expended();
     heap->set_promoted_reserve(0);
-
-    heap->adjust_generation_sizes();
   }
 
   if (ShenandoahVerify) {
@@ -354,7 +384,16 @@ void ShenandoahDegenGC::op_prepare_evacuation() {
     heap->tlabs_retire(false);
   }
 
-  if (!heap->collection_set()->is_empty()) {
+  size_t humongous_regions_promoted = heap->get_promotable_humongous_regions();
+  size_t regular_regions_promoted_in_place = heap->get_regular_regions_promoted_in_place();
+  if (!heap->collection_set()->is_empty() || (humongous_regions_promoted + regular_regions_promoted_in_place > 0)) {
+    // Even if the collection set is empty, we need to do evacuation if there are regions to be promoted in place.
+    // Degenerated evacuation takes responsibility for registering objects and setting the remembered set cards to dirty.
+
+    if (ShenandoahVerify) {
+      heap->verifier()->verify_before_evacuation();
+    }
+
     heap->set_evacuation_in_progress(true);
     heap->set_has_forwarded_objects(true);
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
index 3ac75079411..c8739734597 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
@@ -38,7 +38,6 @@
 #include "memory/resourceArea.hpp"
 #include "runtime/orderAccess.hpp"
 
-
 ShenandoahSetsOfFree::ShenandoahSetsOfFree(size_t max_regions, ShenandoahFreeSet* free_set) :
     _max(max_regions),
     _free_set(free_set),
@@ -162,9 +161,16 @@ void ShenandoahSetsOfFree::move_to_set(size_t idx, ShenandoahFreeMemoryType new_
   //  During flip_to_gc:
   //                  Mutator empty => Collector
   //                  Mutator empty => Old Collector
-  assert (((region_capacity < _region_size_bytes) && (orig_set == Mutator) && (new_set == Collector)) ||
-          ((region_capacity == _region_size_bytes) && (orig_set == Mutator) && (new_set == Collector || new_set == OldCollector)),
-          "Unexpected movement between sets");
+  // At start of update refs:
+  //                  Collector => Mutator
+  //                  OldCollector Empty => Mutator
+  assert (((region_capacity <= _region_size_bytes) &&
+           (((orig_set == Mutator) && (new_set == Collector)) ||
+            ((orig_set == Collector) && (new_set == Mutator)))) ||
+          ((region_capacity == _region_size_bytes) &&
+           (((orig_set == Mutator) && (new_set == Collector)) ||
+            ((orig_set == OldCollector) && (new_set == Mutator)) ||
+            (new_set == OldCollector))), "Unexpected movement between sets");
 
   _membership[idx] = new_set;
   _capacity_of[orig_set] -= region_capacity;
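The transitions listed in the comment can be made explicit as a predicate. The sketch below groups the conditions with the parentheses the assert intends; the enum names mirror the patch, but the function itself is illustrative, not part of the free-set code.

```cpp
#include <cstddef>

enum FreeMemoryType { NotFree, Mutator, Collector, OldCollector };

bool is_legal_move(FreeMemoryType orig, FreeMemoryType dest,
                   size_t region_capacity, size_t region_size_bytes) {
  // Partially used (or full-capacity) regions may shuttle between Mutator and
  // Collector: flip_to_gc, and the reverse move at the start of update refs.
  bool partial_ok = (region_capacity <= region_size_bytes) &&
                    (((orig == Mutator) && (dest == Collector)) ||
                     ((orig == Collector) && (dest == Mutator)));
  // Only entirely empty regions may move into or out of the OldCollector set.
  bool empty_ok = (region_capacity == region_size_bytes) &&
                  (((orig == Mutator) && (dest == Collector)) ||
                   ((orig == OldCollector) && (dest == Mutator)) ||
                   (dest == OldCollector));
  return partial_ok || empty_ok;
}
```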
@@ -398,7 +404,6 @@ void ShenandoahSetsOfFree::assert_bounds() {
 }
 #endif
 
-
 ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
   _heap(heap),
   _free_sets(max_regions, this)
@@ -412,6 +417,7 @@ ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
 HeapWord* ShenandoahFreeSet::allocate_old_with_affiliation(ShenandoahAffiliation affiliation,
                                                            ShenandoahAllocRequest& req, bool& in_new_region) {
   shenandoah_assert_heaplocked();
+
   size_t rightmost =
     (affiliation == ShenandoahAffiliation::FREE)? _free_sets.rightmost_empty(OldCollector): _free_sets.rightmost(OldCollector);
   size_t leftmost =
@@ -450,7 +456,19 @@ HeapWord* ShenandoahFreeSet::allocate_old_with_affiliation(ShenandoahAffiliation
   return nullptr;
 }
 
-HeapWord* ShenandoahFreeSet::allocate_with_affiliation(ShenandoahAffiliation affiliation, ShenandoahAllocRequest& req, bool& in_new_region) {
+void ShenandoahFreeSet::add_old_collector_free_region(ShenandoahHeapRegion* region) {
+  shenandoah_assert_heaplocked();
+  size_t idx = region->index();
+  size_t capacity = alloc_capacity(region);
+  assert(_free_sets.membership(idx) == NotFree, "Regions promoted in place should not be in any free set");
+  if (capacity >= PLAB::min_size() * HeapWordSize) {
+    _free_sets.make_free(idx, OldCollector, capacity);
+    _heap->augment_promo_reserve(capacity);
+  }
+}
+
+HeapWord* ShenandoahFreeSet::allocate_with_affiliation(ShenandoahAffiliation affiliation,
+                                                       ShenandoahAllocRequest& req, bool& in_new_region) {
   shenandoah_assert_heaplocked();
   size_t rightmost =
     (affiliation == ShenandoahAffiliation::FREE)? _free_sets.rightmost_empty(Collector): _free_sets.rightmost(Collector);
@@ -469,7 +487,8 @@ HeapWord* ShenandoahFreeSet::allocate_with_affiliation(ShenandoahAffiliation aff
       }
     }
   }
-  log_debug(gc, free)("Could not allocate collector region with affiliation: %s for request " PTR_FORMAT, shenandoah_affiliation_name(affiliation), p2i(&req));
+  log_debug(gc, free)("Could not allocate collector region with affiliation: %s for request " PTR_FORMAT,
+                      shenandoah_affiliation_name(affiliation), p2i(&req));
   return nullptr;
 }
 
@@ -494,15 +513,15 @@ HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool&
   if (_heap->mode()->is_generational()) {
     switch (req.affiliation()) {
       case ShenandoahAffiliation::OLD_GENERATION:
-        // Note: unsigned result from adjusted_unaffiliated_regions() will never be less than zero, but it may equal zero.
-        if (_heap->old_generation()->adjusted_unaffiliated_regions() <= 0) {
+        // Note: unsigned result from free_unaffiliated_regions() will never be less than zero, but it may equal zero.
+        if (_heap->old_generation()->free_unaffiliated_regions() <= 0) {
           allow_new_region = false;
         }
         break;
 
       case ShenandoahAffiliation::YOUNG_GENERATION:
-        // Note: unsigned result from adjusted_unaffiliated_regions() will never be less than zero, but it may equal zero.
-        if (_heap->young_generation()->adjusted_unaffiliated_regions() <= 0) {
+        // Note: unsigned result from free_unaffiliated_regions() will never be less than zero, but it may equal zero.
+        if (_heap->young_generation()->free_unaffiliated_regions() <= 0) {
           allow_new_region = false;
         }
         break;
@@ -515,7 +534,6 @@ HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool&
         break;
     }
   }
-
   switch (req.type()) {
     case ShenandoahAllocRequest::_alloc_tlab:
     case ShenandoahAllocRequest::_alloc_shared: {
@@ -524,8 +542,9 @@ HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool&
         ShenandoahHeapRegion* r = _heap->get_region(idx);
         if (_free_sets.in_free_set(idx, Mutator) && (allow_new_region || r->is_affiliated())) {
           // try_allocate_in() increases used if the allocation is successful.
-          HeapWord* result = try_allocate_in(r, req, in_new_region);
-          if (result != nullptr) {
+          HeapWord* result;
+          size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab)? req.min_size(): req.size();
+          if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
             return result;
           }
         }
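The gate added above can be read as a small predicate: an elastic TLAB request may shrink down to `req.min_size()`, while any other request needs the full `req.size()`. The sketch below uses local stand-in names and assumes both quantities are expressed in the same units.

```cpp
#include <cstddef>

enum RequestType { AllocTlab, AllocShared };

// Returns true when a region's remaining allocation capacity could possibly
// satisfy the request, so try_allocate_in() is worth calling at all.
bool region_worth_trying(size_t region_alloc_capacity, RequestType type,
                         size_t req_size, size_t req_min_size) {
  size_t needed = (type == AllocTlab) ? req_min_size : req_size;
  return region_alloc_capacity >= needed;
}
```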
@@ -576,19 +595,16 @@ HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool&
           }
         }
       }
-
       // No dice. Can we borrow space from mutator view?
       if (!ShenandoahEvacReserveOverflow) {
         return nullptr;
       }
 
-      // TODO:
-      // if (!allow_new_region && req.is_old() && (young_generation->adjusted_unaffiliated_regions() > 0)) {
-      //   transfer a region from young to old;
-      //   allow_new_region = true;
-      //   heap->set_old_evac_reserve(heap->get_old_evac_reserve() + region_size_bytes);
-      // }
-      //
+      if (!allow_new_region && req.is_old() && (_heap->young_generation()->free_unaffiliated_regions() > 0)) {
+        // This allows us to flip a mutator region to old_collector
+        allow_new_region = true;
+      }
+
       // We should expand old-gen if this can prevent an old-gen evacuation failure.  We don't care so much about
       // promotion failures since they can be mitigated in a subsequent GC pass.  Would be nice to know if this
       // allocation request is for evacuation or promotion.  Individual threads limit their use of PLAB memory for
@@ -706,6 +722,9 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
       // coalesce-and-fill processing.
       r->end_preemptible_coalesce_and_fill();
       _heap->clear_cards_for(r);
+      _heap->old_generation()->increment_affiliated_region_count();
+    } else {
+      _heap->young_generation()->increment_affiliated_region_count();
     }
 
     assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
@@ -727,6 +746,8 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
   // req.size() is in words, r->free() is in bytes.
   if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
     if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
+      assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
+      assert(_free_sets.in_free_set(r->index(), OldCollector), "PLABs must be allocated in OldCollector free regions");
       // Need to assure that plabs are aligned on multiple of card region.
       // Since we have Elastic TLABs, align sizes up. They may be decreased to fit in the usable
       // memory remaining in the region (which will also be aligned to cards).
@@ -838,7 +859,7 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
 
   // Check if there are enough regions left to satisfy allocation.
   if (_heap->mode()->is_generational()) {
-    size_t avail_young_regions = generation->adjusted_unaffiliated_regions();
+    size_t avail_young_regions = generation->free_unaffiliated_regions();
     if (num > _free_sets.count(Mutator) || (num > avail_young_regions)) {
       return nullptr;
     }
@@ -908,6 +929,8 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
     // While individual regions report their true use, all humongous regions are marked used in the free set.
     _free_sets.remove_from_free_sets(r->index());
   }
+  _heap->young_generation()->increase_affiliated_region_count(num);
+
   size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num;
   _free_sets.increase_used(Mutator, total_humongous_size);
   _free_sets.assert_bounds();
@@ -925,6 +948,11 @@ bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) const {
   return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
 }
 
+bool ShenandoahFreeSet::can_allocate_from(size_t idx) const {
+  ShenandoahHeapRegion* r = _heap->get_region(idx);
+  return can_allocate_from(r);
+}
+
 size_t ShenandoahFreeSet::alloc_capacity(size_t idx) const {
   ShenandoahHeapRegion* r = _heap->get_region(idx);
   return alloc_capacity(r);
@@ -976,14 +1004,15 @@ void ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) {
   size_t idx = r->index();
 
   assert(_free_sets.in_free_set(idx, Mutator), "Should be in mutator view");
+  // Note: can_allocate_from(r) means r is entirely empty or is trash that is about to be recycled
   assert(can_allocate_from(r), "Should not be allocated");
 
   size_t region_capacity = alloc_capacity(r);
   _free_sets.move_to_set(idx, OldCollector, region_capacity);
   _free_sets.assert_bounds();
-
-  // We do not ensure that the region is no longer trash,
-  // relying on try_allocate_in(), which always comes next,
+  _heap->generation_sizer()->force_transfer_to_old(1);
+  _heap->augment_old_evac_reserve(region_capacity);
+  // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
   // to recycle trash before attempting to allocate anything in the region.
 }
 
@@ -997,8 +1026,7 @@ void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
   _free_sets.move_to_set(idx, Collector, region_capacity);
   _free_sets.assert_bounds();
 
-  // We do not ensure that the region is no longer trash,
-  // relying on try_allocate_in(), which always comes next,
+  // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
   // to recycle trash before attempting to allocate anything in the region.
 }
 
@@ -1016,10 +1044,21 @@ void ShenandoahFreeSet::clear_internal() {
 // move some of the mutator regions into the collector set or old_collector set with the intent of packing
 // old_collector memory into the highest (rightmost) addresses of the heap and the collector memory into the
 // next highest addresses of the heap, with mutator memory consuming the lowest addresses of the heap.
-void ShenandoahFreeSet::find_regions_with_alloc_capacity() {
+void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regions, size_t &old_cset_regions) {
 
+  old_cset_regions = 0;
+  young_cset_regions = 0;
   for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
     ShenandoahHeapRegion* region = _heap->get_region(idx);
+    if (region->is_trash()) {
+      // Trashed regions represent regions that had been in the collection set but have not yet been "cleaned up".
+      if (region->is_old()) {
+        old_cset_regions++;
+      } else {
+        assert(region->is_young(), "Trashed region should be old or young");
+        young_cset_regions++;
+      }
+    }
     if (region->is_alloc_allowed() || region->is_trash()) {
       assert(!region->is_cset(), "Shouldn't be adding cset regions to the free set");
       assert(_free_sets.in_free_set(idx, NotFree), "We are about to make region free; it should not be free already");
@@ -1044,20 +1083,124 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity() {
   }
 }
 
-void ShenandoahFreeSet::rebuild() {
+// Move no more than max_xfer_regions from the existing Collector and OldCollector free sets to the Mutator free set.
+// This is called from outside the heap lock.
+void ShenandoahFreeSet::move_collector_sets_to_mutator(size_t max_xfer_regions) {
+  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+  size_t collector_empty_xfer = 0;
+  size_t collector_not_empty_xfer = 0;
+  size_t old_collector_empty_xfer = 0;
+
+  // Process empty regions within the Collector free set
+  if ((max_xfer_regions > 0) && (_free_sets.leftmost_empty(Collector) <= _free_sets.rightmost_empty(Collector))) {
+    ShenandoahHeapLocker locker(_heap->lock());
+    for (size_t idx = _free_sets.leftmost_empty(Collector);
+         (max_xfer_regions > 0) && (idx <= _free_sets.rightmost_empty(Collector)); idx++) {
+      if (_free_sets.in_free_set(idx, Collector) && can_allocate_from(idx)) {
+        _free_sets.move_to_set(idx, Mutator, region_size_bytes);
+        max_xfer_regions--;
+        collector_empty_xfer += region_size_bytes;
+      }
+    }
+  }
+
+  // Process empty regions within the OldCollector free set
+  size_t old_collector_regions = 0;
+  if ((max_xfer_regions > 0) && (_free_sets.leftmost_empty(OldCollector) <= _free_sets.rightmost_empty(OldCollector))) {
+    ShenandoahHeapLocker locker(_heap->lock());
+    for (size_t idx = _free_sets.leftmost_empty(OldCollector);
+         (max_xfer_regions > 0) && (idx <= _free_sets.rightmost_empty(OldCollector)); idx++) {
+      if (_free_sets.in_free_set(idx, OldCollector) && can_allocate_from(idx)) {
+        _free_sets.move_to_set(idx, Mutator, region_size_bytes);
+        max_xfer_regions--;
+        old_collector_empty_xfer += region_size_bytes;
+        old_collector_regions++;
+      }
+    }
+    if (old_collector_regions > 0) {
+      _heap->generation_sizer()->transfer_to_young(old_collector_regions);
+    }
+  }
+
+  // If there are any non-empty regions within the Collector set, we can also move them to the Mutator free set.
+  if ((max_xfer_regions > 0) && (_free_sets.leftmost(Collector) <= _free_sets.rightmost(Collector))) {
+    ShenandoahHeapLocker locker(_heap->lock());
+    for (size_t idx = _free_sets.leftmost(Collector); (max_xfer_regions > 0) && (idx <= _free_sets.rightmost(Collector)); idx++) {
+      size_t alloc_capacity = this->alloc_capacity(idx);
+      if (_free_sets.in_free_set(idx, Collector) && (alloc_capacity > 0)) {
+        _free_sets.move_to_set(idx, Mutator, alloc_capacity);
+        max_xfer_regions--;
+        collector_not_empty_xfer += alloc_capacity;
+      }
+    }
+  }
+
+  size_t collector_xfer = collector_empty_xfer + collector_not_empty_xfer;
+  size_t total_xfer = collector_xfer + old_collector_empty_xfer;
+  log_info(gc, free)("At start of update refs, moving " SIZE_FORMAT "%s to Mutator free set from Collector Reserve ("
+                     SIZE_FORMAT "%s) and from Old Collector Reserve (" SIZE_FORMAT "%s)",
+                     byte_size_in_proper_unit(total_xfer), proper_unit_for_byte_size(total_xfer),
+                     byte_size_in_proper_unit(collector_xfer), proper_unit_for_byte_size(collector_xfer),
+                     byte_size_in_proper_unit(old_collector_empty_xfer), proper_unit_for_byte_size(old_collector_empty_xfer));
+}
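The function drains its sources in a fixed priority order: empty Collector regions first, then empty OldCollector regions (paired with a generation-size transfer), then partially used Collector regions. A compact sketch of that planning order, with invented inputs:

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

// Index 0: empty Collector, 1: empty OldCollector, 2: partially used Collector.
std::vector<size_t> plan_transfers(size_t max_xfer_regions, size_t collector_empty,
                                   size_t old_collector_empty, size_t collector_partial) {
  size_t sources[3] = { collector_empty, old_collector_empty, collector_partial };
  std::vector<size_t> taken(3, 0);
  for (int i = 0; i < 3 && max_xfer_regions > 0; i++) {
    taken[i] = std::min(max_xfer_regions, sources[i]);
    max_xfer_regions -= taken[i];
  }
  return taken;
}
```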
+
+
+// Overwrite arguments to represent the number of regions in each generation that are about to be recycled.
+void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions) {
   shenandoah_assert_heaplocked();
   // This resets all state information, removing all regions from all sets.
   clear();
-
   log_debug(gc, free)("Rebuilding FreeSet");
 
   // This places regions that have alloc_capacity into the old_collector set if they identify as is_old() or the
   // mutator set otherwise.
-  find_regions_with_alloc_capacity();
+  find_regions_with_alloc_capacity(young_cset_regions, old_cset_regions);
+}
+
+void ShenandoahFreeSet::rebuild(size_t young_cset_regions, size_t old_cset_regions) {
+  shenandoah_assert_heaplocked();
+  size_t young_reserve, old_reserve;
+  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+
+  size_t old_capacity = _heap->old_generation()->max_capacity();
+  size_t old_available = _heap->old_generation()->available();
+  size_t old_unaffiliated_regions = _heap->old_generation()->free_unaffiliated_regions();
+  size_t young_capacity = _heap->young_generation()->max_capacity();
+  size_t young_available = _heap->young_generation()->available();
+  size_t young_unaffiliated_regions = _heap->young_generation()->free_unaffiliated_regions();
+
+  old_unaffiliated_regions += old_cset_regions;
+  old_available += old_cset_regions * region_size_bytes;
+  young_unaffiliated_regions += young_cset_regions;
+  young_available += young_cset_regions * region_size_bytes;
+
+  // Consult old-region surplus and deficit to make adjustments to current generation capacities and availability.
+  // The generation region transfers take place after we rebuild.
+  size_t old_region_surplus = _heap->get_old_region_surplus();
+  size_t old_region_deficit = _heap->get_old_region_deficit();
+
+  if (old_region_surplus > 0) {
+    size_t xfer_bytes = old_region_surplus * region_size_bytes;
+    assert(old_region_surplus <= old_unaffiliated_regions, "Cannot transfer regions that are affiliated");
+    old_capacity -= xfer_bytes;
+    old_available -= xfer_bytes;
+    old_unaffiliated_regions -= old_region_surplus;
+    young_capacity += xfer_bytes;
+    young_available += xfer_bytes;
+    young_unaffiliated_regions += old_region_surplus;
+  } else if (old_region_deficit > 0) {
+    size_t xfer_bytes = old_region_deficit * region_size_bytes;
+    assert(old_region_deficit <= young_unaffiliated_regions, "Cannot transfer regions that are affiliated");
+    old_capacity += xfer_bytes;
+    old_available += xfer_bytes;
+    old_unaffiliated_regions += old_region_deficit;
+    young_capacity -= xfer_bytes;
+    young_available -= xfer_bytes;
+    young_unaffiliated_regions -= old_region_deficit;
+  }
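A worked example of the bookkeeping above, with invented numbers: a surplus of two 4 MiB regions moves 8 MiB of capacity and availability, plus two unaffiliated regions, from old to young.

```cpp
#include <cassert>
#include <cstddef>

int main() {
  const size_t region_size_bytes = size_t(4) * 1024 * 1024;
  size_t old_capacity = 64 * region_size_bytes,    old_available = 10 * region_size_bytes;
  size_t young_capacity = 192 * region_size_bytes, young_available = 40 * region_size_bytes;
  size_t old_unaffiliated = 10, young_unaffiliated = 40;

  size_t old_region_surplus = 2;                   // decided by earlier heuristics
  size_t xfer_bytes = old_region_surplus * region_size_bytes;
  assert(old_region_surplus <= old_unaffiliated);  // only empty regions may move
  old_capacity -= xfer_bytes;   old_available -= xfer_bytes;   old_unaffiliated -= old_region_surplus;
  young_capacity += xfer_bytes; young_available += xfer_bytes; young_unaffiliated += old_region_surplus;

  assert(old_capacity == 62 * region_size_bytes && old_available == 8 * region_size_bytes);
  assert(young_capacity == 194 * region_size_bytes && young_available == 42 * region_size_bytes);
  assert(old_unaffiliated == 8 && young_unaffiliated == 42);
  return 0;
}
```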
 
   // Evac reserve: reserve trailing space for evacuations, with regions reserved for old evacuations placed to the right
   // of regions reserved of young evacuations.
-  size_t young_reserve, old_reserve;
   if (!_heap->mode()->is_generational()) {
     young_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve;
     old_reserve = 0;
@@ -1070,15 +1213,32 @@ void ShenandoahFreeSet::rebuild() {
       // We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass.
       young_reserve = _heap->get_young_evac_reserve();
       old_reserve = _heap->get_promoted_reserve() + _heap->get_old_evac_reserve();
+      assert(old_reserve <= old_available,
+             "Cannot reserve (" SIZE_FORMAT " + " SIZE_FORMAT") more OLD than is available: " SIZE_FORMAT,
+             _heap->get_promoted_reserve(), _heap->get_old_evac_reserve(), old_available);
     } else {
       // We are rebuilding at end of GC, so we set aside budgets specified on command line (or defaults)
-      young_reserve = (_heap->young_generation()->max_capacity() * ShenandoahEvacReserve) / 100;
-      old_reserve = MAX2((_heap->old_generation()->max_capacity() * ShenandoahOldEvacReserve) / 100,
-                         ShenandoahOldCompactionReserve * ShenandoahHeapRegion::region_size_bytes());
+      young_reserve = (young_capacity * ShenandoahEvacReserve) / 100;
+      // The auto-sizer has already made old-gen large enough to hold all anticipated evacuations and promotions.
+      // Affiliated old-gen regions are already in the OldCollector free set.  Add in the relevant number of
+      // unaffiliated regions.
+      old_reserve = old_available;
     }
   }
-  reserve_regions(young_reserve, old_reserve);
+  if (old_reserve > _free_sets.capacity_of(OldCollector)) {
+    // Old available regions that have less than PLAB::min_size() of available memory are not placed into the OldCollector
+    // free set.  Because of this, old_available may not have enough memory to represent the intended reserve.  Adjust
+    // the reserve downward to account for this possibility. This loss is part of the reason why the original budget
+    // was adjusted with ShenandoahOldEvacWaste and ShenandoahOldPromoWaste multipliers.
+    if (old_reserve > _free_sets.capacity_of(OldCollector) + old_unaffiliated_regions * region_size_bytes) {
+      old_reserve = _free_sets.capacity_of(OldCollector) + old_unaffiliated_regions * region_size_bytes;
+    }
+  }
+  if (young_reserve > young_unaffiliated_regions * region_size_bytes) {
+    young_reserve = young_unaffiliated_regions * region_size_bytes;
+  }
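The two clamps above share one idea: a reserve must not promise more memory than its free set can actually supply. A minimal sketch, with stand-in parameter names:

```cpp
#include <algorithm>
#include <cstddef>

// OldCollector capacity excludes fragments smaller than a minimum PLAB, but
// unaffiliated (empty) regions can still be added to the set wholesale.
size_t clamp_old_reserve(size_t old_reserve, size_t old_collector_capacity,
                         size_t old_unaffiliated_regions, size_t region_size_bytes) {
  return std::min(old_reserve,
                  old_collector_capacity + old_unaffiliated_regions * region_size_bytes);
}

// The young reserve can only be backed by unaffiliated (empty) regions.
size_t clamp_young_reserve(size_t young_reserve, size_t young_unaffiliated_regions,
                           size_t region_size_bytes) {
  return std::min(young_reserve, young_unaffiliated_regions * region_size_bytes);
}
```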
 
+  reserve_regions(young_reserve, old_reserve);
   _free_sets.establish_alloc_bias(OldCollector);
   _free_sets.assert_bounds();
   log_status();
@@ -1090,7 +1250,8 @@ void ShenandoahFreeSet::rebuild() {
 // the collector set is at least to_reserve, and the memory available for allocations within the old collector set
 // is at least to_reserve_old.
 void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old) {
-  for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
+  for (size_t i = _heap->num_regions(); i > 0; i--) {
+    size_t idx = i - 1;
     ShenandoahHeapRegion* r = _heap->get_region(idx);
     if (_free_sets.in_free_set(idx, Mutator)) {
       assert (!r->is_old(), "mutator_is_free regions should not be affiliated OLD");
@@ -1128,6 +1289,17 @@ void ShenandoahFreeSet::log_status() {
     size_t retired_young = 0;
     size_t retired_young_humongous = 0;
     size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+    size_t retired_young_waste = 0;
+    size_t retired_old_waste = 0;
+    size_t consumed_collector = 0;
+    size_t consumed_old_collector = 0;
+    size_t consumed_mutator = 0;
+    size_t available_old = 0;
+    size_t available_young = 0;
+    size_t available_mutator = 0;
+    size_t available_collector = 0;
+    size_t available_old_collector = 0;
+
     char buffer[BUFFER_SIZE];
     for (uint i = 0; i < BUFFER_SIZE; i++) {
       buffer[i] = '\0';
@@ -1151,12 +1323,21 @@ void ShenandoahFreeSet::log_status() {
       }
       if (_free_sets.in_free_set(i, Mutator)) {
         assert(!r->is_old(), "Old regions should not be in mutator_free set");
-        buffer[idx] = (alloc_capacity(r) == region_size_bytes)? 'M': 'm';
+        size_t capacity = alloc_capacity(r);
+        available_mutator += capacity;
+        consumed_mutator += region_size_bytes - capacity;
+        buffer[idx] = (capacity == region_size_bytes)? 'M': 'm';
       } else if (_free_sets.in_free_set(i, Collector)) {
         assert(!r->is_old(), "Old regions should not be in collector_free set");
-        buffer[idx] = (alloc_capacity(r) == region_size_bytes)? 'C': 'c';
+        size_t capacity = alloc_capacity(r);
+        available_collector += capacity;
+        consumed_collector += region_size_bytes - capacity;
+        buffer[idx] = (capacity == region_size_bytes)? 'C': 'c';
       } else if (_free_sets.in_free_set(i, OldCollector)) {
-        buffer[idx] = (alloc_capacity(r) == region_size_bytes)? 'O': 'o';
+        size_t capacity = alloc_capacity(r);
+        available_old_collector += capacity;
+        consumed_old_collector += region_size_bytes - capacity;
+        buffer[idx] = (capacity == region_size_bytes)? 'O': 'o';
       } else if (r->is_humongous()) {
         if (r->is_old()) {
           buffer[idx] = 'H';
@@ -1168,9 +1349,11 @@ void ShenandoahFreeSet::log_status() {
       } else {
         if (r->is_old()) {
           buffer[idx] = '~';
+          retired_old_waste += alloc_capacity(r);
           retired_old += region_size_bytes;
         } else {
           buffer[idx] = '_';
+          retired_young_waste += alloc_capacity(r);
           retired_young += region_size_bytes;
         }
       }
@@ -1184,12 +1367,6 @@ void ShenandoahFreeSet::log_status() {
     log_info(gc, free)(" %6u: %s", (uint) (_heap->num_regions() - remnant), buffer);
     size_t total_young = retired_young + retired_young_humongous;
     size_t total_old = retired_old + retired_old_humongous;
-    log_info(gc, free)("Retired young: " SIZE_FORMAT "%s (including humongous: " SIZE_FORMAT "%s), old: " SIZE_FORMAT
-                       "%s (including humongous: " SIZE_FORMAT "%s)",
-                       byte_size_in_proper_unit(total_young),             proper_unit_for_byte_size(total_young),
-                       byte_size_in_proper_unit(retired_young_humongous), proper_unit_for_byte_size(retired_young_humongous),
-                       byte_size_in_proper_unit(total_old),               proper_unit_for_byte_size(total_old),
-                       byte_size_in_proper_unit(retired_old_humongous),   proper_unit_for_byte_size(retired_old_humongous));
   }
 #endif
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp
index ef43c236eab..bbf6f1cafda 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp
@@ -72,7 +72,7 @@ class ShenandoahSetsOfFree {
   // Place region idx into free set which_set.  Requires that idx is currently NotFree.
   void make_free(size_t idx, ShenandoahFreeMemoryType which_set, size_t region_capacity);
 
-  // Place region idx into free set new_set.  Requires that idx is currently not NotFRee.
+  // Place region idx into free set new_set.  Requires that idx is currently not NotFree.
   void move_to_set(size_t idx, ShenandoahFreeMemoryType new_set, size_t region_capacity);
 
   // Returns the ShenandoahFreeMemoryType affiliation of region idx, or NotFree if this region is not currently free.  This does
@@ -172,11 +172,20 @@ class ShenandoahFreeSet : public CHeapObj<mtGC> {
   void flip_to_gc(ShenandoahHeapRegion* r);
   void flip_to_old_gc(ShenandoahHeapRegion* r);
 
+  void adjust_bounds_for_additional_old_collector_free_region(size_t idx);
+
+  void recompute_bounds();
+  void adjust_bounds();
+  bool touches_bounds(size_t num) const;
+
+  // The free set's "used" represents the amount of the Mutator free set that has been consumed since the most recent rebuild.
+  void increase_used(size_t amount);
   void clear_internal();
 
   void try_recycle_trashed(ShenandoahHeapRegion *r);
 
   bool can_allocate_from(ShenandoahHeapRegion *r) const;
+  bool can_allocate_from(size_t idx) const;
   bool has_alloc_capacity(size_t idx) const;
   bool has_alloc_capacity(ShenandoahHeapRegion *r) const;
   bool has_no_alloc_capacity(ShenandoahHeapRegion *r) const;
@@ -188,7 +197,11 @@ class ShenandoahFreeSet : public CHeapObj<mtGC> {
   size_t alloc_capacity(size_t idx) const;
 
   void clear();
-  void rebuild();
+  void prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions);
+  void rebuild(size_t young_cset_regions, size_t old_cset_regions);
+  void move_collector_sets_to_mutator(size_t max_xfer_regions);
+
+  void add_old_collector_free_region(ShenandoahHeapRegion* region);
 
   void recycle_trash();
 
@@ -209,7 +222,7 @@ class ShenandoahFreeSet : public CHeapObj<mtGC> {
 
   void print_on(outputStream* out) const;
 
-  void find_regions_with_alloc_capacity();
+  void find_regions_with_alloc_capacity(size_t &young_cset_regions, size_t &old_cset_regions);
   void reserve_regions(size_t young_reserve, size_t old_reserve);
 };
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
index 79c8b1c1345..db886618f38 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
@@ -175,14 +175,21 @@ void ShenandoahFullGC::op_full(GCCause::Cause cause) {
 
   metrics.snap_after();
   if (heap->mode()->is_generational()) {
+    heap->mmu_tracker()->record_full(heap->global_generation(), GCId::current());
     heap->log_heap_status("At end of Full GC");
 
     // Since we allow temporary violation of these constraints during Full GC, we want to enforce that the assertions are
     // made valid by the time Full GC completes.
-    assert(heap->old_generation()->used_regions_size() <= heap->old_generation()->adjusted_capacity(),
+    assert(heap->old_generation()->used_regions_size() <= heap->old_generation()->max_capacity(),
            "Old generation affiliated regions must be less than capacity");
-    assert(heap->young_generation()->used_regions_size() <= heap->young_generation()->adjusted_capacity(),
+    assert(heap->young_generation()->used_regions_size() <= heap->young_generation()->max_capacity(),
            "Young generation affiliated regions must be less than capacity");
+
+    assert((heap->young_generation()->used() + heap->young_generation()->get_humongous_waste())
+           <= heap->young_generation()->used_regions_size(), "Young consumed can be no larger than span of affiliated regions");
+    assert((heap->old_generation()->used() + heap->old_generation()->get_humongous_waste())
+           <= heap->old_generation()->used_regions_size(), "Old consumed can be no larger than span of affiliated regions");
+
   }
   if (metrics.is_good_progress()) {
     ShenandoahHeap::heap()->notify_gc_progress();
@@ -199,11 +206,7 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
   heap->set_gc_generation(heap->global_generation());
 
   if (heap->mode()->is_generational()) {
-    // Defer unadjust_available() invocations until after Full GC finishes its efforts because Full GC makes use
-    // of young-gen memory that may have been loaned from old-gen.
-
     // No need for old_gen->increase_used() as this was done when plabs were allocated.
-    heap->set_alloc_supplement_reserve(0);
     heap->set_young_evac_reserve(0);
     heap->set_old_evac_reserve(0);
     heap->reset_old_evac_expended();
@@ -342,8 +345,6 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
   // Resize metaspace
   MetaspaceGC::compute_new_size();
 
-  heap->adjust_generation_sizes();
-
   // Free worker slices
   for (uint i = 0; i < heap->max_workers(); i++) {
     delete worker_slices[i];
@@ -357,10 +358,7 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
     heap->verifier()->verify_after_fullgc();
   }
 
-  // Having reclaimed all dead memory, it is now safe to restore capacities to original values.
-  heap->young_generation()->unadjust_available();
-  heap->old_generation()->unadjust_available();
-
+  // Humongous regions are promoted on demand and are accounted for by normal Full GC mechanisms.
   if (VerifyAfterGC) {
     Universe::verify();
   }
@@ -1340,7 +1338,6 @@ class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
         account_for_region(r, _young_regions, _young_usage, _young_humongous_waste);
       }
     }
-
     r->set_live_data(live);
     r->reset_alloc_metadata();
   }
@@ -1501,12 +1498,84 @@ void ShenandoahFullGC::phase5_epilog() {
     ShenandoahPostCompactClosure post_compact;
     heap->heap_region_iterate(&post_compact);
     post_compact.update_generation_usage();
-    log_info(gc)("FullGC done: global usage: " SIZE_FORMAT "%s, young usage: " SIZE_FORMAT "%s, old usage: " SIZE_FORMAT "%s",
-                 byte_size_in_proper_unit(heap->global_generation()->used()), proper_unit_for_byte_size(heap->global_generation()->used()),
-                 byte_size_in_proper_unit(heap->young_generation()->used()),  proper_unit_for_byte_size(heap->young_generation()->used()),
-                 byte_size_in_proper_unit(heap->old_generation()->used()),    proper_unit_for_byte_size(heap->old_generation()->used()));
+    if (heap->mode()->is_generational()) {
+      size_t old_usage = heap->old_generation()->used_regions_size();
+      size_t old_capacity = heap->old_generation()->max_capacity();
+
+      assert(old_usage % ShenandoahHeapRegion::region_size_bytes() == 0, "Old usage must align with region size");
+      assert(old_capacity % ShenandoahHeapRegion::region_size_bytes() == 0, "Old capacity must align with region size");
+
+      if (old_capacity > old_usage) {
+        size_t excess_old_regions = (old_capacity - old_usage) / ShenandoahHeapRegion::region_size_bytes();
+        heap->generation_sizer()->transfer_to_young(excess_old_regions);
+      } else if (old_capacity < old_usage) {
+        size_t old_regions_deficit = (old_usage - old_capacity) / ShenandoahHeapRegion::region_size_bytes();
+        heap->generation_sizer()->transfer_to_old(old_regions_deficit);
+      }
+
+      log_info(gc)("FullGC done: young usage: " SIZE_FORMAT "%s, old usage: " SIZE_FORMAT "%s",
+                   byte_size_in_proper_unit(heap->young_generation()->used()), proper_unit_for_byte_size(heap->young_generation()->used()),
+                   byte_size_in_proper_unit(heap->old_generation()->used()),   proper_unit_for_byte_size(heap->old_generation()->used()));
+    }
     heap->collection_set()->clear();
-    heap->free_set()->rebuild();
+    size_t young_cset_regions, old_cset_regions;
+    heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions);
+
+    // We also do not expand old generation size following Full GC because we have scrambled age populations and
+    // no longer have objects separated by age into distinct regions.
+
+    // TODO: Do we need to fix FullGC so that it maintains aged segregation of objects into distinct regions?
+    //       A partial solution would be to remember how many objects are of tenure age following Full GC, but
+    //       this is probably suboptimal, because most of these objects will not reside in a region that will be
+    //       selected for the next evacuation phase.
+
+    // In case this Full GC resulted from degeneration, clear the tally on anticipated promotion.
+    heap->clear_promotion_potential();
+    heap->clear_promotion_in_place_potential();
+
+    if (heap->mode()->is_generational()) {
+      // Invoke this in case we are able to transfer memory from OLD to YOUNG.
+      heap->adjust_generation_sizes_for_next_cycle(0, 0, 0);
+    }
+    heap->free_set()->rebuild(young_cset_regions, old_cset_regions);
+
+    // We defer generation resizing actions until after cset regions have been recycled.  We do this even following an
+    // abbreviated cycle.
+    if (heap->mode()->is_generational()) {
+      bool success;
+      size_t region_xfer;
+      const char* region_destination;
+      ShenandoahYoungGeneration* young_gen = heap->young_generation();
+      ShenandoahGeneration* old_gen = heap->old_generation();
+
+      size_t old_region_surplus = heap->get_old_region_surplus();
+      size_t old_region_deficit = heap->get_old_region_deficit();
+      if (old_region_surplus > 0) {
+        success = heap->generation_sizer()->transfer_to_young(old_region_surplus);
+        region_destination = "young";
+        region_xfer = old_region_surplus;
+      } else if (old_region_deficit > 0) {
+        success = heap->generation_sizer()->transfer_to_old(old_region_deficit);
+        region_destination = "old";
+        region_xfer = old_region_deficit;
+        if (!success) {
+          ((ShenandoahOldHeuristics *) old_gen->heuristics())->trigger_cannot_expand();
+        }
+      } else {
+        region_destination = "none";
+        region_xfer = 0;
+        success = true;
+      }
+      heap->set_old_region_surplus(0);
+      heap->set_old_region_deficit(0);
+      size_t young_available = young_gen->available();
+      size_t old_available = old_gen->available();
+      log_info(gc, ergo)("After cleanup, %s " SIZE_FORMAT " regions to %s to prepare for next gc, old available: "
+                         SIZE_FORMAT "%s, young_available: " SIZE_FORMAT "%s",
+                         success? "successfully transferred": "failed to transfer", region_xfer, region_destination,
+                         byte_size_in_proper_unit(old_available), proper_unit_for_byte_size(old_available),
+                         byte_size_in_proper_unit(young_available), proper_unit_for_byte_size(young_available));
+    }
+    heap->clear_cancelled_gc(true /* clear oom handler */);
   }
-  heap->clear_cancelled_gc(true /* clear oom handler */);
 }
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
index 22281b3250e..3606e2cf632 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
@@ -225,7 +225,6 @@ void ShenandoahGeneration::prepare_gc() {
 void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* heap, bool* preselected_regions,
                                                       ShenandoahCollectionSet* collection_set,
                                                       size_t &consumed_by_advance_promotion) {
-  assert(heap->mode()->is_generational(), "Only generational mode uses evacuation budgets.");
   size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
   size_t regions_available_to_loan = 0;
   size_t minimum_evacuation_reserve = ShenandoahOldCompactionReserve * region_size_bytes;
@@ -249,160 +248,90 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* heap, bool
 
   // Do not fill up old-gen memory with promotions.  Reserve some amount of memory for compaction purposes.
   size_t young_evac_reserve_max = 0;
-  if (heap->doing_mixed_evacuations()) {
-    // Compute old_evacuation_reserve: how much memory are we reserving to hold the results of
-    // evacuating old-gen heap regions?  In order to sustain a consistent pace of young-gen collections,
-    // the goal is to maintain a consistent value for this parameter (when the candidate set is not
-    // empty).  This value is the minimum of:
-    //   1. old_gen->available()
-    //   2. old-gen->capacity() * ShenandoahOldEvacReserve) / 100
-    //       (e.g. old evacuation should be no larger than 5% of old_gen capacity)
-    //   3. ((young_gen->capacity * ShenandoahEvacReserve / 100) * ShenandoahOldEvacRatioPercent) / 100
-    //       (e.g. old evacuation should be no larger than 12% of young-gen evacuation)
-    old_evacuation_reserve = old_generation->available();
-    // This assertion has been disabled because we expect this code to be replaced by 05/2023
-    // assert(old_evacuation_reserve > minimum_evacuation_reserve, "Old-gen available has not been preserved!");
-    size_t old_evac_reserve_max = old_generation->max_capacity() * ShenandoahOldEvacReserve / 100;
-    if (old_evac_reserve_max < old_evacuation_reserve) {
-      old_evacuation_reserve = old_evac_reserve_max;
-    }
-    young_evac_reserve_max =
-      (((young_generation->max_capacity() * ShenandoahEvacReserve) / 100) * ShenandoahOldEvacRatioPercent) / 100;
-    if (young_evac_reserve_max < old_evacuation_reserve) {
-      old_evacuation_reserve = young_evac_reserve_max;
-    }
-  }
 
-  if (minimum_evacuation_reserve > old_generation->available()) {
-    // Due to round-off errors during enforcement of minimum_evacuation_reserve during previous GC passes,
-    // there can be slight discrepancies here.
-    minimum_evacuation_reserve = old_generation->available();
-  }
+  // First priority is to reclaim the easy garbage out of young-gen.
 
-  heap->set_old_evac_reserve(old_evacuation_reserve);
-  heap->reset_old_evac_expended();
-
-  // Compute the young evacuation reserve: This is how much memory is available for evacuating young-gen objects.
-  // We ignore the possible effect of promotions, which reduce demand for young-gen evacuation memory.
-  //
-  // TODO: We could give special treatment to the regions that have reached promotion age, because we know their
-  // live data is entirely eligible for promotion.  This knowledge can feed both into calculations of young-gen
-  // evacuation reserve and promotion reserve.
-  //
-  //  young_evacuation_reserve for young generation: how much memory are we reserving to hold the results
-  //  of evacuating young collection set regions?  This is typically smaller than the total amount
-  //  of available memory, and is also smaller than the total amount of marked live memory within
-  //  young-gen.  This value is the smaller of
-  //
-  //    1. (young_gen->capacity() * ShenandoahEvacReserve) / 100
-  //    2. (young_gen->available() + old_gen_memory_available_to_be_loaned
-  //
-  //  ShenandoahEvacReserve represents the configured target size of the evacuation region.  We can only honor
-  //  this target if there is memory available to hold the evacuations.  Memory is available if it is already
-  //  free within young gen, or if it can be borrowed from old gen.  Since we have not yet chosen the collection
-  //  sets, we do not yet know the exact accounting of how many regions will be freed by this collection pass.
-  //  What we do know is that there will be at least one evacuated young-gen region for each old-gen region that
-  //  is loaned to the evacuation effort (because regions to be collected consume more memory than the compacted
-  //  regions that will replace them).  In summary, if there are old-gen regions that are available to hold the
-  //  results of young-gen evacuations, it is safe to loan them for this purpose.  At this point, we have not yet
-  //  established a promoted_reserve.  We'll do that after we choose the collection set and analyze its impact
-  //  on available memory.
-  //
-  // We do not know the evacuation_supplement until after we have computed the collection set.  It is not always
-  // the case that young-regions inserted into the collection set will result in net decrease of in-use regions
-  // because ShenandoahEvacWaste times multiplied by memory within the region may be larger than the region size.
-  // The problem is especially relevant to regions that have been inserted into the collection set because they have
-  // reached tenure age.  These regions tend to have much higher utilization (e.g. 95%).  These regions also offer
-  // a unique opportunity because we know that every live object contained within the region is elgible to be
-  // promoted.  Thus, the following implementation treats these regions specially:
-  //
-  //  1. Before beginning collection set selection, we tally the total amount of live memory held within regions
-  //     that are known to have reached tenure age.  If this memory times ShenandoahEvacWaste is available within
-  //     old-gen memory, establish an advance promotion reserve to hold all or some percentage of these objects.
-  //     This advance promotion reserve is excluded from memory available for holding old-gen evacuations and cannot
-  //     be "loaned" to young gen.
-  //
-  //  2. Tenure-aged regions are included in the collection set iff their evacuation size * ShenandoahEvacWaste fits
-  //     within the advance promotion reserve.  It is counter productive to evacuate these regions if they cannot be
-  //     evacuated directly into old-gen memory.  So if there is not sufficient memory to hold copies of their
-  //     live data right now, we'll just let these regions remain in young for now, to be evacuated by a subsequent
-  //     evacuation pass.
-  //
-  //  3. Next, we calculate a young-gen evacuation budget, which is the smaller of the two quantities mentioned
-  //     above.  old_gen_memory_available_to_be_loaned is calculated as:
-  //       old_gen->available - (advance-promotion-reserve + old-gen_evacuation_reserve)
-  //
-  //  4. When choosing the collection set, special care is taken to assure that the amount of loaned memory required to
-  //     hold the results of evacuation is smaller than the total memory occupied by the regions added to the collection
-  //     set.  We need to take these precautions because we do not know how much memory will be reclaimed by evacuation
-  //     until after the collection set has been constructed.  The algorithm is as follows:
-  //
-  //     a. We feed into the algorithm (i) young available at the start of evacuation and (ii) the amount of memory
-  //        loaned from old-gen that is available to hold the results of evacuation.
-  //     b. As candidate regions are added into the young-gen collection set, we maintain accumulations of the amount
-  //        of memory spanned by the collection set regions and the amount of memory that must be reserved to hold
-  //        evacuation results (by multiplying live-data size by ShenandoahEvacWaste).  We process candidate regions
-  //        in order of decreasing amounts of garbage.  We skip over (and do not include into the collection set) any
-  //        regions that do not satisfy all of the following conditions:
-  //
-  //          i. The amount of live data within the region as scaled by ShenandoahEvacWaste must fit within the
-  //             relevant evacuation reserve (live data of old-gen regions must fit within the old-evac-reserve, live
-  //             data of young-gen tenure-aged regions must fit within the advance promotion reserve, live data within
-  //             other young-gen regions must fit within the youn-gen evacuation reserve).
-  //         ii. The accumulation of memory consumed by evacuation must not exceed the accumulation of memory reclaimed
-  //             through evacuation by more than young-gen available.
-  //        iii. Other conditions may be enforced as appropriate for specific heuristics.
-  //
-  //       Note that regions are considered for inclusion in the selection set in order of decreasing amounts of garbage.
-  //       It is possible that a region with a larger amount of garbage will be rejected because it also has a larger
-  //       amount of live data and some region that follows this region in candidate order is included in the collection
-  //       set (because it has less live data and thus can fit within the evacuation limits even though it has less
-  //       garbage).
-
-  size_t young_evacuation_reserve = (young_generation->max_capacity() * ShenandoahEvacReserve) / 100;
-  // old evacuation can pack into existing partially used regions.  young evacuation and loans for young allocations
-  // need to target regions that do not already hold any old-gen objects.  Round down.
-  regions_available_to_loan = old_generation->free_unaffiliated_regions();
-
-  size_t required_evacuation_reserve;
-  // Memory evacuated from old-gen on this pass will be available to hold old-gen evacuations in next pass.
-  if (old_evacuation_reserve > minimum_evacuation_reserve) {
-    required_evacuation_reserve = 0;
+  // maximum_young_evacuation_reserve is an upper bound on memory to be evacuated out of young
+  size_t maximum_young_evacuation_reserve = (young_generation->max_capacity() * ShenandoahEvacReserve) / 100;
+  size_t young_evacuation_reserve = maximum_young_evacuation_reserve;
+  size_t excess_young;
+  if (young_generation->available() > young_evacuation_reserve) {
+    excess_young = young_generation->available() - young_evacuation_reserve;
   } else {
-    required_evacuation_reserve = minimum_evacuation_reserve - old_evacuation_reserve;
+    young_evacuation_reserve = young_generation->available();
+    excess_young = 0;
   }
-
-  consumed_by_advance_promotion = _heuristics->select_aged_regions(
-    old_generation->available() - old_evacuation_reserve - required_evacuation_reserve, num_regions, preselected_regions);
-  size_t net_available_old_regions =
-    (old_generation->available() - old_evacuation_reserve - consumed_by_advance_promotion) / region_size_bytes;
-
- if (regions_available_to_loan > net_available_old_regions) {
-    regions_available_to_loan = net_available_old_regions;
+  size_t unaffiliated_young = young_generation->free_unaffiliated_regions() * region_size_bytes;
+  if (excess_young > unaffiliated_young) {
+    excess_young = unaffiliated_young;
+  } else {
+    // round down to multiple of region size
+    excess_young /= region_size_bytes;
+    excess_young *= region_size_bytes;
+  }
+  // excess_young is available to be transferred to OLD.  Assume that OLD will not request any more than had
+  // already been set aside for its promotion and evacuation needs at the end of previous GC.  No need to
+  // hold back memory for allocation runway.
+
+  ShenandoahOldHeuristics* old_heuristics = heap->old_heuristics();
+
+  // maximum_old_evacuation_reserve is an upper bound on memory evacuated from old and evacuated to old (promoted).
+  size_t maximum_old_evacuation_reserve =
+    maximum_young_evacuation_reserve * ShenandoahOldEvacRatioPercent / (100 - ShenandoahOldEvacRatioPercent);
+  // Here's the algebra:
+  //  TotalEvacuation = OldEvacuation + YoungEvacuation
+  //  OldEvacuation = TotalEvacuation * (ShenandoahOldEvacRatioPercent/100)
+  //  Substituting the first equation for TotalEvacuation and solving for OldEvacuation:
+  //  OldEvacuation = YoungEvacuation * (ShenandoahOldEvacRatioPercent/100)/(1 - ShenandoahOldEvacRatioPercent/100)
+  //                = YoungEvacuation * ShenandoahOldEvacRatioPercent/(100 - ShenandoahOldEvacRatioPercent)
+
+  if (maximum_old_evacuation_reserve > old_generation->available()) {
+    maximum_old_evacuation_reserve = old_generation->available();
+  }
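A numeric check of this algebra, using an invented ratio: if `ShenandoahOldEvacRatioPercent` were 25, old evacuation may be one third of young evacuation (25/75), which is one quarter of total evacuation.

```cpp
#include <cassert>
#include <cstddef>

int main() {
  const size_t young_reserve = 300;  // MB, hypothetical
  const size_t ratio_percent = 25;   // hypothetical ShenandoahOldEvacRatioPercent
  size_t old_reserve = young_reserve * ratio_percent / (100 - ratio_percent);  // 300 * 25/75 = 100
  size_t total = young_reserve + old_reserve;                                  // 400
  assert(old_reserve * 100 == total * ratio_percent);  // old is exactly 25% of the total
  return 0;
}
```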
+
+  // Second priority is to reclaim garbage out of old-gen if there are old-gen collection candidates.  Third priority
+  // is to promote as much as we have room to promote.  However, if old-gen memory is in short supply, this means young
+  // GC is operating under "duress" and was unable to transfer the memory that we would normally expect.  In this case,
+  // old-gen will refrain from compacting itself in order to allow a quicker young-gen cycle (by avoiding the update-refs
+  // through ALL of old-gen).  If there is some memory available in old-gen, we will use this for promotions as promotions
+  // do not add to the update-refs burden of GC.
+
+  size_t old_promo_reserve;
+  if (old_heuristics->unprocessed_old_collection_candidates() > 0) {
+    // We reserved all old-gen memory at end of previous GC to hold anticipated evacuations to old-gen.  If this is
+    // mixed evacuation, reserve all of this memory for compaction of old-gen and do not promote.  Prioritize compaction
+    // over promotion in order to defragment OLD so that it will be better prepared to efficiently receive promoted memory.
+    old_evacuation_reserve = maximum_old_evacuation_reserve;
+    old_promo_reserve = 0;
+  } else {
+    // Make all old-evacuation memory available for promotion, but if we can't use it all for promotion, we'll allow some evacuation.
+    old_evacuation_reserve = 0;
+    old_promo_reserve = maximum_old_evacuation_reserve;
   }
 
-  // Otherwise, regions_available_to_loan is less than net_available_old_regions because available memory is
-  // scattered between multiple partially used regions.
+  // We see too many old-evacuation failures if we force ourselves to evacuate into regions that are not initially empty.
+  // So we limit the old-evacuation reserve to unfragmented memory.  Even so, old-evacuation is free to fill in nooks and
+  // crannies within existing partially used regions and it generally tries to do so.
+  size_t old_free_regions = old_generation->free_unaffiliated_regions();
+  size_t old_free_unfragmented = old_free_regions * region_size_bytes;
+  if (old_evacuation_reserve > old_free_unfragmented) {
+    size_t delta = old_evacuation_reserve - old_free_unfragmented;
+    old_evacuation_reserve -= delta;
 
-  if (young_evacuation_reserve > young_generation->available()) {
-    size_t short_fall = young_evacuation_reserve - young_generation->available();
-    if (regions_available_to_loan * region_size_bytes >= short_fall) {
-      old_regions_loaned_for_young_evac = (short_fall + region_size_bytes - 1) / region_size_bytes;
-      regions_available_to_loan -= old_regions_loaned_for_young_evac;
-    } else {
-      old_regions_loaned_for_young_evac = regions_available_to_loan;
-      regions_available_to_loan = 0;
-      young_evacuation_reserve = young_generation->available() + old_regions_loaned_for_young_evac * region_size_bytes;
-      // In this case, there's no memory available for new allocations while evacuating and updating, unless we
-      // find more old-gen memory to borrow below.
-    }
+    // Let promo consume fragments of old-gen memory.
+    old_promo_reserve += delta;
   }
-  // In generational mode, we may end up choosing a young collection set that contains so many promotable objects
-  // that there is not sufficient space in old generation to hold the promoted objects.  That is ok because we have
-  // assured there is sufficient space in young generation to hold the rejected promotion candidates.  These rejected
-  // promotion candidates will presumably be promoted in a future evacuation cycle.
-  heap->set_young_evac_reserve(young_evacuation_reserve);
   collection_set->establish_preselected(preselected_regions);
+  consumed_by_advance_promotion = _heuristics->select_aged_regions(old_promo_reserve, num_regions, preselected_regions);
+  assert(consumed_by_advance_promotion <= maximum_old_evacuation_reserve, "Cannot promote more than available old-gen memory");
+  if (consumed_by_advance_promotion < old_promo_reserve) {
+    // If we're in a global collection, this memory can be used for old evacuations
+    old_evacuation_reserve += old_promo_reserve - consumed_by_advance_promotion;
+  }
+  heap->set_young_evac_reserve(young_evacuation_reserve);
+  heap->set_old_evac_reserve(old_evacuation_reserve);
+  heap->set_promoted_reserve(consumed_by_advance_promotion);
+
+  // There is no need to expand OLD because all memory used here was set aside at end of previous GC
 }
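Putting the pieces together, this sketch mirrors the flow of the function end to end: young reserve from capacity and availability, old budget from the ratio, the split between compaction and promotion, the unfragmented clamp, and the flow-back of unclaimed promotion budget. Names and structure are illustrative stand-ins, not the real code.

```cpp
#include <algorithm>
#include <cstddef>

struct Budgets { size_t young_evac; size_t old_evac; size_t promo; };

Budgets compute_budgets(size_t young_capacity, size_t young_available,
                        size_t old_available, size_t old_unfragmented,
                        bool have_old_candidates, size_t consumed_by_advance_promotion,
                        size_t evac_reserve_pct, size_t old_ratio_pct /* < 100 */) {
  // Young evacuation reserve: a percentage of young capacity, capped by what
  // is actually available in young.
  size_t young_max = (young_capacity * evac_reserve_pct) / 100;
  size_t young_evac = std::min(young_max, young_available);

  // Old budget derives from the young reserve via the old/young ratio,
  // capped by old-gen availability.
  size_t old_max = std::min(young_max * old_ratio_pct / (100 - old_ratio_pct), old_available);

  // Mixed evacuations prioritize compaction of old-gen; otherwise the entire
  // old budget is offered to promotion.
  size_t old_evac = have_old_candidates ? old_max : 0;
  size_t promo = have_old_candidates ? 0 : old_max;

  // Old evacuation is limited to unfragmented (empty) regions; the displaced
  // budget goes to promotion, which can use fragments.
  if (old_evac > old_unfragmented) {
    promo += old_evac - old_unfragmented;
    old_evac = old_unfragmented;
  }

  // Promotion budget not claimed by preselected aged regions flows back to
  // old evacuation.
  if (consumed_by_advance_promotion < promo) {
    old_evac += promo - consumed_by_advance_promotion;
    promo = consumed_by_advance_promotion;
  }
  return { young_evac, old_evac, promo };
}
```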
 
 // Having chosen the collection set, adjust the budgets for generational mode based on its composition.  Note
@@ -425,356 +354,92 @@ void ShenandoahGeneration::adjust_evacuation_budgets(ShenandoahHeap* heap, Shena
   // available that results from a decrease in memory consumed by old evacuation is not necessarily available to be loaned
   // to young-gen.
 
-  assert(heap->mode()->is_generational(), "Only generational mode uses evacuation budgets.");
-  size_t old_regions_loaned_for_young_evac, regions_available_to_loan;
   size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
   ShenandoahOldGeneration* old_generation = heap->old_generation();
   ShenandoahYoungGeneration* young_generation = heap->young_generation();
-  size_t old_evacuated = collection_set->get_old_bytes_reserved_for_evacuation();
-  size_t old_evacuated_committed = (size_t) (ShenandoahEvacWaste * old_evacuated);
-  size_t old_evacuation_reserve = heap->get_old_evac_reserve();
-  // Immediate garbage found during choose_collection_set() is all young
-  size_t immediate_garbage = collection_set->get_immediate_trash();
-  size_t old_available = old_generation->available();
-  size_t young_available = young_generation->available() + immediate_garbage;
-  size_t loaned_regions = 0;
-  size_t available_loan_remnant = 0; // loaned memory that is not yet dedicated to any particular budget
-
-  // We expect this code to be replaced by 05/01/23.
-  //
-  // assert(((consumed_by_advance_promotion * 33) / 32) >= collection_set->get_young_bytes_to_be_promoted() * ShenandoahEvacWaste,
-  //       "Advance promotion (" SIZE_FORMAT ") should be at least young_bytes_to_be_promoted (" SIZE_FORMAT
-  //       ")* ShenandoahEvacWaste, totalling: " SIZE_FORMAT ", within round-off errors of up to 3.125%%",
-  //       consumed_by_advance_promotion, collection_set->get_young_bytes_to_be_promoted(),
-  //       (size_t) (collection_set->get_young_bytes_to_be_promoted() * ShenandoahEvacWaste));
-
-  // assert(consumed_by_advance_promotion <= (collection_set->get_young_bytes_to_be_promoted() * ShenandoahEvacWaste * 33) / 32,
-  //       "Round-off errors should be less than 3.125%%, consumed by advance: " SIZE_FORMAT ", promoted: " SIZE_FORMAT,
-  //       consumed_by_advance_promotion, (size_t) (collection_set->get_young_bytes_to_be_promoted() * ShenandoahEvacWaste));
 
+  // Preselected regions have been inserted into the collection set, so we no longer need the preselected array.
   collection_set->abandon_preselected();
 
+  size_t old_evacuated = collection_set->get_old_bytes_reserved_for_evacuation();
+  size_t old_evacuated_committed = (size_t) (ShenandoahOldEvacWaste * old_evacuated);
+  size_t old_evacuation_reserve = heap->get_old_evac_reserve();
+
   if (old_evacuated_committed > old_evacuation_reserve) {
-    // This should only happen due to round-off errors when enforcing ShenandoahEvacWaste
+    // This should only happen due to round-off errors when enforcing ShenandoahOldEvacWaste
     assert(old_evacuated_committed <= (33 * old_evacuation_reserve) / 32,
            "Round-off errors should be less than 3.125%%, committed: " SIZE_FORMAT ", reserved: " SIZE_FORMAT,
            old_evacuated_committed, old_evacuation_reserve);
     old_evacuated_committed = old_evacuation_reserve;
+    // Leave old_evac_reserve as previously configured
   } else if (old_evacuated_committed < old_evacuation_reserve) {
-    // This may happen if the old-gen collection consumes less than full budget.
+    // This happens if the old-gen collection consumes less than full budget.
     old_evacuation_reserve = old_evacuated_committed;
     heap->set_old_evac_reserve(old_evacuation_reserve);
   }
 
-  // Recompute old_regions_loaned_for_young_evac because young-gen collection set may not need all the memory
-  // originally reserved.
-  size_t young_promoted = collection_set->get_young_bytes_to_be_promoted();
-  size_t young_promoted_reserve_used = (size_t) (ShenandoahEvacWaste * young_promoted);
+  size_t young_advance_promoted = collection_set->get_young_bytes_to_be_promoted();
+  size_t young_advance_promoted_reserve_used = (size_t) (ShenandoahPromoEvacWaste * young_advance_promoted);
 
   size_t young_evacuated = collection_set->get_young_bytes_reserved_for_evacuation();
   size_t young_evacuated_reserve_used = (size_t) (ShenandoahEvacWaste * young_evacuated);
 
-  // We'll invoke heap->set_young_evac_reserve() further below, after we make additional adjustments to its value
-
-  // Adjust old_regions_loaned_for_young_evac to feed into calculations of promoted_reserve
-  if (young_evacuated_reserve_used > young_available) {
-    size_t short_fall = young_evacuated_reserve_used - young_available;
-
-    // region_size_bytes is a power of 2.  loan an integral number of regions.
-    size_t revised_loan_for_young_evacuation = (short_fall + region_size_bytes - 1) / region_size_bytes;
-
-    // available_loan_remnant represents memory loaned from old-gen but not required for young evacuation.
-    // This is the excess loaned memory that results from rounding the required loan up to an integral number
-    // of heap regions.  This will be dedicated to alloc_supplement below.
-    available_loan_remnant = (revised_loan_for_young_evacuation * region_size_bytes) - short_fall;
-
-    // We previously loaned more than was required by young-gen evacuation.  So claw some of this memory back.
-    old_regions_loaned_for_young_evac = revised_loan_for_young_evacuation;
-    loaned_regions = old_regions_loaned_for_young_evac;
-  } else {
-    // Undo the previous loan, if any.
-    old_regions_loaned_for_young_evac = 0;
-    loaned_regions = 0;
-  }
-
-  size_t old_bytes_loaned_for_young_evac = old_regions_loaned_for_young_evac * region_size_bytes - available_loan_remnant;
-
-  // Recompute regions_available_to_loan based on possible changes to old_regions_loaned_for_young_evac and
-  // old_evacuation_reserve.
-
-  // Any decrease in old_regions_loaned_for_young_evac are immediately available to be loaned
-  // However, a change to old_evacuation_reserve() is not necessarily available to loan, because this memory may
-  // reside within many fragments scattered throughout old-gen.
-
-  regions_available_to_loan = old_generation->free_unaffiliated_regions();
-  size_t working_old_available = old_generation->available();
-
-  assert(regions_available_to_loan * region_size_bytes <= working_old_available,
-         "Regions available to loan  must be less than available memory");
-
-  // fragmented_old_total is the amount of memory in old-gen beyond regions_available_to_loan that is otherwise not
-  // yet dedicated to a particular budget.  This memory can be used for promotion_reserve.
-  size_t fragmented_old_total = working_old_available - regions_available_to_loan * region_size_bytes;
-
-  // fragmented_old_usage is the memory that is dedicated to holding evacuated old-gen objects, which does not need
-  // to be an integral number of regions.
-  size_t fragmented_old_usage = old_evacuated_committed + consumed_by_advance_promotion;
-
-  if (fragmented_old_total >= fragmented_old_usage) {
-    // Seems this will be rare.  In this case, all of the memory required for old-gen evacuations and promotions can be
-    // taken from the existing fragments within old-gen.  Reduce this fragmented total by this amount.
-    fragmented_old_total -= fragmented_old_usage;
-    // And reduce regions_available_to_loan by the regions dedicated to young_evac.
-    regions_available_to_loan -= old_regions_loaned_for_young_evac;
-  } else {
-    // In this case, we need to dedicate some of the regions_available_to_loan to hold the results of old-gen evacuations
-    // and promotions.
-
-    size_t unaffiliated_memory_required_for_old = fragmented_old_usage - fragmented_old_total;
-    size_t unaffiliated_regions_used_by_old = (unaffiliated_memory_required_for_old + region_size_bytes - 1) / region_size_bytes;
-    regions_available_to_loan -= (unaffiliated_regions_used_by_old + old_regions_loaned_for_young_evac);
-
-    size_t memory_for_promotions_and_old_evac = fragmented_old_total + unaffiliated_regions_used_by_old;
-    size_t memory_required_for_promotions_and_old_evac = fragmented_old_usage;
-    size_t excess_fragmented = memory_for_promotions_and_old_evac - memory_required_for_promotions_and_old_evac;
-    fragmented_old_total = excess_fragmented;
-  }
-
-  // Subtract from working_old_available old_evacuated_committed and consumed_by_advance_promotion
-  working_old_available -= fragmented_old_usage;
-  // And also subtract out the regions loaned for young evacuation
-  working_old_available -= old_regions_loaned_for_young_evac * region_size_bytes;
-
-  // Assure that old_evacuated_committed + old_bytes_loaned_for_young_evac >= the minimum evacuation reserve
-  // in order to prevent promotion reserve from violating minimum evacuation reserve.
-  size_t old_regions_reserved_for_alloc_supplement = 0;
-  size_t old_bytes_reserved_for_alloc_supplement = 0;
-  size_t reserved_bytes_for_future_old_evac = 0;
-
-  old_bytes_reserved_for_alloc_supplement = available_loan_remnant;
-  available_loan_remnant = 0;
-
-  // Memory that has been loaned for young evacuations and old-gen regions in the current mixed-evacuation collection
-  // set will be available to hold future old-gen evacuations.  If this memory is less than the desired amount of memory
-  // set aside for old-gen compaction reserve, try to set aside additional memory so that it will be available during
-  // the next mixed evacuation cycle.  Note that memory loaned to young-gen for allocation supplement is excluded from
-  // the old-gen promotion reserve.
-  size_t future_evac_reserve_regions = old_regions_loaned_for_young_evac + collection_set->get_old_region_count();
-  size_t collected_regions = collection_set->get_young_region_count();
-
-  if (future_evac_reserve_regions < ShenandoahOldCompactionReserve) {
-    // Require that we loan more memory for holding young evacuations to assure that we have adequate reserves to receive
-    // old-gen evacuations during subsequent collections.  Loaning this memory for an allocation supplement does not
-    // satisfy our needs because newly allocated objects are not necessarily counter-balanced by reclaimed collection
-    // set regions.
-
-    // Put this memory into reserve by identifying it as old_regions_loaned_for_young_evac
-    size_t additional_regions_to_loan = ShenandoahOldCompactionReserve - future_evac_reserve_regions;
-
-    // We can loan additional regions to be repaid from the anticipated recycling of young collection set regions
-    // provided that these regions are currently available within old-gen memory.
-    size_t collected_regions_to_loan;
-    if (collected_regions >= additional_regions_to_loan) {
-      collected_regions_to_loan = additional_regions_to_loan;
-      additional_regions_to_loan = 0;
-    } else if (collected_regions > 0) {
-      collected_regions_to_loan = collected_regions;
-      additional_regions_to_loan -= collected_regions_to_loan;
-    } else {
-      collected_regions_to_loan = 0;
-    }
-
-    if (collected_regions_to_loan > 0) {
-      // We're evacuating at least this many regions, it's ok to use these regions for allocation supplement since
-      // we'll be able to repay the loan at end of this GC pass, assuming the regions are available.
-      if (collected_regions_to_loan > regions_available_to_loan) {
-        collected_regions_to_loan = regions_available_to_loan;
-      }
-      old_bytes_reserved_for_alloc_supplement += collected_regions_to_loan * region_size_bytes;
-      regions_available_to_loan -= collected_regions_to_loan;
-      loaned_regions += collected_regions_to_loan;
-      working_old_available -= collected_regions_to_loan * region_size_bytes;
-    }
-
-    // If there's still memory that we want to exclude from the current promotion reserve, but we are unable to loan
-    // this memory because fully empty old-gen regions are not available, decrement the working_old_available to make
-    // sure that this memory is not used to hold the results of old-gen evacuation.
-    if (additional_regions_to_loan > regions_available_to_loan) {
-      size_t unloaned_regions = additional_regions_to_loan - regions_available_to_loan;
-      size_t unloaned_bytes = unloaned_regions * region_size_bytes;
-
-      if (working_old_available < unloaned_bytes) {
-        // We're in dire straits.  We won't be able to reserve all the memory that we want to make available for the
-        // next old-gen evacuation.  We'll reserve as much of it as possible.  Setting working_old_available to zero
-        // means there will be no promotion except for the advance promotion.  Note that if some advance promotion fails,
-        // the object will be evacuated to young-gen so we should still end up reclaiming the entire advance promotion
-        // collection set.
-        reserved_bytes_for_future_old_evac = working_old_available;
-        working_old_available = 0;
-      } else {
-        reserved_bytes_for_future_old_evac = unloaned_bytes;
-        working_old_available -= unloaned_bytes;
-      }
-      size_t regions_reserved_for_future_old_evac =
-        (reserved_bytes_for_future_old_evac + region_size_bytes - 1) / region_size_bytes;
-
-      if (regions_reserved_for_future_old_evac < regions_available_to_loan) {
-        regions_available_to_loan -= regions_reserved_for_future_old_evac;
-      } else {
-        regions_available_to_loan = 0;
-      }
+  assert(young_evacuated_reserve_used <= young_generation->available(), "Cannot evacuate more than is available in young");
+  heap->set_young_evac_reserve(young_evacuated_reserve_used);
 
-      // Since we're in dire straits, zero out fragmented_old_total so this won't be used for promotion;
-      if (working_old_available > fragmented_old_total) {
-        working_old_available -= fragmented_old_total;
+  size_t old_available = old_generation->available();
+  // Now that we've established the collection set, we know how much memory is really required by old-gen for evacuation
+  // and promotion reserves.  Try shrinking OLD now in case that gives us a bit more runway for mutator allocations during
+  // evac and update phases.
+  size_t old_consumed = old_evacuated_committed + young_advance_promoted_reserve_used;
+  assert(old_available >= old_consumed, "Cannot consume more than is available");
+  size_t excess_old = old_available - old_consumed;
+  size_t unaffiliated_old_regions = old_generation->free_unaffiliated_regions();
+  size_t unaffiliated_old = unaffiliated_old_regions * region_size_bytes;
+  assert(old_available >= unaffiliated_old, "Unaffiliated old is a subset of old available");
+
+  // Make sure old_evacuated_committed is backed by unaffiliated old regions; only memory beyond it counts as excess
+  if (old_evacuated_committed > 0) {
+    if (unaffiliated_old > old_evacuated_committed) {
+      size_t giveaway = unaffiliated_old - old_evacuated_committed;
+      size_t giveaway_regions = giveaway / region_size_bytes;  // round down
+      if (giveaway_regions > 0) {
+        excess_old = MIN2(excess_old, giveaway_regions * region_size_bytes);
       } else {
-        working_old_available = 0;
+        excess_old = 0;
       }
-      fragmented_old_total = 0;
+    } else {
+      excess_old = 0;
     }
   }
 
-  // Establish young_evac_reserve so that this young-gen memory is not used for new allocations, allowing the memory
-  // to be returned to old-gen as soon as the current collection set regions are reclaimed.
-  heap->set_young_evac_reserve(young_evacuated_reserve_used);
-
-  // Limit promoted_reserve so that we can set aside memory to be loaned from old-gen to young-gen.  This
-  // value is not "critical".  If we underestimate, certain promotions will simply be deferred.  If we put
-  // "all the rest" of old-gen memory into the promotion reserve, we'll have nothing left to loan to young-gen
-  // during the evac and update phases of GC.  So we "limit" the sizes of the promotion budget to be the smaller of:
-  //
-  //  1. old_available
-  //     (old_available is old_gen->available() -
-  //      (old_evacuated_committed + consumed_by_advance_promotion + loaned_for_young_evac + reserved_for_alloc_supplement))
-  //  2. young bytes reserved for evacuation (we can't promote more than young is evacuating)
-  size_t promotion_reserve = working_old_available;
-
-  // We experimented with constraining promoted_reserve to be no larger than 4 times the size of previously_promoted,
-  // but this constraint was too limiting, resulting in failure of legitimate promotions.  This was tried before we
-  // had special handling in place for advance promotion.  We should retry now that advance promotion is handled
-  // specially.
-
-  // We had also experimented with constraining promoted_reserve to be no more than young_evacuation_committed
-  // divided by promotion_divisor, where:
-  //  size_t promotion_divisor = (0x02 << InitialTenuringThreshold) - 1;
-  // This also was found to be too limiting, resulting in failure of legitimate promotions.
-  //
-  // Both experiments were conducted in the presence of other bugs which could have been the root cause for
-  // the failures identified above as being "too limiting".  TODO: conduct new experiments with the more limiting
-  // values of young_evacuation_reserved_used.
-
-  // young_evacuation_reserve_used already excludes bytes known to be promoted, which equals consumed_by_advance_promotion
-  if (young_evacuated_reserve_used < promotion_reserve) {
-    // Shrink promotion_reserve if it is larger than the memory to be consumed by evacuating all young objects in
-    // collection set, including anticipated waste.  There's no benefit in using a larger promotion_reserve.
-    // young_evacuation_reserve_used does not include live memory within tenure-aged regions.
-    promotion_reserve = young_evacuated_reserve_used;
-  }
-  assert(working_old_available >= promotion_reserve, "Cannot reserve for promotion more than is available");
-  working_old_available -= promotion_reserve;
-  // Having reserved this memory for promotion, the regions are no longer available to be loaned.
-  size_t regions_consumed_by_promotion_reserve = (promotion_reserve + region_size_bytes - 1) / region_size_bytes;
-  if (regions_consumed_by_promotion_reserve > regions_available_to_loan) {
-    // This can happen if the promotion reserve makes use of memory that is fragmented between many partially available
-    // old-gen regions.
-    regions_available_to_loan = 0;
-  } else {
-    regions_available_to_loan -= regions_consumed_by_promotion_reserve;
+  // If we find that OLD has excess regions, give them back to YOUNG now to reduce the likelihood that we run out of
+  // allocation runway during evacuation and update-refs.
+  size_t regions_to_xfer = 0;
+  if (excess_old > unaffiliated_old) {
+    // All of the unaffiliated old memory is excess, so we can give back every unaffiliated old region.
+    regions_to_xfer = unaffiliated_old_regions;
+  } else if (unaffiliated_old_regions > 0) {
+    // excess_old <= unaffiliated_old: give back MIN2(excess_old / region_size_bytes, unaffiliated_old_regions).
+    size_t excess_regions = excess_old / region_size_bytes;
+    regions_to_xfer = MIN2(excess_regions, unaffiliated_old_regions);
+  }
   }
 
-  log_debug(gc)("old_gen->available(): " SIZE_FORMAT " divided between promotion reserve: " SIZE_FORMAT
-                ", old evacuation reserve: " SIZE_FORMAT ", advance promotion reserve supplement: " SIZE_FORMAT
-                ", old loaned for young evacuation: " SIZE_FORMAT ", old reserved for alloc supplement: " SIZE_FORMAT,
-                old_generation->available(), promotion_reserve, old_evacuated_committed, consumed_by_advance_promotion,
-                old_regions_loaned_for_young_evac * region_size_bytes, old_bytes_reserved_for_alloc_supplement);
-
-  promotion_reserve += consumed_by_advance_promotion;
-  heap->set_promoted_reserve(promotion_reserve);
-
-  heap->reset_promoted_expended();
-  if (collection_set->get_old_bytes_reserved_for_evacuation() == 0) {
-    // Setting old evacuation reserve to zero denotes that there is no old-gen evacuation in this pass.
-    heap->set_old_evac_reserve(0);
+  if (regions_to_xfer > 0) {
+    bool result = heap->generation_sizer()->transfer_to_young(regions_to_xfer);
+    assert(excess_old >= regions_to_xfer * region_size_bytes, "Cannot xfer more than excess old");
+    excess_old -= regions_to_xfer * region_size_bytes;
+    log_info(gc, ergo)("%s transferred " SIZE_FORMAT " excess regions to young before start of evacuation",
+                       result? "Successfully": "Unsuccessfully", regions_to_xfer);
   }
 
-  size_t old_gen_usage_base = old_generation->used() - collection_set->get_old_garbage();
-  heap->capture_old_usage(old_gen_usage_base);
-
-  // Compute additional evacuation supplement, which is extra memory borrowed from old-gen that can be allocated
-  // by mutators while GC is working on evacuation and update-refs.  This memory can be temporarily borrowed
-  // from old-gen allotment, then repaid at the end of update-refs from the recycled collection set.  After
-  // we have computed the collection set based on the parameters established above, we can make additional
-  // loans based on our knowledge of the collection set to determine how much allocation we can allow
-  // during the evacuation and update-refs phases of execution.  The total available supplement is the result
-  // of adding old_bytes_reserved_for_alloc_supplement to the smaller of:
-  //
-  //   1. regions_available_to_loan * region_size_bytes
-  //   2. The replenishment budget (number of regions in collection set - the number of regions already
-  //         under lien for the young_evacuation_reserve)
-  //
-
-  // Regardless of how many regions may be available to be loaned, we can loan no more regions than
-  // the total number of young regions to be evacuated.  Call this the regions_for_runway.
-
-  if (regions_available_to_loan > 0 && (collected_regions > loaned_regions)) {
-    assert(regions_available_to_loan * region_size_bytes <= working_old_available,
-           "regions_available_to_loan should not exceed working_old_available");
-
-    size_t additional_regions_to_loan = collected_regions - loaned_regions;
-    if (additional_regions_to_loan > regions_available_to_loan) {
-      additional_regions_to_loan = regions_available_to_loan;
-    }
-    loaned_regions += additional_regions_to_loan;
-    old_bytes_reserved_for_alloc_supplement += additional_regions_to_loan * region_size_bytes;
-    working_old_available -= additional_regions_to_loan * region_size_bytes;
-  }
-  size_t allocation_supplement = old_bytes_reserved_for_alloc_supplement + old_bytes_loaned_for_young_evac;
-  assert(allocation_supplement % ShenandoahHeapRegion::region_size_bytes() == 0,
-         "allocation_supplement must be multiple of region size");
-
-  heap->set_alloc_supplement_reserve(allocation_supplement);
-
-  // TODO: young_available, which feeds into alloc_budget_evac_and_update is lacking memory available within
-  // existing young-gen regions that were not selected for the collection set.  Add this in and adjust the
-  // log message (where it says "empty-region allocation budget").
-
-
-  log_debug(gc)("Memory reserved for young evacuation: " SIZE_FORMAT "%s for evacuating " SIZE_FORMAT
-                "%s out of young available: " SIZE_FORMAT "%s",
-                byte_size_in_proper_unit(young_evacuated_reserve_used),
-                proper_unit_for_byte_size(young_evacuated_reserve_used),
-                byte_size_in_proper_unit(young_evacuated), proper_unit_for_byte_size(young_evacuated),
-                byte_size_in_proper_unit(young_available), proper_unit_for_byte_size(young_available));
-
-  log_debug(gc)("Memory reserved for old evacuation: " SIZE_FORMAT "%s for evacuating " SIZE_FORMAT
-                "%s out of old available: " SIZE_FORMAT "%s",
-                byte_size_in_proper_unit(old_evacuated), proper_unit_for_byte_size(old_evacuated),
-                byte_size_in_proper_unit(old_evacuated), proper_unit_for_byte_size(old_evacuated),
-                byte_size_in_proper_unit(old_available), proper_unit_for_byte_size(old_available));
-
-  size_t regular_promotion = promotion_reserve - consumed_by_advance_promotion;
-  size_t excess =
-    old_available - (old_evacuation_reserve + promotion_reserve + old_bytes_loaned_for_young_evac + allocation_supplement);
-  log_info(gc, ergo)("Old available: " SIZE_FORMAT "%s is partitioned into old evacuation budget: " SIZE_FORMAT
-                     "%s, aged region promotion budget: " SIZE_FORMAT
-                     "%s, regular region promotion budget: " SIZE_FORMAT
-                     "%s, loaned for young evacuation: " SIZE_FORMAT
-                     "%s, loaned for young allocations: " SIZE_FORMAT
-                     "%s, excess: " SIZE_FORMAT "%s",
-                     byte_size_in_proper_unit(old_available),
-                     proper_unit_for_byte_size(old_available),
-                     byte_size_in_proper_unit(old_evacuation_reserve),
-                     proper_unit_for_byte_size(old_evacuation_reserve),
-                     byte_size_in_proper_unit(consumed_by_advance_promotion),
-                     proper_unit_for_byte_size(consumed_by_advance_promotion),
-                     byte_size_in_proper_unit(regular_promotion),
-                     proper_unit_for_byte_size(regular_promotion),
-                     byte_size_in_proper_unit(old_bytes_loaned_for_young_evac),
-                     proper_unit_for_byte_size(old_bytes_loaned_for_young_evac),
-                     byte_size_in_proper_unit(allocation_supplement),
-                     proper_unit_for_byte_size(allocation_supplement),
-                     byte_size_in_proper_unit(excess),
-                     proper_unit_for_byte_size(excess));
+  // Add in the excess_old memory to hold unanticipated promotions, if any.  If there are more unanticipated
+  // promotions than fit in reserved memory, they will be deferred until a future GC pass.
+  size_t total_promotion_reserve = young_advance_promoted_reserve_used + excess_old;
+  heap->set_promoted_reserve(total_promotion_reserve);
+  heap->reset_promoted_expended();
 }
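
A minimal standalone sketch of the excess-old arithmetic above (hypothetical sizes; MIN2 modeled with std::min): memory that is neither committed to old evacuation nor promotion, and is backed by unaffiliated regions, is what may be transferred to YOUNG.

```cpp
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t region_size_bytes  = size_t(4) * 1024 * 1024;
  size_t old_available            = 40 * region_size_bytes;
  size_t old_consumed             = 25 * region_size_bytes;  // evac committed + promo reserve
  size_t unaffiliated_old_regions = 10;
  size_t unaffiliated_old         = unaffiliated_old_regions * region_size_bytes;

  assert(old_available >= old_consumed);
  size_t excess_old = old_available - old_consumed;          // 15 regions' worth

  size_t regions_to_xfer = 0;
  if (excess_old > unaffiliated_old) {
    // All unaffiliated regions are excess; they are the only ones that can move wholesale.
    regions_to_xfer = unaffiliated_old_regions;
  } else {
    // Only part of the unaffiliated memory is excess; round down to whole regions.
    regions_to_xfer = std::min(excess_old / region_size_bytes, unaffiliated_old_regions);
  }
  assert(excess_old >= regions_to_xfer * region_size_bytes);
  printf("transfer %zu regions to young\n", regions_to_xfer);  // prints 10 here
  return 0;
}
```

With these numbers the excess (60 MB) exceeds the unaffiliated memory (40 MB), so only the 10 unaffiliated regions can move.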
 
 void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
@@ -822,7 +487,6 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
 
       // Budgeting parameters to compute_evacuation_budgets are passed by reference.
       compute_evacuation_budgets(heap, preselected_regions, collection_set, consumed_by_advance_promotion);
-
       _heuristics->choose_collection_set(collection_set, heap->old_heuristics());
       if (!collection_set->is_empty()) {
         // only make use of evacuation budgets when we are evacuating
@@ -839,7 +503,11 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
                             ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
     ShenandoahHeapLocker locker(heap->lock());
-    heap->free_set()->rebuild();
+    size_t young_cset_regions, old_cset_regions;
+
+    // We are preparing for evacuation.  At this time, we ignore cset region tallies.
+    heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions);
+    heap->free_set()->rebuild(young_cset_regions, old_cset_regions);
   }
   heap->set_evacuation_reserve_quantities(false);
 }
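
The rebuild is now split into two phases. Below is an illustrative model of the call pattern (a hypothetical FreeSetModel, not the real ShenandoahFreeSet): prepare_to_rebuild() reports collection-set region tallies through its out-parameters, and rebuild() consumes them; at this call site the tallies are effectively ignored.

```cpp
#include <cstddef>
#include <cstdio>

// Hypothetical model of the two-phase rebuild protocol used above.
struct FreeSetModel {
  size_t young_cset = 3, old_cset = 1;   // pretend these regions sit in the cset

  void prepare_to_rebuild(size_t& young_cset_regions, size_t& old_cset_regions) {
    // Phase 1: scan regions and report how many cset regions will be reclaimed.
    young_cset_regions = young_cset;
    old_cset_regions   = old_cset;
  }
  void rebuild(size_t young_cset_regions, size_t old_cset_regions) {
    // Phase 2: rebuild the free lists, given the reported cset tallies.
    printf("rebuilt with %zu young and %zu old cset regions\n",
           young_cset_regions, old_cset_regions);
  }
};

int main() {
  FreeSetModel free_set;
  size_t young_cset_regions, old_cset_regions;
  free_set.prepare_to_rebuild(young_cset_regions, old_cset_regions);
  free_set.rebuild(young_cset_regions, old_cset_regions);
  return 0;
}
```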
@@ -897,7 +565,7 @@ ShenandoahGeneration::ShenandoahGeneration(ShenandoahGenerationType type,
   _collection_thread_time_s(0.0),
   _affiliated_region_count(0), _humongous_waste(0), _used(0), _bytes_allocated_since_gc_start(0),
   _max_capacity(max_capacity), _soft_max_capacity(soft_max_capacity),
-  _adjusted_capacity(max_capacity), _heuristics(nullptr) {
+  _heuristics(nullptr) {
   _is_marking_complete.set();
   assert(max_workers > 0, "At least one queue");
   for (uint i = 0; i < max_workers; ++i) {
@@ -955,6 +623,28 @@ size_t ShenandoahGeneration::decrement_affiliated_region_count() {
   // on read and write of _affiliated_region_count.  At the end of full gc, a single thread overwrites the count with
   // a coherent value.
   _affiliated_region_count--;
+  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
+  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
+         (_used + _humongous_waste <= _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()),
+         "used + humongous cannot exceed regions");
+  return _affiliated_region_count;
+}
+
+size_t ShenandoahGeneration::increase_affiliated_region_count(size_t delta) {
+  shenandoah_assert_heaplocked_or_fullgc_safepoint();
+  _affiliated_region_count += delta;
+  return _affiliated_region_count;
+}
+
+size_t ShenandoahGeneration::decrease_affiliated_region_count(size_t delta) {
+  shenandoah_assert_heaplocked_or_fullgc_safepoint();
+  assert(_affiliated_region_count >= delta, "Affiliated region count cannot be negative");
+
+  _affiliated_region_count -= delta;
+  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
+  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
+         (_used + _humongous_waste <= _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()),
+         "used + humongous cannot exceed regions");
   return _affiliated_region_count;
 }
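
The repeated asserts encode one accounting invariant: bytes in use (plus humongous waste) can never exceed the bytes spanned by affiliated regions. A compact standalone model of that invariant, using a hypothetical GenerationModel type:

```cpp
#include <cassert>
#include <cstddef>

// Hypothetical model of the invariant asserted above.
struct GenerationModel {
  static constexpr size_t region_size_bytes = size_t(4) * 1024 * 1024;
  size_t used = 0;
  size_t humongous_waste = 0;
  size_t affiliated_region_count = 0;

  void check_invariant() const {
    assert(used + humongous_waste <= affiliated_region_count * region_size_bytes);
  }
  size_t decrease_affiliated_region_count(size_t delta) {
    assert(affiliated_region_count >= delta);   // count cannot go negative
    affiliated_region_count -= delta;
    check_invariant();                          // usage must still fit in remaining regions
    return affiliated_region_count;
  }
};

int main() {
  GenerationModel g;
  g.affiliated_region_count = 4;
  g.used = 2 * GenerationModel::region_size_bytes;
  g.decrease_affiliated_region_count(2);        // fine: 2 regions still cover the usage
  return 0;
}
```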
 
@@ -967,11 +657,11 @@ void ShenandoahGeneration::establish_usage(size_t num_regions, size_t num_bytes,
 
 void ShenandoahGeneration::increase_used(size_t bytes) {
   Atomic::add(&_used, bytes);
-}
-
-void ShenandoahGeneration::decrease_used(size_t bytes) {
-  assert(_used >= bytes, "cannot reduce bytes used by generation below zero");
-  Atomic::sub(&_used, bytes);
+  // This detects arithmetic wraparound on _used.  Non-generational mode does not keep track of _affiliated_region_count.
+  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
+  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
+         (_used + _humongous_waste <= _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()),
+         "used cannot exceed regions");
 }
 
 void ShenandoahGeneration::increase_humongous_waste(size_t bytes) {
@@ -982,13 +672,25 @@ void ShenandoahGeneration::increase_humongous_waste(size_t bytes) {
 
 void ShenandoahGeneration::decrease_humongous_waste(size_t bytes) {
   if (bytes > 0) {
-    assert(_humongous_waste >= bytes, "Waste cannot be negative");
     assert(ShenandoahHeap::heap()->is_full_gc_in_progress() || (_humongous_waste >= bytes),
            "Waste (" SIZE_FORMAT ") cannot be negative (after subtracting " SIZE_FORMAT ")", _humongous_waste, bytes);
     Atomic::sub(&_humongous_waste, bytes);
   }
 }
 
+void ShenandoahGeneration::decrease_used(size_t bytes) {
+  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
+  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
+         (_used >= bytes), "cannot reduce bytes used by generation below zero");
+  Atomic::sub(&_used, bytes);
+
+  // Non-generational mode does not maintain affiliated region counts
+  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
+  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
+         (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() >= _used),
+         "Affiliated regions must hold more than what is currently used");
+}
+
 size_t ShenandoahGeneration::used_regions() const {
   return _affiliated_region_count;
 }
@@ -996,7 +698,7 @@ size_t ShenandoahGeneration::used_regions() const {
 size_t ShenandoahGeneration::free_unaffiliated_regions() const {
   size_t result = max_capacity() / ShenandoahHeapRegion::region_size_bytes();
   if (_affiliated_region_count > result) {
-    result = 0;                 // If old-gen is loaning regions to young-gen, affiliated regions may exceed capacity temporarily.
+    result = 0;
   } else {
     result -= _affiliated_region_count;
   }
@@ -1019,61 +721,48 @@ size_t ShenandoahGeneration::soft_available() const {
   return in_use > soft_capacity ? 0 : soft_capacity - in_use;
 }
 
-size_t ShenandoahGeneration::adjust_available(intptr_t adjustment) {
-  assert(adjustment % ShenandoahHeapRegion::region_size_bytes() == 0,
-        "Adjustment to generation size must be multiple of region size");
-  _adjusted_capacity = max_capacity() + adjustment;
-  return _adjusted_capacity;
-}
-
-size_t ShenandoahGeneration::unadjust_available() {
-  _adjusted_capacity = max_capacity();
-  return _adjusted_capacity;
-}
-
-size_t ShenandoahGeneration::adjusted_available() const {
-  size_t in_use = used() + get_humongous_waste();
-  size_t capacity = _adjusted_capacity;
-  return in_use > capacity ? 0 : capacity - in_use;
-}
-
-size_t ShenandoahGeneration::adjusted_capacity() const {
-  return _adjusted_capacity;
-}
-
-size_t ShenandoahGeneration::adjusted_unaffiliated_regions() const {
-  // This assertion has been disabled because we expect this code to be replaced by 05/2023
-  // assert(adjusted_capacity() >= used_regions_size(), "adjusted_unaffiliated_regions() cannot return negative");
-  assert((adjusted_capacity() - used_regions_size()) % ShenandoahHeapRegion::region_size_bytes() == 0,
-         "adjusted capacity (" SIZE_FORMAT ") and used regions size (" SIZE_FORMAT ") should be multiples of region_size_bytes",
-         adjusted_capacity(), used_regions_size());
-  return (adjusted_capacity() - used_regions_size()) / ShenandoahHeapRegion::region_size_bytes();
-}
-
 void ShenandoahGeneration::increase_capacity(size_t increment) {
   shenandoah_assert_heaplocked_or_safepoint();
-  assert(_max_capacity + increment <= ShenandoahHeap::heap()->max_size_for(this), "Cannot increase generation capacity beyond maximum.");
-  assert(increment % ShenandoahHeapRegion::region_size_bytes() == 0, "Region-sized changes only");
-  // TODO: ysr: remove this check and warning
-  if (increment % ShenandoahHeapRegion::region_size_bytes() != 0) {
-    log_warning(gc)("Increment (" INTPTR_FORMAT ") should be a multiple of region size (" SIZE_FORMAT ")",
-                    increment, ShenandoahHeapRegion::region_size_bytes());
-  }
+
+  // We do not enforce that new capacity <= heap->max_size_for(this).  The maximum generation size is treated as a rule of thumb
+  // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions
+  // in place.
+  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
+  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
+         (_max_capacity + increment <= ShenandoahHeap::heap()->max_capacity()), "Generation cannot be larger than heap size");
+  assert(increment % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size");
   _max_capacity += increment;
-  _adjusted_capacity += increment;
+
+  // This detects arithmetic wraparound on _used
+  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
+  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
+         (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() >= _used),
+         "Affiliated regions must hold more than what is currently used");
 }
 
 void ShenandoahGeneration::decrease_capacity(size_t decrement) {
   shenandoah_assert_heaplocked_or_safepoint();
-  assert(_max_capacity - decrement >= ShenandoahHeap::heap()->min_size_for(this), "Cannot decrease generation capacity beyond minimum.");
-  assert(decrement % ShenandoahHeapRegion::region_size_bytes() == 0, "Region-sized changes only");
-  // TODO: ysr: remove this check and warning
-  if (decrement % ShenandoahHeapRegion::region_size_bytes() != 0) {
-    log_warning(gc)("Decrement (" INTPTR_FORMAT ") should be a multiple of region size (" SIZE_FORMAT ")",
-                    decrement, ShenandoahHeapRegion::region_size_bytes());
-  }
+
+  // We do not enforce that new capacity >= heap->min_size_for(this).  The minimum generation size is treated as a rule of thumb
+  // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions
+  // in place.
+  assert(decrement % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size");
+  assert(_max_capacity >= decrement, "Generation capacity cannot be negative");
+
   _max_capacity -= decrement;
-  _adjusted_capacity -= decrement;
+
+  // This detects arithmetic wraparound on _used
+  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
+  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
+         (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() >= _used),
+         "Affiliated regions must hold more than what is currently used");
+  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
+  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
+         (_used <= _max_capacity), "Cannot use more than capacity");
+  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
+  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
+         (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() <= _max_capacity),
+         "Cannot use more than capacity");
 }
 
 void ShenandoahGeneration::record_success_concurrent(bool abbreviated) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
index 160a51a443b..2bd85414280 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
@@ -50,8 +50,6 @@ class ShenandoahGeneration : public CHeapObj<mtGC> {
 
   double _collection_thread_time_s;
 
-protected:
-  // Usage
   size_t _affiliated_region_count;
 
   // How much free memory is left in the last region of humongous objects.
@@ -61,13 +59,15 @@ class ShenandoahGeneration : public CHeapObj<mtGC> {
   // The units are bytes. The value is only changed on a safepoint or under the
   // heap lock.
   size_t _humongous_waste;
+
+protected:
+  // Usage
+
   volatile size_t _used;
   volatile size_t _bytes_allocated_since_gc_start;
   size_t _max_capacity;
   size_t _soft_max_capacity;
 
-  size_t _adjusted_capacity;
-
   ShenandoahHeuristics* _heuristics;
 
 private:
@@ -117,22 +117,6 @@ class ShenandoahGeneration : public CHeapObj<mtGC> {
   // max heap size will cause the adaptive heuristic to run more frequent cycles.
   size_t soft_available() const;
 
-  // During evacuation and update-refs, some memory may be shifted between generations.  In particular, memory
-  // may be loaned by old-gen to young-gen based on the promise the loan will be promptly repaid from the memory reclaimed
-  // when the current collection set is recycled.  The capacity adjustment also takes into consideration memory that is
-  // set aside within each generation to hold the results of evacuation, but not promotion, into that region.  Promotions
-  // into old-gen are bounded by adjusted_available() whereas evacuations into old-gen are pre-committed.
-  size_t adjusted_available() const;
-  size_t adjusted_capacity() const;
-
-  // This is the number of FREE regions that are eligible to be affiliated with this generation according to the current
-  // adjusted capacity.
-  size_t adjusted_unaffiliated_regions() const;
-
-  // Both of following return new value of available
-  size_t adjust_available(intptr_t adjustment);
-  size_t unadjust_available();
-
   size_t bytes_allocated_since_gc_start();
   void reset_bytes_allocated_since_gc_start();
   void increase_allocated(size_t bytes);
@@ -205,6 +189,12 @@ class ShenandoahGeneration : public CHeapObj<mtGC> {
   // Return the updated value of affiliated_region_count
   size_t decrement_affiliated_region_count();
 
+  // Return the updated value of affiliated_region_count
+  size_t increase_affiliated_region_count(size_t delta);
+
+  // Return the updated value of affiliated_region_count
+  size_t decrease_affiliated_region_count(size_t delta);
+
   void establish_usage(size_t num_regions, size_t num_bytes, size_t humongous_waste);
 
   void increase_used(size_t bytes);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
index 348e1900e39..eaa51009955 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -379,8 +379,11 @@ jint ShenandoahHeap::initialize() {
 
     // Initialize to complete
     _marking_context->mark_complete();
+    size_t young_cset_regions, old_cset_regions;
 
-    _free_set->rebuild();
+    // We are initializing the free set.  At this time, we ignore cset region tallies.
+    _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions);
+    _free_set->rebuild(young_cset_regions, old_cset_regions);
   }
 
   if (AlwaysPreTouch) {
@@ -519,15 +522,10 @@ void ShenandoahHeap::initialize_heuristics_generations() {
 
   _young_generation = new ShenandoahYoungGeneration(_max_workers, max_capacity_young, initial_capacity_young);
   _old_generation = new ShenandoahOldGeneration(_max_workers, max_capacity_old, initial_capacity_old);
-  _global_generation = new ShenandoahGlobalGeneration(_gc_mode->is_generational(), _max_workers, soft_max_capacity(), soft_max_capacity());
-
+  _global_generation = new ShenandoahGlobalGeneration(_gc_mode->is_generational(), _max_workers, max_capacity(), max_capacity());
   _global_generation->initialize_heuristics(_gc_mode);
-  if (mode()->is_generational()) {
-    _young_generation->initialize_heuristics(_gc_mode);
-    _old_generation->initialize_heuristics(_gc_mode);
-
-    ShenandoahEvacWaste = ShenandoahGenerationalEvacWaste;
-  }
+  _young_generation->initialize_heuristics(_gc_mode);
+  _old_generation->initialize_heuristics(_gc_mode);
 }
 
 #ifdef _MSC_VER
@@ -540,6 +538,8 @@ ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
   _gc_generation(nullptr),
   _prepare_for_old_mark(false),
   _initial_size(0),
+  _promotion_potential(0),
+  _promotion_in_place_potential(0),
   _committed(0),
   _max_workers(MAX3(ConcGCThreads, ParallelGCThreads, 1U)),
   _workers(nullptr),
@@ -549,7 +549,6 @@ ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
   _regions(nullptr),
   _affiliations(nullptr),
   _update_refs_iterator(this),
-  _alloc_supplement_reserve(0),
   _promoted_reserve(0),
   _old_evac_reserve(0),
   _old_evac_expended(0),
@@ -581,6 +580,8 @@ ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
   _gc_timer(new ConcurrentGCTimer()),
   _soft_ref_policy(),
   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
+  _old_regions_surplus(0),
+  _old_regions_deficit(0),
   _marking_context(nullptr),
   _bitmap_size(0),
   _bitmap_regions_per_slice(0),
@@ -818,13 +819,6 @@ void ShenandoahHeap::set_soft_max_capacity(size_t v) {
          "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
          min_capacity(), v, max_capacity());
   Atomic::store(&_soft_max_size, v);
-
-  if (mode()->is_generational()) {
-    size_t max_capacity_young = _generation_sizer.max_young_size();
-    size_t min_capacity_young = _generation_sizer.min_young_size();
-    size_t new_capacity_young = clamp(v, min_capacity_young, max_capacity_young);
-    _young_generation->set_soft_max_capacity(new_capacity_young);
-  }
 }
 
 size_t ShenandoahHeap::min_capacity() const {
@@ -893,9 +887,7 @@ void ShenandoahHeap::handle_promotion_failure() {
 }
 
 void ShenandoahHeap::report_promotion_failure(Thread* thread, size_t size) {
-  // We squelch excessive reports to reduce noise in logs.  Squelch enforcement is not "perfect" because
-  // this same code can be in-lined in multiple contexts, and each context will have its own copy of the static
-  // last_report_epoch and this_epoch_report_count variables.
+  // We squelch excessive reports to reduce noise in logs.
   const size_t MaxReportsPerEpoch = 4;
   static size_t last_report_epoch = 0;
   static size_t epoch_report_count = 0;
@@ -915,11 +907,18 @@ void ShenandoahHeap::report_promotion_failure(Thread* thread, size_t size) {
     PLAB* plab = ShenandoahThreadLocalData::plab(thread);
     size_t words_remaining = (plab == nullptr)? 0: plab->words_remaining();
     const char* promote_enabled = ShenandoahThreadLocalData::allow_plab_promotions(thread)? "enabled": "disabled";
+    ShenandoahGeneration* old_gen = old_generation();
+    size_t old_capacity = old_gen->max_capacity();
+    size_t old_usage = old_gen->used();
+    size_t old_free_regions = old_gen->free_unaffiliated_regions();
 
     log_info(gc, ergo)("Promotion failed, size " SIZE_FORMAT ", has plab? %s, PLAB remaining: " SIZE_FORMAT
-                       ", plab promotions %s, promotion reserve: " SIZE_FORMAT ", promotion expended: " SIZE_FORMAT,
-                       size, plab == nullptr? "no": "yes",
-                       words_remaining, promote_enabled, promotion_reserve, promotion_expended);
+                       ", plab promotions %s, promotion reserve: " SIZE_FORMAT ", promotion expended: " SIZE_FORMAT
+                       ", old capacity: " SIZE_FORMAT ", old_used: " SIZE_FORMAT ", old unaffiliated regions: " SIZE_FORMAT,
+                       size * HeapWordSize, plab == nullptr? "no": "yes",
+                       words_remaining * HeapWordSize, promote_enabled, promotion_reserve, promotion_expended,
+                       old_capacity, old_usage, old_free_regions);
+
     if ((gc_id == last_report_epoch) && (epoch_report_count >= MaxReportsPerEpoch)) {
       log_info(gc, ergo)("Squelching additional promotion failure reports for current epoch");
     } else if (gc_id != last_report_epoch) {
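
The squelching above is an instance of a generic epoch-based rate-limiting pattern. A standalone sketch (hypothetical logging; epochs keyed by GC id, as in the code above):

```cpp
#include <cstddef>
#include <cstdio>

// Generic sketch of epoch-based log squelching: allow at most MaxReportsPerEpoch
// reports per GC cycle (epoch), resetting the counter when a new cycle begins.
static void report_failure(size_t gc_id, size_t bytes) {
  const size_t MaxReportsPerEpoch = 4;
  static size_t last_report_epoch = 0;
  static size_t epoch_report_count = 0;

  if ((gc_id == last_report_epoch) && (epoch_report_count >= MaxReportsPerEpoch)) {
    return;  // squelched: this cycle already produced its quota of reports
  }
  if (gc_id != last_report_epoch) {
    last_report_epoch = gc_id;   // new epoch: start counting afresh
    epoch_report_count = 0;
  }
  epoch_report_count++;
  printf("promotion failure of %zu bytes in gc %zu (report %zu of %zu)\n",
         bytes, gc_id, epoch_report_count, MaxReportsPerEpoch);
}

int main() {
  for (int i = 0; i < 6; i++) report_failure(1, 64);  // only 4 lines printed
  report_failure(2, 64);                              // new epoch: printed again
  return 0;
}
```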
@@ -1029,7 +1028,6 @@ HeapWord* ShenandoahHeap::allocate_from_plab_slow(Thread* thread, size_t size, b
     // CAUTION: retire_plab may register the remnant filler object with the remembered set scanner without a lock.  This
     // is safe iff it is assured that each PLAB is a whole-number multiple of card-mark memory size and each PLAB is
     // aligned with the start of a card's memory range.
-
     retire_plab(plab, thread);
 
     size_t actual_size = 0;
@@ -1037,7 +1035,12 @@ HeapWord* ShenandoahHeap::allocate_from_plab_slow(Thread* thread, size_t size, b
     // less than the remaining evacuation need.  It also adjusts plab_preallocated and expend_promoted if appropriate.
     HeapWord* plab_buf = allocate_new_plab(min_size, cur_size, &actual_size);
     if (plab_buf == nullptr) {
-      return nullptr;
+      if (min_size == PLAB::min_size()) {
+        // Disable plab promotions for this thread because we cannot even allocate a plab of minimal size.  This allows us
+        // to fail faster on subsequent promotion attempts.
+        ShenandoahThreadLocalData::disable_plab_promotions(thread);
+      }
+      return nullptr;
     } else {
       ShenandoahThreadLocalData::enable_plab_retries(thread);
     }
@@ -1056,7 +1059,6 @@ HeapWord* ShenandoahHeap::allocate_from_plab_slow(Thread* thread, size_t size, b
 #endif // ASSERT
     }
     plab->set_buf(plab_buf, actual_size);
-
     if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
       return nullptr;
     }
@@ -1154,11 +1156,93 @@ void ShenandoahHeap::coalesce_and_fill_old_regions() {
   parallel_heap_region_iterate(&coalesce);
 }
 
-bool ShenandoahHeap::adjust_generation_sizes() {
-  if (mode()->is_generational()) {
-    return _generation_sizer.adjust_generation_sizes();
+// xfer_limit is the maximum we're able to transfer from young to old
+void ShenandoahHeap::adjust_generation_sizes_for_next_cycle(
+  size_t xfer_limit, size_t young_cset_regions, size_t old_cset_regions) {
+
+  // Make sure old-generation is large enough, but no larger, than is necessary to hold mixed evacuations
+  // and promotions if we anticipate either.
+  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+  size_t promo_load = get_promotion_potential();
+  // The free set will reserve this amount of memory to hold young evacuations
+  size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100;
+  size_t old_reserve = 0;
+  size_t mixed_candidates = old_heuristics()->unprocessed_old_collection_candidates();
+  bool doing_mixed = (mixed_candidates > 0);
+  bool doing_promotions = promo_load > 0;
+
+  // round down
+  size_t max_old_region_xfer = xfer_limit / region_size_bytes;
+
+  // The old-gen reserve, which must cover both mixed evacuations and promotions, is bounded by the old/young evacuation ratio
+  size_t max_old_reserve = young_reserve * ShenandoahOldEvacRatioPercent / (100 - ShenandoahOldEvacRatioPercent);
+  // Here's the algebra:
+  //  TotalEvacuation = OldEvacuation + YoungEvacuation
+  //  OldEvacuation = TotalEvacuation*(ShenandoahOldEvacRatioPercent/100)
+  //  OldEvacuation = YoungEvacuation * (ShenandoahOldEvacRatioPercent/100)/(1 - ShenandoahOldEvacRatioPercent/100)
+  //  OldEvacuation = YoungEvacuation * ShenandoahOldEvacRatioPercent/(100 - ShenandoahOldEvacRatioPercent)
+
+  size_t reserve_for_mixed, reserve_for_promo;
+  if (doing_mixed) {
+    assert(old_generation()->available() >= old_generation()->free_unaffiliated_regions() * region_size_bytes,
+           "Unaffiliated available must be less than total available");
+
+    // We want this much memory to be unfragmented in order to reliably evacuate old.  This is conservative because we
+    // may not evacuate the entirety of unprocessed candidates in a single mixed evacuation.
+    size_t max_evac_need = (size_t)
+      (old_heuristics()->unprocessed_old_collection_candidates_live_memory() * ShenandoahOldEvacWaste);
+    size_t old_fragmented_available =
+      old_generation()->available() - old_generation()->free_unaffiliated_regions() * region_size_bytes;
+    reserve_for_mixed = max_evac_need + old_fragmented_available;
+    if (reserve_for_mixed > max_old_reserve) {
+      reserve_for_mixed = max_old_reserve;
+    }
+  } else {
+    reserve_for_mixed = 0;
   }
-  return false;
+
+  size_t available_for_promotions = max_old_reserve - reserve_for_mixed;
+  if (doing_promotions) {
+    // We're only promoting and we have a maximum bound on the amount to be promoted
+    reserve_for_promo = (size_t) (promo_load * ShenandoahPromoEvacWaste);
+    if (reserve_for_promo > available_for_promotions) {
+      reserve_for_promo = available_for_promotions;
+    }
+  } else {
+    reserve_for_promo = 0;
+  }
+  old_reserve = reserve_for_mixed + reserve_for_promo;
+  assert(old_reserve <= max_old_reserve, "cannot reserve more than max for old evacuations");
+  size_t old_available = old_generation()->available() + old_cset_regions * region_size_bytes;
+  size_t young_available = young_generation()->available() + young_cset_regions * region_size_bytes;
+  size_t old_region_deficit = 0;
+  size_t old_region_surplus = 0;
+  if (old_available >= old_reserve) {
+    size_t old_excess = old_available - old_reserve;
+    size_t excess_regions = old_excess / region_size_bytes;
+    size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_cset_regions;
+    if (unaffiliated_old_regions < excess_regions) {
+      // We'll give only unaffiliated old to young, which is known to be less than the excess.
+      old_region_surplus = unaffiliated_old_regions;
+    } else {
+      // unaffiliated_old_regions >= excess_regions, so we only give away the excess.
+      old_region_surplus = excess_regions;
+    }
+  } else {
+    // We need to request transfer from YOUNG.  Ignore that this will directly impact young_generation()->max_capacity(),
+    // indirectly impacting young_reserve and old_reserve.  These computations are conservative.
+    size_t old_need = old_reserve - old_available;
+    // Round up the number of regions needed from YOUNG
+    old_region_deficit = (old_need + region_size_bytes - 1) / region_size_bytes;
+  }
+  if (old_region_deficit > max_old_region_xfer) {
+    // If we're running short on young-gen memory, limit the xfer.  Old-gen collection activities will be curtailed
+    // if the budget is smaller than desired.
+    old_region_deficit = max_old_region_xfer;
+  }
+  set_old_region_surplus(old_region_surplus);
+  set_old_region_deficit(old_region_deficit);
 }
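
A worked instance of the algebra above, with hypothetical values (a young_reserve of 800 MB and a ratio of 20):

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  // OldEvacuation = YoungEvacuation * ratio / (100 - ratio), per the derivation above.
  const size_t young_reserve          = size_t(800) * 1024 * 1024;  // hypothetical
  const size_t old_evac_ratio_percent = 20;   // stand-in for ShenandoahOldEvacRatioPercent

  size_t max_old_reserve = young_reserve * old_evac_ratio_percent / (100 - old_evac_ratio_percent);

  // 800 MB * 20 / 80 = 200 MB, so old evacuation may consume at most 20% of the
  // 1000 MB total evacuation (200 MB old + 800 MB young).
  printf("max old reserve: %zu MB\n", max_old_reserve >> 20);
  return 0;
}
```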
 
 // Called from stubs in JIT code or interpreter
@@ -1193,7 +1277,7 @@ HeapWord* ShenandoahHeap::allocate_new_plab(size_t min_size,
                                             size_t* actual_size) {
   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_plab(min_size, word_size);
   // Note that allocate_memory() sets a thread-local flag to prohibit further promotions by this thread
-  // if we are at risk of exceeding the old-gen evacuation budget.
+  // if we are at risk of infringing on the old-gen evacuation budget.
   HeapWord* res = allocate_memory(req, false);
   if (res != nullptr) {
     *actual_size = req.actual_size();
@@ -1228,14 +1312,16 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req, bool is_p
     // strategy is to try again, as long as GC makes progress.
     //
     // Then, we need to make sure the allocation was retried after at least one
-    // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.
+    // Full GC.
     size_t tries = 0;
+    size_t original_fullgc_count = shenandoah_policy()->get_fullgc_count();
     while (result == nullptr && _progress_last_gc.is_set()) {
       tries++;
       control_thread()->handle_alloc_failure(req);
       result = allocate_memory_under_lock(req, in_new_region, is_promotion);
     }
-    while (result == nullptr && tries <= ShenandoahFullGCThreshold) {
+    while (result == nullptr &&
+           ((shenandoah_policy()->get_fullgc_count() == original_fullgc_count) || (tries <= ShenandoahOOMGCRetries))) {
       tries++;
       control_thread()->handle_alloc_failure(req);
       result = allocate_memory_under_lock(req, in_new_region, is_promotion);
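
A standalone sketch of the new retry policy (hypothetical hooks: try_allocate() models handle_alloc_failure() followed by a retry, and each failed attempt here is assumed to complete a Full GC): retry while GC makes progress; once progress stops, keep retrying until at least one Full GC has completed, with a bounded number of extra attempts.

```cpp
#include <cstddef>
#include <cstdio>

const size_t OOMGCRetries = 3;           // stand-in for ShenandoahOOMGCRetries

// Hypothetical hooks standing in for the heap's allocation and GC machinery.
static size_t g_fullgc_count = 0;
static bool   g_progress_last_gc = false;
static void* try_allocate() {
  g_fullgc_count++;                      // pretend each retry completed a Full GC
  return nullptr;                        // pretend the heap is exhausted
}

static void* allocate_with_retries() {
  void* result = try_allocate();
  size_t tries = 0;
  const size_t original_fullgc_count = g_fullgc_count;

  // As long as GC keeps making progress, keep retrying.
  while (result == nullptr && g_progress_last_gc) {
    tries++;
    result = try_allocate();
  }
  // Otherwise, make sure the request was retried after at least one Full GC,
  // allowing a bounded number of extra attempts.
  while (result == nullptr &&
         ((g_fullgc_count == original_fullgc_count) || (tries <= OOMGCRetries))) {
    tries++;
    result = try_allocate();
  }
  return result;                         // nullptr here means a real OOM
}

int main() {
  printf("allocation %s\n", allocate_with_retries() ? "succeeded" : "failed");
  return 0;
}
```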
@@ -1297,20 +1383,18 @@ HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req
     if (mode()->is_generational()) {
       if (req.affiliation() == YOUNG_GENERATION) {
         if (req.is_mutator_alloc()) {
-          size_t young_available = young_generation()->adjusted_available();
-          if (requested_bytes > young_available) {
-            // We know this is not a GCLAB.  This must be a TLAB or a shared allocation.
-            if (req.is_lab_alloc() && (young_available >= req.min_size())) {
-              try_smaller_lab_size = true;
-              smaller_lab_size = young_available / HeapWordSize;
-            } else {
-              // Can't allocate because even min_size() is larger than remaining young_available
-              log_info(gc, ergo)("Unable to shrink %s alloc request of minimum size: " SIZE_FORMAT
-                                 ", young available: " SIZE_FORMAT,
-                                 req.is_lab_alloc()? "TLAB": "shared",
-                                 HeapWordSize * (req.is_lab_alloc()? req.min_size(): req.size()), young_available);
-              return nullptr;
-            }
+          size_t young_words_available = young_generation()->available() / HeapWordSize;
+          if (ShenandoahElasticTLAB && req.is_lab_alloc() && (req.min_size() < young_words_available)) {
+            // Allow ourselves to try a smaller lab size even if requested_bytes <= young_available.  We may need a smaller
+            // lab size because young memory has become too fragmented.
+            try_smaller_lab_size = true;
+            smaller_lab_size = (young_words_available < req.size())? young_words_available: req.size();
+          } else if (req.size() > young_words_available) {
+            // Can't allocate because even min_size() is larger than remaining young_available
+            log_info(gc, ergo)("Unable to shrink %s alloc request of minimum size: " SIZE_FORMAT
+                               ", young words available: " SIZE_FORMAT, req.type_string(),
+                               HeapWordSize * (req.is_lab_alloc()? req.min_size(): req.size()), young_words_available);
+            return nullptr;
           }
         }
       } else {                    // reg.affiliation() == OLD_GENERATION
@@ -1351,66 +1435,83 @@ HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req
       }
     } // This ends the is_generational() block
 
-    if (!try_smaller_lab_size) {
-      result = (allow_allocation)? _free_set->allocate(req, in_new_region): nullptr;
-      if (result != nullptr) {
-        if (req.is_old()) {
-          ShenandoahThreadLocalData::reset_plab_promoted(thread);
-          if (req.is_gc_alloc()) {
-            if (req.type() ==  ShenandoahAllocRequest::_alloc_plab) {
-              if (promotion_eligible) {
-                size_t actual_size = req.actual_size() * HeapWordSize;
+    // First try the original request.  If TLAB request size is greater than available, allocate() will attempt to downsize
+    // request to fit within available memory.
+    result = (allow_allocation)? _free_set->allocate(req, in_new_region): nullptr;
+    if (result != nullptr) {
+      if (req.is_old()) {
+        ShenandoahThreadLocalData::reset_plab_promoted(thread);
+        if (req.is_gc_alloc()) {
+          bool disable_plab_promotions = false;
+          if (req.type() ==  ShenandoahAllocRequest::_alloc_plab) {
+            if (promotion_eligible) {
+              size_t actual_size = req.actual_size() * HeapWordSize;
+              // The actual size of the allocation may be larger than the requested bytes (due to alignment on card boundaries).
+              // If this puts us over our promotion budget, we need to disable future PLAB promotions for this thread.
+              if (get_promoted_expended() + actual_size <= get_promoted_reserve()) {
                 // Assume the entirety of this PLAB will be used for promotion.  This prevents promotion from overreach.
                 // When we retire this plab, we'll unexpend what we don't really use.
                 ShenandoahThreadLocalData::enable_plab_promotions(thread);
                 expend_promoted(actual_size);
-                // This assert has been disabled because we expect this code to be replaced by 05/2023.
-                // assert(get_promoted_expended() <= get_promoted_reserve(), "Do not expend more promotion than budgeted");
+                assert(get_promoted_expended() <= get_promoted_reserve(), "Do not expend more promotion than budgeted");
                 ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, actual_size);
               } else {
-                // Disable promotions in this thread because entirety of this PLAB must be available to hold old-gen evacuations.
-                ShenandoahThreadLocalData::disable_plab_promotions(thread);
-                ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
+                disable_plab_promotions = true;
               }
-            } else if (is_promotion) {
-              // Shared promotion.  Assume size is requested_bytes.
-              expend_promoted(requested_bytes);
-              assert(get_promoted_expended() <= get_promoted_reserve(), "Do not expend more promotion than budgeted");
+            } else {
+              disable_plab_promotions = true;
             }
+            if (disable_plab_promotions) {
+              // Disable promotions in this thread because entirety of this PLAB must be available to hold old-gen evacuations.
+              ShenandoahThreadLocalData::disable_plab_promotions(thread);
+              ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
+            }
+          } else if (is_promotion) {
+            // Shared promotion.  Assume size is requested_bytes.
+            expend_promoted(requested_bytes);
+            assert(get_promoted_expended() <= get_promoted_reserve(), "Do not expend more promotion than budgeted");
           }
-
-          // Register the newly allocated object while we're holding the global lock since there's no synchronization
-          // built in to the implementation of register_object().  There are potential races when multiple independent
-          // threads are allocating objects, some of which might span the same card region.  For example, consider
-          // a card table's memory region within which three objects are being allocated by three different threads:
-          //
-          // objects being "concurrently" allocated:
-          //    [-----a------][-----b-----][--------------c------------------]
-          //            [---- card table memory range --------------]
-          //
-          // Before any objects are allocated, this card's memory range holds no objects.  Note that allocation of object a
-          //   wants to set the has-object, first-start, and last-start attributes of the preceding card region.
-          //   allocation of object b wants to set the has-object, first-start, and last-start attributes of this card region.
-          //   allocation of object c also wants to set the has-object, first-start, and last-start attributes of this card region.
-          //
-          // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
-          // last-start representing object b while first-start represents object c.  This is why we need to require all
-          // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
-          ShenandoahHeap::heap()->card_scan()->register_object(result);
-        }
-      } else {
-        // The allocation failed.  If this was a plab allocation, We've already retired it and no longer have a plab.
-        if (req.is_old() && req.is_gc_alloc() &&
-            (req.type() == ShenandoahAllocRequest::_alloc_plab)) {
-          // We don't need to disable PLAB promotions because there is no PLAB.  We leave promotions enabled because
-          // this allows the surrounding infrastructure to retry alloc_plab_slow() with a smaller PLAB size.
-          ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
         }
+
+        // Register the newly allocated object while we're holding the global lock since there's no synchronization
+        // built in to the implementation of register_object().  There are potential races when multiple independent
+        // threads are allocating objects, some of which might span the same card region.  For example, consider
+        // a card table's memory region within which three objects are being allocated by three different threads:
+        //
+        // objects being "concurrently" allocated:
+        //    [-----a------][-----b-----][--------------c------------------]
+        //            [---- card table memory range --------------]
+        //
+        // Before any objects are allocated, this card's memory range holds no objects.  Note that allocation of object a
+        //   wants to set the starts-object, first-start, and last-start attributes of the preceding card region.
+        //   allocation of object b wants to set the starts-object, first-start, and last-start attributes of this card region.
+        //   allocation of object c also wants to set the starts-object, first-start, and last-start attributes of this
+        //   card region.
+        //
+        // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
+        // last-start representing object b while first-start represents object c.  This is why we need to require all
+        // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
+        ShenandoahHeap::heap()->card_scan()->register_object(result);
+      }
+    } else {
+      // The allocation failed.  If this was a plab allocation, we've already retired it and no longer have a plab.
+      if (req.is_old() && req.is_gc_alloc() && (req.type() == ShenandoahAllocRequest::_alloc_plab)) {
+        // We don't need to disable PLAB promotions because there is no PLAB.  We leave promotions enabled because
+        // this allows the surrounding infrastructure to retry alloc_plab_slow() with a smaller PLAB size.
+        ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
       }
+    }
+    if ((result != nullptr) || !try_smaller_lab_size) {
       return result;
     }
-    // else, try_smaller_lab_size is true so we fall through and recurse with a smaller lab size
-  } // This closes the block that holds the heap lock.  This releases the lock.
+    // else, fall through to try_smaller_lab_size
+  } // This closes the block that holds the heap lock, releasing the lock.
+
+  // We failed to allocate the originally requested lab size.  Let's see if we can allocate a smaller lab size.
+  if (req.size() == smaller_lab_size) {
+    // If the failed request was already at smaller_lab_size, retrying the same size has no value.  End the recursion.
+    return nullptr;
+  }
 
   // We arrive here if the tlab allocation request can be resized to fit within young_available
   assert((req.affiliation() == YOUNG_GENERATION) && req.is_lab_alloc() && req.is_mutator_alloc() &&
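
The PLAB path above pre-expends the entire PLAB against the promotion reserve and unexpends the
unused remainder when the PLAB is retired.  A simplified standalone model of that budget protocol
(illustrative only; the struct and method names are hypothetical):

    #include <cstddef>

    struct PromotionBudget {
      size_t reserve;   // bytes budgeted for promotion this cycle
      size_t expended;  // bytes already claimed against the reserve

      // Claim the whole PLAB up front; returns false if that would overrun the budget,
      // in which case the PLAB must be kept available for old-gen evacuations instead.
      bool try_expend_for_plab(size_t plab_bytes) {
        if (expended + plab_bytes > reserve) {
          return false;
        }
        expended += plab_bytes;
        return true;
      }

      // On PLAB retirement, return the portion that was never used for promotion.
      void unexpend(size_t unused_bytes) {
        expended -= unused_bytes;
      }
    };
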
@@ -1589,31 +1690,41 @@ class ShenandoahGenerationalEvacuationTask : public WorkerTask {
   void do_work() {
     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
     ShenandoahHeapRegion* r;
+    ShenandoahMarkingContext* const ctx = ShenandoahHeap::heap()->marking_context();
+    size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+    size_t old_garbage_threshold = (region_size_bytes * ShenandoahOldGarbageThreshold) / 100;
     while ((r = _regions->next()) != nullptr) {
-      log_debug(gc)("GenerationalEvacuationTask do_work(), looking at %s region " SIZE_FORMAT ", (age: %d) [%s, %s]",
+      log_debug(gc)("GenerationalEvacuationTask do_work(), looking at %s region " SIZE_FORMAT ", (age: %d) [%s, %s, %s]",
                     r->is_old()? "old": r->is_young()? "young": "free", r->index(), r->age(),
                     r->is_active()? "active": "inactive",
-                    r->is_humongous()? (r->is_humongous_start()? "humongous_start": "humongous_continuation"): "regular");
+                    r->is_humongous()? (r->is_humongous_start()? "humongous_start": "humongous_continuation"): "regular",
+                    r->is_cset()? "cset": "not-cset");
+
       if (r->is_cset()) {
         assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
         _sh->marked_object_iterate(r, &cl);
         if (ShenandoahPacing) {
           _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
         }
-      } else if (r->is_young() && r->is_active() && r->is_humongous_start() && (r->age() > InitialTenuringThreshold)) {
-        // We promote humongous_start regions along with their affiliated continuations during evacuation rather than
-        // doing this work during a safepoint.  We cannot put humongous regions into the collection set because that
-        // triggers the load-reference barrier (LRB) to copy on reference fetch.
-        if (r->promote_humongous() == 0) {
-          // We chose not to promote because old-gen is out of memory.  Report and handle the promotion failure because
-          // this suggests need for expanding old-gen and/or performing collection of old-gen.
-          ShenandoahHeap* heap = ShenandoahHeap::heap();
-          oop obj = cast_to_oop(r->bottom());
-          size_t size = obj->size();
-          Thread* thread = Thread::current();
-          heap->report_promotion_failure(thread, size);
-          heap->handle_promotion_failure();
+      } else if (r->is_young() && r->is_active() && (r->age() >= InitialTenuringThreshold)) {
+        HeapWord* tams = ctx->top_at_mark_start(r);
+        if (r->is_humongous_start()) {
+          // We promote humongous_start regions along with their affiliated continuations during evacuation rather than
+          // doing this work during a safepoint.  We cannot put humongous regions into the collection set because that
+          // triggers the load-reference barrier (LRB) to copy on reference fetch.
+          r->promote_humongous();
+        } else if (r->is_regular() && (r->garbage_before_padded_for_promote() < old_garbage_threshold) && (r->get_top_before_promote() == tams)) {
+          // Likewise, we cannot put promote-in-place regions into the collection set because that would also trigger
+          // the LRB to copy on reference fetch.
+          r->promote_in_place();
         }
+        // Aged humongous continuation regions are handled with their start region.  If an aged regular region has
+        // more garbage than ShenandoahOldGarbageThreshold, we promote it by evacuation.  If there is room for evacuation
+        // in this cycle, the region will be in the collection set.  If there is not room, the region will be promoted
+        // by evacuation in some future GC cycle.
+
+        // If an aged regular region has received allocations during the current cycle, we do not promote because the
+        // newly allocated objects do not have appropriate age; this region's age will be reset to zero at end of cycle.
       }
       // else, region is free, or OLD, or not in collection set, or humongous_continuation,
       // or is young humongous_start that is too young to be promoted
@@ -1812,7 +1923,7 @@ void ShenandoahHeap::set_young_lab_region_flags() {
 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
   if (ShenandoahElasticTLAB) {
     if (mode()->is_generational()) {
-      return MIN2(ShenandoahHeapRegion::max_tlab_size_bytes(), young_generation()->adjusted_available());
+      return MIN2(ShenandoahHeapRegion::max_tlab_size_bytes(), young_generation()->available());
     } else {
       // With Elastic TLABs, return the max allowed size, and let the allocation path
       // figure out the safe size for current allocation.
@@ -1900,24 +2011,16 @@ void ShenandoahHeap::on_cycle_start(GCCause::Cause cause, ShenandoahGeneration*
 
   shenandoah_policy()->record_cycle_start();
   generation->heuristics()->record_cycle_start();
-
-  // When a cycle starts, attribute any thread activity when the collector
-  // is idle to the global generation.
-  _mmu_tracker.record(global_generation());
 }
 
 void ShenandoahHeap::on_cycle_end(ShenandoahGeneration* generation) {
   generation->heuristics()->record_cycle_end();
-
   if (mode()->is_generational() && (generation->is_global() || upgraded_to_full())) {
     // If we just completed a GLOBAL GC, claim credit for completion of young-gen and old-gen GC as well
     young_generation()->heuristics()->record_cycle_end();
     old_generation()->heuristics()->record_cycle_end();
   }
   set_gc_cause(GCCause::_no_gc);
-
-  // When a cycle ends, the thread activity is attributed to the respective generation
-  _mmu_tracker.record(generation);
 }
 
 void ShenandoahHeap::verify(VerifyOption vo) {
@@ -2676,6 +2779,16 @@ class ShenandoahUpdateHeapRefsTask : public WorkerTask {
   template<class T>
   void do_work(uint worker_id) {
     T cl;
+    if (CONCURRENT && (worker_id == 0)) {
+      // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
+      // results of evacuation.  These reserves are no longer necessary because evacuation has completed.
+      size_t cset_regions = _heap->collection_set()->count();
+      // We cannot transfer any more regions than will be reclaimed when the existing collection set is recycled, because
+      // we need the reclaimed collection set regions to replenish the collector reserves
+      _heap->free_set()->move_collector_sets_to_mutator(cset_regions);
+    }
+    // If !CONCURRENT, there's no value in expanding Mutator free set
+
     ShenandoahHeapRegion* r = _regions->next();
     // We update references for global, old, and young collections.
     assert(_heap->active_generation()->is_mark_complete(), "Expected complete marking");
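
This is the usual "first worker performs one-time setup" idiom: every worker updates references,
but worker 0 first returns the no-longer-needed evacuation reserve to the mutator.  A minimal
sketch under hypothetical helper names (illustrative only):

    #include <cstddef>

    // Hypothetical stand-ins for the free-set and collection-set queries used above.
    size_t count_collection_set_regions();
    void   move_collector_reserve_to_mutator(size_t cap_regions);
    void   update_references_for_assigned_regions(unsigned worker_id);

    void do_update_refs_work(unsigned worker_id, bool concurrent) {
      if (concurrent && worker_id == 0) {
        // The transfer is capped by the regions the recycled collection set will free,
        // because those reclaimed regions must replenish the collector reserves.
        move_collector_reserve_to_mutator(count_collection_set_regions());
      }
      update_references_for_assigned_regions(worker_id);
    }
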
@@ -2938,12 +3051,58 @@ void ShenandoahHeap::update_heap_region_states(bool concurrent) {
 }
 
 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
-  {
-    ShenandoahGCPhase phase(concurrent ?
-                            ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
-                            ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
-    ShenandoahHeapLocker locker(lock());
-    _free_set->rebuild();
+  ShenandoahGCPhase phase(concurrent ?
+                          ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
+                          ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
+  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+  ShenandoahHeapLocker locker(lock());
+  size_t young_cset_regions, old_cset_regions;
+  _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions);
+
+  if (mode()->is_generational()) {
+    assert(verify_generation_usage(true, old_generation()->used_regions(),
+                                   old_generation()->used(), old_generation()->get_humongous_waste(),
+                                   true, young_generation()->used_regions(),
+                                   young_generation()->used(), young_generation()->get_humongous_waste()),
+           "Generation accounts are inaccurate");
+
+    // The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative, so we consider all of
+    // this runway available for transfer to old.  Note that transfer of humongous regions does not impact available.
+    size_t allocation_runway = young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
+    adjust_generation_sizes_for_next_cycle(allocation_runway, young_cset_regions, old_cset_regions);
+
+    // Total old_available may have been expanded to hold anticipated promotions.  Note that fragmentation may increase
+    // when we promote regular regions in place while many of these regions still hold an abundant amount of available
+    // memory within them.  Fragmentation will decrease as promote-by-copy consumes the available memory within these
+    // partially consumed regions.
+    //
+    // After the rebuild (below), we consider old-gen to have excessive fragmentation when old-gen spans more than 1/8
+    // of the heap and more than 12.5% of old-gen is free memory that resides within partially consumed regions.
+  }
+  // Rebuild free set based on adjusted generation sizes.
+  _free_set->rebuild(young_cset_regions, old_cset_regions);
+
+  if (mode()->is_generational()) {
+    size_t old_available = old_generation()->available();
+    size_t old_unaffiliated_available = old_generation()->free_unaffiliated_regions() * region_size_bytes;
+    assert(old_available >= old_unaffiliated_available, "unaffiliated available is a subset of total available");
+    size_t old_fragmented_available = old_available - old_unaffiliated_available;
+
+    size_t old_capacity = old_generation()->max_capacity();
+    size_t heap_capacity = capacity();
+    if ((old_capacity > heap_capacity / 8) && (old_fragmented_available > old_capacity / 8)) {
+      ((ShenandoahOldHeuristics *) old_generation()->heuristics())->trigger_old_is_fragmented();
+    }
+
+    size_t old_used = old_generation()->used() + old_generation()->get_humongous_waste();
+    size_t trigger_threshold = old_generation()->usage_trigger_threshold();
+    // Detects unsigned arithmetic underflow
+    assert(old_used < ShenandoahHeap::heap()->capacity(), "Old used must be less than heap capacity");
+
+    if (old_used > trigger_threshold) {
+      ((ShenandoahOldHeuristics *) old_generation()->heuristics())->trigger_old_has_grown();
+    }
   }
 }
 
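
The post-rebuild trigger logic above reduces to two comparisons over byte quantities.  Restated as
a standalone function (illustrative only; parameter names mirror the locals above):

    #include <cstddef>

    struct OldGenTriggers {
      bool fragmented;
      bool grown;
    };

    OldGenTriggers evaluate_old_triggers(size_t old_available, size_t old_unaffiliated_available,
                                         size_t old_capacity, size_t heap_capacity,
                                         size_t old_used_with_waste, size_t usage_trigger_threshold) {
      // Fragmented available: free memory trapped inside partially consumed old regions.
      size_t old_fragmented_available = old_available - old_unaffiliated_available;
      OldGenTriggers t;
      // Trigger only when old-gen is big enough to matter (more than 1/8 of the heap) and
      // more than 1/8 (12.5%) of old capacity is fragmented free memory.
      t.fragmented = (old_capacity > heap_capacity / 8) &&
                     (old_fragmented_available > old_capacity / 8);
      t.grown = old_used_with_waste > usage_trigger_threshold;
      return t;
    }
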
@@ -3187,6 +3346,57 @@ void ShenandoahGenerationRegionClosure<GLOBAL_NON_GEN>::heap_region_do(Shenandoa
   _cl->heap_region_do(region);
 }
 
+bool ShenandoahHeap::verify_generation_usage(bool verify_old, size_t old_regions, size_t old_bytes, size_t old_waste,
+                                             bool verify_young, size_t young_regions, size_t young_bytes, size_t young_waste) {
+  size_t tally_old_regions = 0;
+  size_t tally_old_bytes = 0;
+  size_t tally_old_waste = 0;
+  size_t tally_young_regions = 0;
+  size_t tally_young_bytes = 0;
+  size_t tally_young_waste = 0;
+
+  shenandoah_assert_heaplocked_or_safepoint();
+  for (size_t i = 0; i < num_regions(); i++) {
+    ShenandoahHeapRegion* r = get_region(i);
+    if (r->is_old()) {
+      tally_old_regions++;
+      tally_old_bytes += r->used();
+      if (r->is_humongous()) {
+        ShenandoahHeapRegion* start = r->humongous_start_region();
+        HeapWord* obj_addr = start->bottom();
+        oop obj = cast_to_oop(obj_addr);
+        size_t word_size = obj->size();
+        HeapWord* end_addr = obj_addr + word_size;
+        if (end_addr <= r->end()) {
+          tally_old_waste += (r->end() - end_addr) * HeapWordSize;
+        }
+      }
+    } else if (r->is_young()) {
+      tally_young_regions++;
+      tally_young_bytes += r->used();
+      if (r->is_humongous()) {
+        ShenandoahHeapRegion* start = r->humongous_start_region();
+        HeapWord* obj_addr = start->bottom();
+        oop obj = cast_to_oop(obj_addr);
+        size_t word_size = obj->size();
+        HeapWord* end_addr = obj_addr + word_size;
+        if (end_addr <= r->end()) {
+          tally_young_waste += (r->end() - end_addr) * HeapWordSize;
+        }
+      }
+    }
+  }
+  if (verify_young &&
+      ((young_regions != tally_young_regions) || (young_bytes != tally_young_bytes) || (young_waste != tally_young_waste))) {
+    return false;
+  } else if (verify_old &&
+             ((old_regions != tally_old_regions) || (old_bytes != tally_old_bytes) || (old_waste != tally_old_waste))) {
+    return false;
+  } else {
+    return true;
+  }
+}
+
 ShenandoahGeneration* ShenandoahHeap::generation_for(ShenandoahAffiliation affiliation) const {
   if (!mode()->is_generational()) {
     return global_generation();
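
In the waste tallies above, only the last region spanned by a humongous object can satisfy
end_addr <= r->end(), so all of the waste is charged to the tail region.  The aggregate arithmetic,
as a standalone sketch (for example, with 1 MiB regions a 2.5 MiB object spans 3 regions and
wastes 0.5 MiB):

    #include <cstddef>

    // Bytes wasted by a humongous allocation: the slack between the object's end and the
    // end of the last region it spans.
    size_t humongous_waste_bytes(size_t object_bytes, size_t region_bytes) {
      size_t spanned = (object_bytes + region_bytes - 1) / region_bytes;  // ceiling divide
      return spanned * region_bytes - object_bytes;
    }
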
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
index 105835e5728..368ae2a1ab3 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
@@ -205,6 +205,9 @@ class ShenandoahHeap : public CollectedHeap {
   void prepare_for_verify() override;
   void verify(VerifyOption vo) override;
 
+  bool verify_generation_usage(bool verify_old, size_t old_regions, size_t old_bytes, size_t old_waste,
+                               bool verify_young, size_t young_regions, size_t young_bytes, size_t young_waste);
+
 // WhiteBox testing support.
   bool supports_concurrent_gc_breakpoints() const override {
     return true;
@@ -215,6 +218,14 @@ class ShenandoahHeap : public CollectedHeap {
 private:
            size_t _initial_size;
            size_t _minimum_size;
+           size_t _promotion_potential;
+           size_t _promotion_in_place_potential;
+           size_t _pad_for_promote_in_place;    // bytes of filler
+           size_t _promotable_humongous_regions;
+           size_t _promotable_humongous_usage;
+           size_t _regular_regions_promoted_in_place;
+           size_t _regular_usage_promoted_in_place;
+
   volatile size_t _soft_max_size;
   shenandoah_padding(0);
   volatile size_t _committed;
@@ -284,6 +295,8 @@ class ShenandoahHeap : public CollectedHeap {
   void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
   void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
 
+  inline ShenandoahMmuTracker* mmu_tracker() { return &_mmu_tracker; }
+
 // ---------- GC state machinery
 //
 // GC state describes the important parts of collector state, that may be
@@ -332,31 +345,8 @@ class ShenandoahHeap : public CollectedHeap {
   ShenandoahSharedFlag   _progress_last_gc;
   ShenandoahSharedFlag   _concurrent_strong_root_in_progress;
 
-  // _alloc_supplement_reserve is a supplemental budget for new_memory allocations.  During evacuation and update-references,
-  // mutator allocation requests are "authorized" iff young_gen->available() plus _alloc_supplement_reserve minus
-  // _young_evac_reserve is greater than request size.  The values of _alloc_supplement_reserve and _young_evac_reserve
-  // are zero except during evacuation and update-reference phases of GC.  Both of these values are established at
-  // the start of evacuation, and they remain constant throughout the duration of these two phases of GC.  Since these
-  // two values are constant throughout each GC phases, we introduce a new service into ShenandoahGeneration.  This service
-  // provides adjusted_available() based on an adjusted capacity.  At the start of evacuation, we adjust young capacity by
-  // adding the amount to be borrowed from old-gen and subtracting the _young_evac_reserve, we adjust old capacity by
-  // subtracting the amount to be loaned to young-gen.
-  //
-  // We always use adjusted capacities to determine permission to allocate within young and to promote into old.  Note
-  // that adjusted capacities equal traditional capacities except during evacuation and update refs.
-  //
-  // During evacuation, we assure that _old_evac_expended does not exceed _old_evac_reserve.
-  //
-  // At the end of update references, we perform the following bookkeeping activities:
-  //
-  // 1. Unadjust the capacity within young-gen and old-gen to undo the effects of borrowing memory from old-gen.  Note that
-  //    the entirety of the collection set is now available, so allocation capacity naturally increase at this time.
-  // 2. Clear (reset to zero) _alloc_supplement_reserve, _young_evac_reserve, _old_evac_reserve, and _promoted_reserve
-  //
-  // _young_evac_reserve and _old_evac_reserve are only non-zero during evacuation and update-references.
-  //
-  // Allocation of old GCLABs assures that _old_evac_expended + request-size < _old_evac_reserved.  If the allocation
-  //  is authorized, increment _old_evac_expended by request size.  This allocation ignores old_gen->available().
+  // TODO: Revisit the following comment.  It may not accurately represent the true behavior when evacuations fail due to
+  // difficulty finding memory to hold evacuated objects.
   //
   // Note that the typical total expenditure on evacuation is less than the associated evacuation reserve because we generally
   // reserve ShenandoahEvacWaste (> 1.0) times the anticipated evacuation need.  In the case that there is an excessive amount
@@ -364,10 +354,15 @@ class ShenandoahHeap : public CollectedHeap {
   // effort.  If this happens, the requesting thread blocks until some other thread manages to evacuate the offending object.
   // Only after "all" threads fail to evacuate an object do we consider the evacuation effort to have failed.
 
-  intptr_t _alloc_supplement_reserve;  // Bytes reserved for young allocations during evac and update refs
+  // How many full-gc cycles have been completed?
+  volatile size_t _completed_fullgc_cycles;
+
   size_t _promoted_reserve;            // Bytes reserved within old-gen to hold the results of promotion
   volatile size_t _promoted_expended;  // Bytes of old-gen memory expended on promotions
 
+  // Allocation of old GCLABs (aka PLABs) assures that _old_evac_expended + request-size < _old_evac_reserve.  If the
+  // allocation is authorized, increment _old_evac_expended by request size.  This allocation ignores old_gen->available().
+
   size_t _old_evac_reserve;            // Bytes reserved within old-gen to hold evacuated objects from old-gen collection set
   volatile size_t _old_evac_expended;  // Bytes of old-gen memory expended on old-gen evacuations
 
@@ -397,7 +392,6 @@ class ShenandoahHeap : public CollectedHeap {
   void set_gc_state_mask(uint mask, bool value);
 
 public:
-
   char gc_state() const;
   static address gc_state_addr();
 
@@ -415,6 +409,7 @@ class ShenandoahHeap : public CollectedHeap {
   void set_prepare_for_old_mark_in_progress(bool cond);
   void set_aging_cycle(bool cond);
 
+
   inline bool is_stable() const;
   inline bool is_idle() const;
   inline bool has_evacuation_reserve_quantities() const;
@@ -441,9 +436,31 @@ class ShenandoahHeap : public CollectedHeap {
   inline void set_previous_promotion(size_t promoted_bytes);
   inline size_t get_previous_promotion() const;
 
+  inline void clear_promotion_potential() { _promotion_potential = 0; }
+  inline void set_promotion_potential(size_t val) { _promotion_potential = val; }
+  inline size_t get_promotion_potential() { return _promotion_potential; }
+
+  inline void clear_promotion_in_place_potential() { _promotion_in_place_potential = 0; }
+  inline void set_promotion_in_place_potential(size_t val) { _promotion_in_place_potential = val; }
+  inline size_t get_promotion_in_place_potential() { return _promotion_in_place_potential; }
+
+  inline void set_pad_for_promote_in_place(size_t pad) { _pad_for_promote_in_place = pad; }
+  inline size_t get_pad_for_promote_in_place() { return _pad_for_promote_in_place; }
+
+  inline void reserve_promotable_humongous_regions(size_t region_count) { _promotable_humongous_regions = region_count; }
+  inline void reserve_promotable_humongous_usage(size_t bytes) { _promotable_humongous_usage = bytes; }
+  inline void reserve_promotable_regular_regions(size_t region_count) { _regular_regions_promoted_in_place = region_count; }
+  inline void reserve_promotable_regular_usage(size_t used_bytes) { _regular_usage_promoted_in_place = used_bytes; }
+
+  inline size_t get_promotable_humongous_regions() { return _promotable_humongous_regions; }
+  inline size_t get_promotable_humongous_usage() { return _promotable_humongous_usage; }
+  inline size_t get_regular_regions_promoted_in_place() { return _regular_regions_promoted_in_place; }
+  inline size_t get_regular_usage_promoted_in_place() { return _regular_usage_promoted_in_place; }
+
   // Returns previous value
   inline size_t set_promoted_reserve(size_t new_val);
   inline size_t get_promoted_reserve() const;
+  inline void augment_promo_reserve(size_t increment);
 
   inline void reset_promoted_expended();
   inline size_t expend_promoted(size_t increment);
@@ -453,6 +470,7 @@ class ShenandoahHeap : public CollectedHeap {
   // Returns previous value
   inline size_t set_old_evac_reserve(size_t new_val);
   inline size_t get_old_evac_reserve() const;
+  inline void augment_old_evac_reserve(size_t increment);
 
   inline void reset_old_evac_expended();
   inline size_t expend_old_evac(size_t increment);
@@ -462,11 +480,6 @@ class ShenandoahHeap : public CollectedHeap {
   inline size_t set_young_evac_reserve(size_t new_val);
   inline size_t get_young_evac_reserve() const;
 
-  // Returns previous value.  This is a signed value because it is the amount borrowed minus the amount reserved for
-  // young-gen evacuation.  In case we cannot borrow much, this value might be negative.
-  inline intptr_t set_alloc_supplement_reserve(intptr_t new_val);
-  inline intptr_t get_alloc_supplement_reserve() const;
-
 private:
   void manage_satb_barrier(bool active);
 
@@ -517,11 +530,11 @@ class ShenandoahHeap : public CollectedHeap {
   void update_heap_references(bool concurrent);
   // Final update region states
   void update_heap_region_states(bool concurrent);
-  void rebuild_free_set(bool concurrent);
 
   void rendezvous_threads();
   void recycle_trash();
 public:
+  void rebuild_free_set(bool concurrent);
   void notify_gc_progress()    { _progress_last_gc.set();   }
   void notify_gc_no_progress() { _progress_last_gc.unset(); }
 
@@ -698,6 +711,10 @@ class ShenandoahHeap : public CollectedHeap {
 // ---------- Allocation support
 //
 private:
+  // How many regions to transfer between old and young after we have finished recycling collection set regions?
+  size_t _old_regions_surplus;
+  size_t _old_regions_deficit;
+
   HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region, bool is_promotion);
 
   inline HeapWord* allocate_from_gclab(Thread* thread, size_t size);
@@ -731,6 +748,12 @@ class ShenandoahHeap : public CollectedHeap {
 
   void set_young_lab_region_flags();
 
+  inline void set_old_region_surplus(size_t surplus) { _old_regions_surplus = surplus; }
+  inline void set_old_region_deficit(size_t deficit) { _old_regions_deficit = deficit; }
+
+  inline size_t get_old_region_surplus() { return _old_regions_surplus; }
+  inline size_t get_old_region_deficit() { return _old_regions_deficit; }
+
 // ---------- Marking support
 //
 private:
@@ -830,7 +853,7 @@ class ShenandoahHeap : public CollectedHeap {
   void cancel_old_gc();
   bool is_old_gc_active();
   void coalesce_and_fill_old_regions();
-  bool adjust_generation_sizes();
+  void adjust_generation_sizes_for_next_cycle(size_t old_xfer_limit, size_t young_cset_regions, size_t old_cset_regions);
 
 // ---------- Helper functions
 //
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
index d131d336e78..d6dccf01f9d 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
@@ -291,11 +291,12 @@ inline HeapWord* ShenandoahHeap::allocate_from_plab(Thread* thread, size_t size,
 
   PLAB* plab = ShenandoahThreadLocalData::plab(thread);
   HeapWord* obj;
+
   if (plab == nullptr) {
     assert(!thread->is_Java_thread() && !thread->is_Worker_thread(), "Performance: thread should have PLAB: %s", thread->name());
     // No PLABs in this thread, fallback to shared allocation
     return nullptr;
-  } else if (is_promotion && (plab->words_remaining() > 0) && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
+  } else if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
     return nullptr;
   }
   // if plab->word_size() <= 0, thread's plab not yet initialized for this pass, so allow_plab_promotions() is not trustworthy
@@ -382,7 +383,6 @@ inline oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, Shenandoah
            break;
         }
         case OLD_GENERATION: {
-
            PLAB* plab = ShenandoahThreadLocalData::plab(thread);
            if (plab != nullptr) {
              has_plab = true;
@@ -510,6 +510,7 @@ inline oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, Shenandoah
       // For non-LAB allocations, we have no way to retract the allocation, and
       // have to explicitly overwrite the copy with the filler object. With that overwrite,
       // we have to keep the fwdptr initialized and pointing to our (stale) copy.
+      assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
       fill_with_object(copy, size);
       shenandoah_assert_correct(nullptr, copy_val);
       // For non-LAB allocations, the object has already been registered
@@ -753,6 +754,14 @@ inline size_t ShenandoahHeap::get_old_evac_reserve() const {
   return _old_evac_reserve;
 }
 
+inline void ShenandoahHeap::augment_old_evac_reserve(size_t increment) {
+  _old_evac_reserve += increment;
+}
+
+inline void ShenandoahHeap::augment_promo_reserve(size_t increment) {
+  _promoted_reserve += increment;
+}
+
 inline void ShenandoahHeap::reset_old_evac_expended() {
   Atomic::store(&_old_evac_expended, (size_t) 0);
 }
@@ -791,16 +800,6 @@ inline size_t ShenandoahHeap::get_young_evac_reserve() const {
   return _young_evac_reserve;
 }
 
-inline intptr_t ShenandoahHeap::set_alloc_supplement_reserve(intptr_t new_val) {
-  intptr_t orig = _alloc_supplement_reserve;
-  _alloc_supplement_reserve = new_val;
-  return orig;
-}
-
-inline intptr_t ShenandoahHeap::get_alloc_supplement_reserve() const {
-  return _alloc_supplement_reserve;
-}
-
 template<class T>
 inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
   marked_object_iterate(region, cl, region->top());
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
index 299c2b1c593..941f4f1ec73 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
@@ -29,6 +29,7 @@
 #include "gc/shared/space.inline.hpp"
 #include "gc/shared/tlab_globals.hpp"
 #include "gc/shenandoah/shenandoahCardTable.hpp"
+#include "gc/shenandoah/shenandoahFreeSet.hpp"
 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
@@ -102,7 +103,7 @@ void ShenandoahHeapRegion::make_regular_allocation(ShenandoahAffiliation affilia
     case _empty_uncommitted:
       do_commit();
     case _empty_committed:
-      set_affiliation(affiliation);
+      assert(this->affiliation() == affiliation, "Region affiliation should already be established");
       set_state(_regular);
     case _regular:
     case _pinned:
@@ -122,7 +123,13 @@ void ShenandoahHeapRegion::make_young_maybe() {
    case _cset:
    case _humongous_start:
    case _humongous_cont:
-     set_affiliation(YOUNG_GENERATION);
+     if (affiliation() != YOUNG_GENERATION) {
+       if (is_old()) {
+         ShenandoahHeap::heap()->old_generation()->decrement_affiliated_region_count();
+       }
+       set_affiliation(YOUNG_GENERATION);
+       ShenandoahHeap::heap()->young_generation()->increment_affiliated_region_count();
+     }
      return;
    case _pinned_cset:
    case _regular:
@@ -175,6 +182,7 @@ void ShenandoahHeapRegion::make_humongous_start() {
 void ShenandoahHeapRegion::make_humongous_start_bypass(ShenandoahAffiliation affiliation) {
   shenandoah_assert_heaplocked();
   assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
+  // Don't bother to account for affiliated regions during Full GC.  We recompute totals at end.
   set_affiliation(affiliation);
   reset_age();
   switch (_state) {
@@ -207,6 +215,7 @@ void ShenandoahHeapRegion::make_humongous_cont_bypass(ShenandoahAffiliation affi
   shenandoah_assert_heaplocked();
   assert (ShenandoahHeap::heap()->is_full_gc_in_progress(), "only for full GC");
   set_affiliation(affiliation);
+  // Don't bother to account for affiliated regions during Full GC.  We recompute totals at end.
   reset_age();
   switch (_state) {
     case _empty_committed:
@@ -469,6 +478,7 @@ bool ShenandoahHeapRegion::oop_fill_and_coalesce_without_cancel() {
       HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
       assert(next_marked_obj <= t, "next marked object cannot exceed top");
       size_t fill_size = next_marked_obj - obj_addr;
+      assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size");
       ShenandoahHeap::fill_with_object(obj_addr, fill_size);
       heap->card_scan()->coalesce_objects(obj_addr, fill_size);
       obj_addr = next_marked_obj;
@@ -514,6 +524,7 @@ bool ShenandoahHeapRegion::oop_fill_and_coalesce() {
       HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
       assert(next_marked_obj <= t, "next marked object cannot exceed top");
       size_t fill_size = next_marked_obj - obj_addr;
+      assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
       ShenandoahHeap::fill_with_object(obj_addr, fill_size);
       heap->card_scan()->coalesce_objects(obj_addr, fill_size);
       obj_addr = next_marked_obj;
@@ -565,8 +576,8 @@ void ShenandoahHeapRegion::global_oop_iterate_objects_and_fill_dead(OopIterateCl
       HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
       assert(next_marked_obj <= t, "next marked object cannot exceed top");
       size_t fill_size = next_marked_obj - obj_addr;
+      assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size");
       ShenandoahHeap::fill_with_object(obj_addr, fill_size);
-
       // coalesce_objects() unregisters all but first object subsumed within coalesced range.
       rem_set_scanner->coalesce_objects(obj_addr, fill_size);
       obj_addr = next_marked_obj;
@@ -670,8 +681,8 @@ void ShenandoahHeapRegion::recycle() {
   set_update_watermark(bottom());
 
   make_empty();
+  ShenandoahHeap::heap()->generation_for(affiliation())->decrement_affiliated_region_count();
   set_affiliation(FREE);
-
   if (ZapUnusedHeapArea) {
     SpaceMangler::mangle_region(MemRegion(bottom(), end()));
   }
@@ -957,40 +968,22 @@ void ShenandoahHeapRegion::set_affiliation(ShenandoahAffiliation new_affiliation
   }
 
   if (!heap->mode()->is_generational()) {
+    log_trace(gc)("Changing affiliation of region %zu from %s to %s",
+                  index(), affiliation_name(), shenandoah_affiliation_name(new_affiliation));
     heap->set_affiliation(this, new_affiliation);
     return;
   }
 
-  log_trace(gc)("Changing affiliation of region %zu from %s to %s",
-    index(), shenandoah_affiliation_name(region_affiliation), shenandoah_affiliation_name(new_affiliation));
-
-  if (region_affiliation == ShenandoahAffiliation::YOUNG_GENERATION) {
-    heap->young_generation()->decrement_affiliated_region_count();
-  } else if (region_affiliation == ShenandoahAffiliation::OLD_GENERATION) {
-    heap->old_generation()->decrement_affiliated_region_count();
-  }
-
-  size_t regions;
   switch (new_affiliation) {
     case FREE:
       assert(!has_live(), "Free region should not have live data");
       break;
     case YOUNG_GENERATION:
       reset_age();
-      regions = heap->young_generation()->increment_affiliated_region_count();
-      // During Full GC, we allow temporary violation of this requirement.  We enforce that this condition is
-      // restored upon completion of Full GC.
-      assert(heap->is_full_gc_in_progress() ||
-             (regions * ShenandoahHeapRegion::region_size_bytes() <= heap->young_generation()->adjusted_capacity()),
-             "Number of young regions cannot exceed adjusted capacity");
       break;
     case OLD_GENERATION:
-      regions = heap->old_generation()->increment_affiliated_region_count();
-      // During Full GC, we allow temporary violation of this requirement.  We enforce that this condition is
-      // restored upon completion of Full GC.
-      assert(heap->is_full_gc_in_progress() ||
-             (regions * ShenandoahHeapRegion::region_size_bytes() <= heap->old_generation()->adjusted_capacity()),
-             "Number of old regions cannot exceed adjusted capacity");
+      // TODO: should we reset_age() for OLD as well?  Examine invocations of set_affiliation(). Some contexts redundantly
+      //       invoke reset_age().
       break;
     default:
       ShouldNotReachHere();
@@ -999,8 +992,92 @@ void ShenandoahHeapRegion::set_affiliation(ShenandoahAffiliation new_affiliation
   heap->set_affiliation(this, new_affiliation);
 }
 
-// Returns number of regions promoted, or zero if we choose not to promote.
-size_t ShenandoahHeapRegion::promote_humongous() {
+// When we promote a region in place, we can continue to use the established marking context to guide subsequent remembered
+// set scans of this region's content.  The region will be coalesced and filled prior to the next old-gen marking effort.
+// We identify the entirety of the region as DIRTY to force the next remembered set scan to identify the "interesting pointers"
+// contained herein.
+void ShenandoahHeapRegion::promote_in_place() {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  ShenandoahMarkingContext* marking_context = heap->marking_context();
+  HeapWord* tams = marking_context->top_at_mark_start(this);
+  assert(heap->active_generation()->is_mark_complete(), "sanity");
+  assert(is_young(), "Only young regions can be promoted");
+  assert(is_regular(), "Use different service to promote humongous regions");
+  assert(age() >= InitialTenuringThreshold, "Only promote regions that are sufficiently aged");
+
+  ShenandoahOldGeneration* old_gen = heap->old_generation();
+  ShenandoahYoungGeneration* young_gen = heap->young_generation();
+
+  {
+    ShenandoahHeapLocker locker(heap->lock());
+
+    HeapWord* update_watermark = get_update_watermark();
+
+    // Once this region is affiliated with old (below), we can allow it to receive allocations, though it may not be
+    // in the is_collector_free range.
+    restore_top_before_promote();
+
+    size_t region_used = used();
+
+    // The update_watermark was likely established while we had the artificially high value of top.  Make it sane now.
+    assert(update_watermark >= top(), "original top cannot exceed preserved update_watermark");
+    set_update_watermark(top());
+
+    // Unconditionally transfer one region from young to old to represent the newly promoted region.
+    // This expands old and shrinks young by the size of one region.  Strictly, we do not "need" to expand old
+    // if there are already enough unaffiliated regions in old to account for this newly promoted region.
+    // However, if we do not transfer the capacities, we end up reducing the amount of memory that would have
+    // otherwise been available to hold old evacuations, because old available is max_capacity - used and now
+    // we would be trading a fully empty region for a partially used region.
+
+    young_gen->decrease_used(region_used);
+    young_gen->decrement_affiliated_region_count();
+
+    // force_transfer_to_old() increases capacity of old and decreases capacity of young
+    heap->generation_sizer()->force_transfer_to_old(1);
+    set_affiliation(OLD_GENERATION);
+
+    old_gen->increment_affiliated_region_count();
+    old_gen->increase_used(region_used);
+
+    // add_old_collector_free_region() increases promoted_reserve() if available space exceeds PLAB::min_size()
+    heap->free_set()->add_old_collector_free_region(this);
+  }
+
+  assert(top() == tams, "Cannot promote regions in place if top has advanced beyond TAMS");
+
+  // Since this region may have served previously as OLD, it may hold obsolete object range info.
+  heap->card_scan()->reset_object_range(bottom(), end());
+  heap->card_scan()->mark_range_as_dirty(bottom(), top() - bottom());
+
+  // TODO: use an existing coalesce-and-fill function rather than
+  // replicating the code here.
+  HeapWord* obj_addr = bottom();
+  while (obj_addr < tams) {
+    oop obj = cast_to_oop(obj_addr);
+    if (marking_context->is_marked(obj)) {
+      assert(obj->klass() != nullptr, "klass should not be nullptr");
+      // This thread is responsible for registering all objects in this region.  No need for lock.
+      heap->card_scan()->register_object_without_lock(obj_addr);
+      obj_addr += obj->size();
+    } else {
+      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, tams);
+      assert(next_marked_obj <= tams, "next marked object cannot exceed tams");
+      size_t fill_size = next_marked_obj - obj_addr;
+      assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size");
+      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
+      heap->card_scan()->register_object_without_lock(obj_addr);
+      obj_addr = next_marked_obj;
+    }
+  }
+
+  // We do not need to scan above TAMS because top equals tams
+  assert(obj_addr == tams, "Expect loop to terminate when obj_addr equals tams");
+}
+
+void ShenandoahHeapRegion::promote_humongous() {
   ShenandoahHeap* heap = ShenandoahHeap::heap();
   ShenandoahMarkingContext* marking_context = heap->marking_context();
   assert(heap->active_generation()->is_mark_complete(), "sanity");
@@ -1020,51 +1097,40 @@ size_t ShenandoahHeapRegion::promote_humongous() {
   // it becomes garbage.  Better to not make this change until sizes of young-gen and old-gen are completely
   // adaptive, as leaving primitive arrays in young-gen might be perceived as an "astonishing result" by someone
   // has carefully analyzed the required sizes of an application's young-gen and old-gen.
-
-  size_t spanned_regions = ShenandoahHeapRegion::required_regions(obj->size() * HeapWordSize);
+  size_t used_bytes = obj->size() * HeapWordSize;
+  size_t spanned_regions = ShenandoahHeapRegion::required_regions(used_bytes);
+  size_t humongous_waste = spanned_regions * ShenandoahHeapRegion::region_size_bytes() - used_bytes;
   size_t index_limit = index() + spanned_regions;
-
   {
     // We need to grab the heap lock in order to avoid a race when changing the affiliations of spanned_regions from
     // young to old.
     ShenandoahHeapLocker locker(heap->lock());
-    size_t available_old_regions = old_generation->adjusted_unaffiliated_regions();
-    if (spanned_regions <= available_old_regions) {
-      log_debug(gc)("promoting humongous region " SIZE_FORMAT ", spanning " SIZE_FORMAT, index(), spanned_regions);
-
-      // For this region and each humongous continuation region spanned by this humongous object, change
-      // affiliation to OLD_GENERATION and adjust the generation-use tallies.  The remnant of memory
-      // in the last humongous region that is not spanned by obj is currently not used.
-      for (size_t i = index(); i < index_limit; i++) {
-        ShenandoahHeapRegion* r = heap->get_region(i);
-        log_debug(gc)("promoting humongous region " SIZE_FORMAT ", from " PTR_FORMAT " to " PTR_FORMAT,
-                      r->index(), p2i(r->bottom()), p2i(r->top()));
-        // We mark the entire humongous object's range as dirty after loop terminates, so no need to dirty the range here
-        old_generation->increase_used(r->used());
-        young_generation->decrease_used(r->used());
-        r->set_affiliation(OLD_GENERATION);
-      }
 
-      ShenandoahHeapRegion* tail = heap->get_region(index_limit - 1);
-      size_t waste = tail->free();
-      if (waste != 0) {
-        old_generation->increase_humongous_waste(waste);
-        young_generation->decrease_humongous_waste(waste);
-      }
-      // Then fall through to finish the promotion after releasing the heap lock.
-    } else {
-      // There are not enough available old regions to promote this humongous region at this time, so defer promotion.
-      // TODO: Consider allowing the promotion now, with the expectation that we can resize and/or collect OLD
-      // momentarily to address the transient violation of budgets.  Some problems that need to be addressed in order
-      // to allow transient violation of capacity budgets are:
-      //  1. Various size_t subtractions assume usage is less than capacity, and thus assume there will be no
-      //     arithmetic underflow when we subtract usage from capacity.  The results of such size_t subtractions
-      //     would need to be guarded and special handling provided.
-      //  2. ShenandoahVerifier enforces that usage is less than capacity.  If we are going to relax this constraint,
-      //     we need to think about what conditions allow the constraint to be violated and document and implement the
-      //     changes.
-      return 0;
+    // We promote humongous objects unconditionally, without checking for availability.  We adjust
+    // usage totals, including humongous waste, after evacuation is done.
+    log_debug(gc)("promoting humongous region " SIZE_FORMAT ", spanning " SIZE_FORMAT, index(), spanned_regions);
+
+    young_generation->decrease_used(used_bytes);
+    young_generation->decrease_humongous_waste(humongous_waste);
+    young_generation->decrease_affiliated_region_count(spanned_regions);
+
+    // force_transfer_to_old() increases capacity of old and decreases capacity of young
+    heap->generation_sizer()->force_transfer_to_old(spanned_regions);
+
+    // For this region and each humongous continuation region spanned by this humongous object, change
+    // affiliation to OLD_GENERATION and adjust the generation-use tallies.  The remnant of memory
+    // in the last humongous region that is not spanned by obj is currently not used.
+    for (size_t i = index(); i < index_limit; i++) {
+      ShenandoahHeapRegion* r = heap->get_region(i);
+      log_debug(gc)("promoting humongous region " SIZE_FORMAT ", from " PTR_FORMAT " to " PTR_FORMAT,
+                    r->index(), p2i(r->bottom()), p2i(r->top()));
+      // We mark the entire humongous object's range as dirty after loop terminates, so no need to dirty the range here
+      r->set_affiliation(OLD_GENERATION);
     }
+
+    old_generation->increase_affiliated_region_count(spanned_regions);
+    old_generation->increase_used(used_bytes);
+    old_generation->increase_humongous_waste(humongous_waste);
   }
 
   // Since this region may have served previously as OLD, it may hold obsolete object range info.
@@ -1082,7 +1148,6 @@ size_t ShenandoahHeapRegion::promote_humongous() {
                   index(), p2i(bottom()), p2i(bottom() + obj->size()));
     heap->card_scan()->mark_range_as_dirty(bottom(), obj->size());
   }
-  return index_limit - index();
 }
 
 void ShenandoahHeapRegion::decrement_humongous_waste() const {
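
promote_humongous() above now transfers usage, humongous waste, affiliated-region counts, and
capacity from young to old unconditionally.  The net bookkeeping, expressed over plain counters
(illustrative only; the struct is hypothetical):

    #include <cstddef>

    struct GenCounters {
      size_t used;
      size_t humongous_waste;
      size_t affiliated_regions;
      size_t capacity_regions;
    };

    void transfer_humongous(GenCounters& young, GenCounters& old_gen,
                            size_t used_bytes, size_t waste_bytes, size_t spanned_regions) {
      young.used                 -= used_bytes;
      young.humongous_waste      -= waste_bytes;
      young.affiliated_regions   -= spanned_regions;
      young.capacity_regions     -= spanned_regions;  // force_transfer_to_old()
      old_gen.capacity_regions   += spanned_regions;
      old_gen.affiliated_regions += spanned_regions;
      old_gen.used               += used_bytes;
      old_gen.humongous_waste    += waste_bytes;
    }
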
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp
index 269f0fd239b..306cff58508 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp
@@ -242,6 +242,8 @@ class ShenandoahHeapRegion {
   HeapWord* _new_top;
   double _empty_time;
 
+  HeapWord* _top_before_promoted;
+
   // Seldom updated fields
   RegionState _state;
   HeapWord* _coalesce_and_fill_boundary; // for old regions not selected as collection set candidates.
@@ -350,6 +352,11 @@ class ShenandoahHeapRegion {
     return _index;
   }
 
+  inline void save_top_before_promote();
+  inline HeapWord* get_top_before_promote() const { return _top_before_promoted; }
+  inline void restore_top_before_promote();
+  inline size_t garbage_before_padded_for_promote() const;
+
   // Allocation (return nullptr if full)
   inline HeapWord* allocate_aligned(size_t word_size, ShenandoahAllocRequest &req, size_t alignment_in_words);
 
@@ -429,6 +436,7 @@ class ShenandoahHeapRegion {
 
   size_t capacity() const       { return byte_size(bottom(), end()); }
   size_t used() const           { return byte_size(bottom(), top()); }
+  size_t used_before_promote() const { return byte_size(bottom(), get_top_before_promote()); }
   size_t free() const           { return byte_size(top(),    end()); }
 
   // Does this region contain this address?
@@ -457,8 +465,9 @@ class ShenandoahHeapRegion {
   void decrement_age() { if (_age-- == 0) { _age = 0; } }
   void reset_age()     { _age = 0; }
 
-  // Sets all remembered set cards to dirty.  Returns the number of regions spanned by the associated humongous object.
-  size_t promote_humongous();
+  // Register all objects.  Set all remembered set cards to dirty.
+  void promote_humongous();
+  void promote_in_place();
 
 private:
   void decrement_humongous_waste() const;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
index 4ef7f36ab1d..c5823b11ddd 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
@@ -82,6 +82,7 @@ HeapWord* ShenandoahHeapRegion::allocate_aligned(size_t size, ShenandoahAllocReq
   if (size >= req.min_size()) {
     // Even if req.min_size() is not a multiple of card size, we know that size is.
     if (pad_words > 0) {
+      assert(pad_words >= ShenandoahHeap::min_fill_size(), "pad_words expanded above to meet size constraint");
       ShenandoahHeap::fill_with_object(orig_top, pad_words);
       ShenandoahHeap::heap()->card_scan()->register_object(orig_top);
     }
@@ -190,6 +191,17 @@ inline size_t ShenandoahHeapRegion::garbage() const {
   return result;
 }
 
+inline size_t ShenandoahHeapRegion::garbage_before_padded_for_promote() const {
+  assert(get_top_before_promote() != nullptr, "top before promote should not equal null");
+  size_t used_before_promote = byte_size(bottom(), get_top_before_promote());
+  assert(used_before_promote >= get_live_data_bytes(),
+         "Live data must be a subset of used before promotion, live: " SIZE_FORMAT " used: " SIZE_FORMAT,
+         get_live_data_bytes(), used_before_promote);
+  return used_before_promote - get_live_data_bytes();
+}
+
 inline HeapWord* ShenandoahHeapRegion::get_update_watermark() const {
   HeapWord* watermark = Atomic::load_acquire(&_update_watermark);
   assert(bottom() <= watermark && watermark <= top(), "within bounds");
@@ -240,4 +252,17 @@ inline bool ShenandoahHeapRegion::is_affiliated() const {
   return affiliation() != FREE;
 }
 
+inline void ShenandoahHeapRegion::save_top_before_promote() {
+  _top_before_promoted = _top;
+}
+
+inline void ShenandoahHeapRegion::restore_top_before_promote() {
+  _top = _top_before_promoted;
+#ifdef ASSERT
+  _top_before_promoted = nullptr;
+#endif
+}
+
+
 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP
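
save_top_before_promote() and restore_top_before_promote() bracket the promote-in-place protocol:
as the surrounding patch suggests, top is temporarily raised (with the tail padded by a filler
object, per _pad_for_promote_in_place) so mutators stop allocating into a region that is about to
become old, and the saved value is restored once the region is safely affiliated with old.  A
minimal standalone model (illustrative only):

    #include <cassert>

    struct RegionTopModel {
      char* top = nullptr;
      char* top_before_promoted = nullptr;

      void save_top_before_promote() {
        top_before_promoted = top;
        // ... caller then raises top over a filler object to block further allocation ...
      }

      void restore_top_before_promote() {
        assert(top_before_promoted != nullptr && "must save before restoring");
        top = top_before_promoted;
        top_before_promoted = nullptr;  // ASSERT-only scrub in the real code
      }
    };
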
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.cpp b/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.cpp
index e9268063a56..76cc19fd47d 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahInitLogger.cpp
@@ -47,14 +47,14 @@ void ShenandoahInitLogger::print_heap() {
     log_info(gc, init)("Heuristics: %s", heap->global_generation()->heuristics()->name());
   } else {
     log_info(gc, init)("Young Heuristics: %s", heap->young_generation()->heuristics()->name());
-    log_info(gc, init)("Young Generation Initial Size: " SIZE_FORMAT "%s",
+    log_info(gc, init)("Young Generation Soft Size: " SIZE_FORMAT "%s",
                        byte_size_in_proper_unit(heap->young_generation()->soft_max_capacity()),
                        proper_unit_for_byte_size(heap->young_generation()->soft_max_capacity()));
     log_info(gc, init)("Young Generation Max: " SIZE_FORMAT "%s",
                        byte_size_in_proper_unit(heap->young_generation()->max_capacity()),
                        proper_unit_for_byte_size(heap->young_generation()->max_capacity()));
     log_info(gc, init)("Old Heuristics: %s", heap->old_generation()->heuristics()->name());
-    log_info(gc, init)("Old Generation Initial Size: " SIZE_FORMAT "%s",
+    log_info(gc, init)("Old Generation Soft Size: " SIZE_FORMAT "%s",
                        byte_size_in_proper_unit(heap->old_generation()->soft_max_capacity()),
                        proper_unit_for_byte_size(heap->old_generation()->soft_max_capacity()));
     log_info(gc, init)("Old Generation Max: " SIZE_FORMAT "%s",
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMmuTracker.cpp b/src/hotspot/share/gc/shenandoah/shenandoahMmuTracker.cpp
index b9443f12923..a7688679f29 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahMmuTracker.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMmuTracker.cpp
@@ -32,7 +32,6 @@
 #include "runtime/os.hpp"
 #include "runtime/task.hpp"
 
-
 class ShenandoahMmuTask : public PeriodicTask {
   ShenandoahMmuTracker* _mmu_tracker;
 public:
@@ -53,66 +52,134 @@ class ThreadTimeAccumulator : public ThreadClosure {
   }
 };
 
-double ShenandoahMmuTracker::gc_thread_time_seconds() {
+ShenandoahMmuTracker::ShenandoahMmuTracker() :
+    _most_recent_timestamp(0.0),
+    _most_recent_gc_time(0.0),
+    _most_recent_gcu(0.0),
+    _most_recent_mutator_time(0.0),
+    _most_recent_mu(0.0),
+    _most_recent_periodic_time_stamp(0.0),
+    _most_recent_periodic_gc_time(0.0),
+    _most_recent_periodic_mutator_time(0.0),
+    _most_recent_gcid(0),
+    _active_processors(0),
+    _most_recent_is_full(false),
+    _mmu_periodic_task(new ShenandoahMmuTask(this)) {
+}
+
+ShenandoahMmuTracker::~ShenandoahMmuTracker() {
+  _mmu_periodic_task->disenroll();
+  delete _mmu_periodic_task;
+}
+
+void ShenandoahMmuTracker::fetch_cpu_times(double &gc_time, double &mutator_time) {
   ThreadTimeAccumulator cl;
   // We include only the gc threads because those are the only threads
   // we are responsible for.
   ShenandoahHeap::heap()->gc_threads_do(&cl);
-  return double(cl.total_time) / NANOSECS_PER_SEC;
-}
+  double most_recent_gc_thread_time = double(cl.total_time) / NANOSECS_PER_SEC;
+  gc_time = most_recent_gc_thread_time;
 
-double ShenandoahMmuTracker::process_time_seconds() {
   double process_real_time(0.0), process_user_time(0.0), process_system_time(0.0);
   bool valid = os::getTimesSecs(&process_real_time, &process_user_time, &process_system_time);
-  if (valid) {
-    return process_user_time + process_system_time;
+  assert(valid, "os::getTimesSecs() is not expected to fail");
+  mutator_time = (process_user_time + process_system_time) - most_recent_gc_thread_time;
+}
+
+void ShenandoahMmuTracker::update_utilization(ShenandoahGeneration* generation, size_t gcid, const char *msg) {
+  double current = os::elapsedTime();
+  _most_recent_gcid = gcid;
+  _most_recent_is_full = false;
+
+  if (gcid == 0) {
+    fetch_cpu_times(_most_recent_gc_time, _most_recent_mutator_time);
+
+    _most_recent_timestamp = current;
+  } else {
+    double gc_cycle_period = current - _most_recent_timestamp;
+    _most_recent_timestamp = current;
+
+    double gc_thread_time, mutator_thread_time;
+    fetch_cpu_times(gc_thread_time, mutator_thread_time);
+    double gc_time = gc_thread_time - _most_recent_gc_time;
+    _most_recent_gc_time = gc_thread_time;
+    _most_recent_gcu = gc_time / (_active_processors * gc_cycle_period);
+    double mutator_time = mutator_thread_time - _most_recent_mutator_time;
+    _most_recent_mutator_time = mutator_thread_time;
+    _most_recent_mu = mutator_time / (_active_processors * gc_cycle_period);
+    log_info(gc, ergo)("At end of %s: GCU: %.1f%%, MU: %.1f%% during period of %.3fs",
+                       msg, _most_recent_gcu * 100, _most_recent_mu * 100, gc_cycle_period);
   }
-  return 0.0;
 }
 
-ShenandoahMmuTracker::ShenandoahMmuTracker() :
-    _generational_reference_time_s(0.0),
-    _process_reference_time_s(0.0),
-    _collector_reference_time_s(0.0),
-    _mmu_periodic_task(new ShenandoahMmuTask(this)),
-    _mmu_average(10, ShenandoahAdaptiveDecayFactor) {
+void ShenandoahMmuTracker::record_young(ShenandoahGeneration* generation, size_t gcid) {
+  update_utilization(generation, gcid, "Concurrent Young GC");
 }
 
-ShenandoahMmuTracker::~ShenandoahMmuTracker() {
-  _mmu_periodic_task->disenroll();
-  delete _mmu_periodic_task;
+void ShenandoahMmuTracker::record_bootstrap(ShenandoahGeneration* generation, size_t gcid, bool candidates_for_mixed) {
+  // Not likely that this will represent an "ideal" GCU, but doesn't hurt to try
+  update_utilization(generation, gcid, "Bootstrap Old GC");
+}
+
+void ShenandoahMmuTracker::record_old_marking_increment(ShenandoahGeneration* generation, size_t gcid, bool old_marking_done,
+                                                        bool has_old_candidates) {
+  // No special processing for old marking
+  double now = os::elapsedTime();
+  double duration = now - _most_recent_timestamp;
+
+  double gc_time, mutator_time;
+  fetch_cpu_times(gc_time, mutator_time);
+  double gcu = (gc_time - _most_recent_gc_time) / duration;
+  double mu = (mutator_time - _most_recent_mutator_time) / duration;
+  log_info(gc, ergo)("At end of %s: GCU: %.1f%%, MU: %.1f%% for duration %.3fs (totals to be subsumed in next gc report)",
+                     old_marking_done ? "last OLD marking increment" : "OLD marking increment",
+                     gcu * 100, mu * 100, duration);
+}
+
+void ShenandoahMmuTracker::record_mixed(ShenandoahGeneration* generation, size_t gcid, bool is_mixed_done) {
+  update_utilization(generation, gcid, "Mixed Concurrent GC");
+}
+
+void ShenandoahMmuTracker::record_degenerated(ShenandoahGeneration* generation,
+                                              size_t gcid, bool is_old_bootstrap, bool is_mixed_done) {
+  if ((gcid == _most_recent_gcid) && _most_recent_is_full) {
+    // Do nothing.  This is a redundant recording for the full gc that just completed.
+    // TODO: avoid making the call to record_degenerated() in the case that this degenerated upgraded to full gc.
+  } else if (is_old_bootstrap) {
+    update_utilization(generation, gcid, "Degenerated Bootstrap Old GC");
+  } else {
+    update_utilization(generation, gcid, "Degenerated Young GC");
+  }
 }
 
-void ShenandoahMmuTracker::record(ShenandoahGeneration* generation) {
-  shenandoah_assert_control_or_vm_thread();
-  double collector_time_s = gc_thread_time_seconds();
-  double elapsed_gc_time_s = collector_time_s - _generational_reference_time_s;
-  generation->add_collection_time(elapsed_gc_time_s);
-  _generational_reference_time_s = collector_time_s;
+void ShenandoahMmuTracker::record_full(ShenandoahGeneration* generation, size_t gcid) {
+  update_utilization(generation, gcid, "Full GC");
+  _most_recent_is_full = true;
 }
 
 void ShenandoahMmuTracker::report() {
   // This is only called by the periodic thread.
-  double process_time_s = process_time_seconds();
-  double elapsed_process_time_s = process_time_s - _process_reference_time_s;
-  if (elapsed_process_time_s <= 0.01) {
-    // No cpu time for this interval?
-    return;
-  }
+  double current = os::elapsedTime();
+  double time_delta = current - _most_recent_periodic_time_stamp;
+  _most_recent_periodic_time_stamp = current;
 
-  _process_reference_time_s = process_time_s;
-  double collector_time_s = gc_thread_time_seconds();
-  double elapsed_collector_time_s = collector_time_s - _collector_reference_time_s;
-  _collector_reference_time_s = collector_time_s;
-  double minimum_mutator_utilization = ((elapsed_process_time_s - elapsed_collector_time_s) / elapsed_process_time_s) * 100;
-  _mmu_average.add(minimum_mutator_utilization);
-  log_info(gc)("Average MMU = %.3f", _mmu_average.davg());
+  double gc_time, mutator_time;
+  fetch_cpu_times(gc_time, mutator_time);
+
+  double gc_delta = gc_time - _most_recent_periodic_gc_time;
+  _most_recent_periodic_gc_time = gc_time;
+
+  double mutator_delta = mutator_time - _most_recent_periodic_mutator_time;
+  _most_recent_periodic_mutator_time = mutator_time;
+
+  double mu = mutator_delta / (_active_processors * time_delta);
+  double gcu = gc_delta / (_active_processors * time_delta);
+  log_info(gc)("Periodic Sample: GCU = %.3f%%, MU = %.3f%% during most recent %.1fs", gcu * 100, mu * 100, time_delta);
 }
 
 void ShenandoahMmuTracker::initialize() {
-  _process_reference_time_s = process_time_seconds();
-  _generational_reference_time_s = gc_thread_time_seconds();
-  _collector_reference_time_s = _generational_reference_time_s;
+  // Cache the processor count used to normalize GC and mutator utilization.
+  _active_processors = os::initial_active_processor_count();
+
+  _most_recent_periodic_time_stamp = os::elapsedTime();
+  fetch_cpu_times(_most_recent_periodic_gc_time, _most_recent_periodic_mutator_time);
   _mmu_periodic_task->enroll();
 }
 
@@ -160,12 +227,12 @@ ShenandoahGenerationSizer::ShenandoahGenerationSizer(ShenandoahMmuTracker* mmu_t
 
 size_t ShenandoahGenerationSizer::calculate_min_young_regions(size_t heap_region_count) {
   size_t min_young_regions = (heap_region_count * ShenandoahMinYoungPercentage) / 100;
-  return MAX2(uint(min_young_regions), 1U);
+  return MAX2(min_young_regions, (size_t) 1U);
 }
 
 size_t ShenandoahGenerationSizer::calculate_max_young_regions(size_t heap_region_count) {
   size_t max_young_regions = (heap_region_count * ShenandoahMaxYoungPercentage) / 100;
-  return MAX2(uint(max_young_regions), 1U);
+  return MAX2(max_young_regions, (size_t) 1U);
 }
 
 void ShenandoahGenerationSizer::recalculate_min_max_young_length(size_t heap_region_count) {
@@ -202,110 +269,71 @@ void ShenandoahGenerationSizer::heap_size_changed(size_t heap_size) {
   recalculate_min_max_young_length(heap_size / ShenandoahHeapRegion::region_size_bytes());
 }
 
-bool ShenandoahGenerationSizer::adjust_generation_sizes() const {
-  shenandoah_assert_generational();
-  if (!use_adaptive_sizing()) {
-    return false;
-  }
-
-  if (_mmu_tracker->average() >= double(GCTimeRatio)) {
-    return false;
-  }
-
+// Returns true iff transfer is successful
+bool ShenandoahGenerationSizer::transfer_to_old(size_t regions) const {
   ShenandoahHeap* heap = ShenandoahHeap::heap();
-  ShenandoahOldGeneration *old = heap->old_generation();
-  ShenandoahYoungGeneration *young = heap->young_generation();
-  ShenandoahGeneration *global = heap->global_generation();
-  double old_time_s = old->reset_collection_time();
-  double young_time_s = young->reset_collection_time();
-  double global_time_s = global->reset_collection_time();
-
-  const double transfer_threshold = 3.0;
-  double delta = young_time_s - old_time_s;
-
-  log_info(gc)("Thread Usr+Sys YOUNG = %.3f, OLD = %.3f, GLOBAL = %.3f", young_time_s, old_time_s, global_time_s);
+  ShenandoahGeneration* old_gen = heap->old_generation();
+  ShenandoahGeneration* young_gen = heap->young_generation();
+  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+  size_t bytes_to_transfer = regions * region_size_bytes;
 
-  if (abs(delta) <= transfer_threshold) {
-    log_info(gc, ergo)("Difference (%.3f) for thread utilization for each generation is under threshold (%.3f)", abs(delta), transfer_threshold);
+  if (young_gen->free_unaffiliated_regions() < regions) {
+    return false;
+  } else if (old_gen->max_capacity() + bytes_to_transfer > heap->max_size_for(old_gen)) {
+    return false;
+  } else if (young_gen->max_capacity() - bytes_to_transfer < heap->min_size_for(young_gen)) {
     return false;
-  }
-
-  if (delta > 0) {
-    // young is busier than old, increase size of young to raise MMU
-    return transfer_capacity(old, young);
   } else {
-    // old is busier than young, increase size of old to raise MMU
-    return transfer_capacity(young, old);
+    young_gen->decrease_capacity(bytes_to_transfer);
+    old_gen->increase_capacity(bytes_to_transfer);
+    size_t new_size = old_gen->max_capacity();
+    log_info(gc)("Transfer " SIZE_FORMAT " region(s) from %s to %s, yielding increased size: " SIZE_FORMAT "%s",
+                 regions, young_gen->name(), old_gen->name(),
+                 byte_size_in_proper_unit(new_size), proper_unit_for_byte_size(new_size));
+    return true;
   }
 }
 
-bool ShenandoahGenerationSizer::transfer_capacity(ShenandoahGeneration* target) const {
-  ShenandoahHeapLocker locker(ShenandoahHeap::heap()->lock());
-  if (target->is_young()) {
-    return transfer_capacity(ShenandoahHeap::heap()->old_generation(), target);
-  } else {
-    assert(target->is_old(), "Expected old generation, if not young.");
-    return transfer_capacity(ShenandoahHeap::heap()->young_generation(), target);
-  }
+// This is used when promoting humongous or highly utilized regular regions in place.  It is not required in this situation
+// that the transferred regions be unaffiliated.
+void ShenandoahGenerationSizer::force_transfer_to_old(size_t regions) const {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  ShenandoahGeneration* old_gen = heap->old_generation();
+  ShenandoahGeneration* young_gen = heap->young_generation();
+  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+  size_t bytes_to_transfer = regions * region_size_bytes;
+
+  young_gen->decrease_capacity(bytes_to_transfer);
+  old_gen->increase_capacity(bytes_to_transfer);
+  size_t new_size = old_gen->max_capacity();
+  log_info(gc)("Forcing transfer of " SIZE_FORMAT " region(s) from %s to %s, yielding increased size: " SIZE_FORMAT "%s",
+               regions, young_gen->name(), old_gen->name(),
+               byte_size_in_proper_unit(new_size), proper_unit_for_byte_size(new_size));
 }
 
-bool ShenandoahGenerationSizer::transfer_capacity(ShenandoahGeneration* from, ShenandoahGeneration* to) const {
-  shenandoah_assert_heaplocked_or_safepoint();
 
-  size_t available_regions = from->free_unaffiliated_regions();
-  if (available_regions <= 0) {
-    log_info(gc)("%s has no regions available for transfer to %s", from->name(), to->name());
-    return false;
-  }
-
-  size_t regions_to_transfer = MAX2(1u, uint(double(available_regions) * _resize_increment));
-  if (from->is_young()) {
-    regions_to_transfer = adjust_transfer_from_young(from, regions_to_transfer);
-  } else {
-    regions_to_transfer = adjust_transfer_to_young(to, regions_to_transfer);
-  }
+bool ShenandoahGenerationSizer::transfer_to_young(size_t regions) const {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  ShenandoahGeneration* old_gen = heap->old_generation();
+  ShenandoahGeneration* young_gen = heap->young_generation();
+  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+  size_t bytes_to_transfer = regions * region_size_bytes;
 
-  if (regions_to_transfer == 0) {
-    log_info(gc)("No capacity available to transfer from: %s (" SIZE_FORMAT "%s) to: %s (" SIZE_FORMAT "%s)",
-                  from->name(), byte_size_in_proper_unit(from->max_capacity()), proper_unit_for_byte_size(from->max_capacity()),
-                  to->name(), byte_size_in_proper_unit(to->max_capacity()), proper_unit_for_byte_size(to->max_capacity()));
+  if (old_gen->free_unaffiliated_regions() < regions) {
     return false;
+  } else if (young_gen->max_capacity() + bytes_to_transfer > heap->max_size_for(young_gen)) {
+    return false;
+  } else if (old_gen->max_capacity() - bytes_to_transfer < heap->min_size_for(old_gen)) {
+    return false;
+  } else {
+    old_gen->decrease_capacity(bytes_to_transfer);
+    young_gen->increase_capacity(bytes_to_transfer);
+    size_t new_size = young_gen->max_capacity();
+    log_info(gc)("Transfer " SIZE_FORMAT " region(s) from %s to %s, yielding increased size: " SIZE_FORMAT "%s",
+                 regions, old_gen->name(), young_gen->name(),
+                 byte_size_in_proper_unit(new_size), proper_unit_for_byte_size(new_size));
+    return true;
   }
-
-  log_info(gc)("Transfer " SIZE_FORMAT " region(s) from %s to %s", regions_to_transfer, from->name(), to->name());
-  from->decrease_capacity(regions_to_transfer * ShenandoahHeapRegion::region_size_bytes());
-  to->increase_capacity(regions_to_transfer * ShenandoahHeapRegion::region_size_bytes());
-  return true;
-}
-
-size_t ShenandoahGenerationSizer::adjust_transfer_from_young(ShenandoahGeneration* from, size_t regions_to_transfer) const {
-  assert(from->is_young(), "Expect to transfer from young");
-  size_t young_capacity_regions = from->max_capacity() / ShenandoahHeapRegion::region_size_bytes();
-  size_t new_young_regions = young_capacity_regions - regions_to_transfer;
-  size_t minimum_young_regions = min_young_regions();
-  // Check that we are not going to violate the minimum size constraint.
-  if (new_young_regions < minimum_young_regions) {
-    assert(minimum_young_regions <= young_capacity_regions, "Young is under minimum capacity.");
-    // If the transfer violates the minimum size and there is still some capacity to transfer,
-    // adjust the transfer to take the size to the minimum. Note that this may be zero.
-    regions_to_transfer = young_capacity_regions - minimum_young_regions;
-  }
-  return regions_to_transfer;
-}
-
-size_t ShenandoahGenerationSizer::adjust_transfer_to_young(ShenandoahGeneration* to, size_t regions_to_transfer) const {
-  assert(to->is_young(), "Can only transfer between young and old.");
-  size_t young_capacity_regions = to->max_capacity() / ShenandoahHeapRegion::region_size_bytes();
-  size_t new_young_regions = young_capacity_regions + regions_to_transfer;
-  size_t maximum_young_regions = max_young_regions();
-  // Check that we are not going to violate the maximum size constraint.
-  if (new_young_regions > maximum_young_regions) {
-    assert(maximum_young_regions >= young_capacity_regions, "Young is over maximum capacity");
-    // If the transfer violates the maximum size and there is still some capacity to transfer,
-    // adjust the transfer to take the size to the maximum. Note that this may be zero.
-    regions_to_transfer = maximum_young_regions - young_capacity_regions;
-  }
-  return regions_to_transfer;
 }
 
 size_t ShenandoahGenerationSizer::min_young_size() const {
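
Every GCU and MU figure logged above comes from the same formula: a CPU-time delta for one thread group, divided by the CPU time available over the interval (active processors times wall-clock seconds). A self-contained sketch with made-up numbers:

    #include <cstdio>

    // Utilization as computed in update_utilization() and report(): a CPU-time
    // delta normalized by processors * wall-clock interval.
    double utilization(double cpu_seconds_delta, double wall_seconds, unsigned processors) {
      return cpu_seconds_delta / (processors * wall_seconds);
    }

    int main() {
      // Hypothetical cycle: 8 processors over 2.0s offer 16 CPU-seconds in total.
      double gcu = utilization(1.2, 2.0, 8);   // GC threads used 1.2 CPU-s  ->  7.5%
      double mu  = utilization(13.6, 2.0, 8);  // mutators used 13.6 CPU-s   -> 85.0%
      printf("GCU: %.1f%%, MU: %.1f%%\n", gcu * 100, mu * 100);
      return 0;
    }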
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMmuTracker.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMmuTracker.hpp
index 9d3c230a6cc..6ab7d180245 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahMmuTracker.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMmuTracker.hpp
@@ -50,16 +50,31 @@ class ShenandoahMmuTask;
  * MMU.
  */
 class ShenandoahMmuTracker {
+private:
+  // These variables hold recent snapshots of cumulative quantities that are used for calculating
+  // CPU time consumed by GC and mutator threads during each GC cycle.
+  double _most_recent_timestamp;
+  double _most_recent_gc_time;
+  double _most_recent_gcu;
+  double _most_recent_mutator_time;
+  double _most_recent_mu;
+
+  // These variables hold recent snapshots of cumulative quantities that are used for reporting
+  // periodic consumption of CPU time by GC and mutator threads.
+  double _most_recent_periodic_time_stamp;
+  double _most_recent_periodic_gc_time;
+  double _most_recent_periodic_mutator_time;
 
-  double _generational_reference_time_s;
-  double _process_reference_time_s;
-  double _collector_reference_time_s;
+  size_t _most_recent_gcid;
+  uint _active_processors;
+
+  bool _most_recent_is_full;
 
   ShenandoahMmuTask* _mmu_periodic_task;
-  TruncatedSeq _mmu_average;
 
-  static double gc_thread_time_seconds();
-  static double process_time_seconds();
+  void update_utilization(ShenandoahGeneration* generation, size_t gcid, const char* msg);
+  static void fetch_cpu_times(double &gc_time, double &mutator_time);
 
 public:
   explicit ShenandoahMmuTracker();
@@ -68,22 +83,24 @@ class ShenandoahMmuTracker {
   // This enrolls the periodic task after everything is initialized.
   void initialize();
 
-  // This is called at the start and end of a GC cycle. The GC thread times
-  // will be accumulated in this generation. Note that the bootstrap cycle
-  // for an old collection should be counted against the old generation.
-  // When the collector is idle, it still runs a regulator and a control.
-  // The times for these threads are attributed to the global generation.
-  void record(ShenandoahGeneration* generation);
+  // At completion of each GC cycle (not including interrupted cycles), we invoke one of the following to record the
+  // GC utilization during this cycle.  Incremental efforts spent in an interrupted GC cycle will be accumulated into
+  // the CPU time reports for the subsequent completed [degenerated or full] GC cycle.
+  //
+  // We may redundantly record degen and full in the case that a degen upgrades to full.  When this happens, we will invoke
+  // both record_full() and record_degenerated() with the same value of gcid.  record_full() is called first and the log
+  // reports such a cycle as a FULL cycle.
+  void record_young(ShenandoahGeneration* generation, size_t gcid);
+  void record_bootstrap(ShenandoahGeneration* generation, size_t gcid, bool has_old_candidates);
+  void record_old_marking_increment(ShenandoahGeneration* generation, size_t gcid, bool old_marking_done, bool has_old_candidates);
+  void record_mixed(ShenandoahGeneration* generation, size_t gcid, bool is_mixed_done);
+  void record_full(ShenandoahGeneration* generation, size_t gcid);
+  void record_degenerated(ShenandoahGeneration* generation, size_t gcid, bool is_old_bootstrap, bool is_mixed_done);
 
   // This is called by the periodic task timer. The interval is defined by
-  // GCPauseIntervalMillis and defaults to 5 seconds. This method computes
-  // the MMU over the elapsed interval and records it in a running average.
-  // This method also logs the average MMU.
+  // GCPauseIntervalMillis and defaults to 5 seconds. This method computes
+  // and logs GC and mutator utilization (GCU and MU) over the elapsed interval.
   void report();
-
-  double average() {
-    return _mmu_average.davg();
-  }
 };
 
 class ShenandoahGenerationSizer {
@@ -114,14 +131,6 @@ class ShenandoahGenerationSizer {
   // given the number of heap regions depending on the kind of sizing algorithm.
   void recalculate_min_max_young_length(size_t heap_region_count);
 
-  // These two methods are responsible for enforcing the minimum and maximum
-  // constraints for the size of the generations.
-  size_t adjust_transfer_from_young(ShenandoahGeneration* from, size_t regions_to_transfer) const;
-  size_t adjust_transfer_to_young(ShenandoahGeneration* to, size_t regions_to_transfer) const;
-
-  // This will attempt to transfer capacity from one generation to the other. It
-  // returns true if a transfer is made, false otherwise.
-  bool transfer_capacity(ShenandoahGeneration* from, ShenandoahGeneration* to) const;
 public:
   explicit ShenandoahGenerationSizer(ShenandoahMmuTracker* mmu_tracker);
 
@@ -145,19 +154,11 @@ class ShenandoahGenerationSizer {
     return _use_adaptive_sizing;
   }
 
-  // This is invoked at the end of a collection. This happens on a safepoint
-  // to avoid any races with allocators (and to avoid interfering with
-  // allocators by taking the heap lock). The amount of capacity to move
-  // from one generation to another is controlled by YoungGenerationSizeIncrement
-  // and defaults to 20% of the available capacity of the donor generation.
-  // The minimum and maximum sizes of the young generation are controlled by
-  // ShenandoahMinYoungPercentage and ShenandoahMaxYoungPercentage, respectively.
-  // The method returns true when an adjustment is made, false otherwise.
-  bool adjust_generation_sizes() const;
-
-  // This may be invoked by a heuristic (from regulator thread) before it
-  // decides to run a collection.
-  bool transfer_capacity(ShenandoahGeneration* target) const;
+  bool transfer_to_young(size_t regions) const;
+  bool transfer_to_old(size_t regions) const;
+
+  // Force transfer is used when we promote humongous objects or promote regular regions in place.
+  // It may violate the min/max limits on generation sizes.
+  void force_transfer_to_old(size_t regions) const;
 };
 
 #endif //SHARE_GC_SHENANDOAH_SHENANDOAHMMUTRACKER_HPP
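
The guards in transfer_to_old() and transfer_to_young() are mirror images of each other. The sketch below condenses them; GenState and can_transfer are illustrative stand-ins, not the ShenandoahGeneration API. A transfer succeeds only when the donor holds enough unaffiliated regions and neither generation would cross its configured size bounds.

    #include <cstddef>

    // Illustrative stand-in for a generation's capacity bookkeeping.
    struct GenState {
      size_t free_unaffiliated_regions;  // empty regions with no generation affiliation
      size_t max_capacity;               // current capacity, in bytes
    };

    // Condensed form of the guards in transfer_to_old()/transfer_to_young().
    bool can_transfer(const GenState& from, const GenState& to,
                      size_t regions, size_t region_bytes,
                      size_t from_min_bytes, size_t to_max_bytes) {
      size_t bytes = regions * region_bytes;
      if (from.free_unaffiliated_regions < regions)   return false;  // donor lacks empty regions
      if (to.max_capacity + bytes > to_max_bytes)     return false;  // receiver would exceed its max
      if (from.max_capacity - bytes < from_min_bytes) return false;  // donor would fall below its min
      return true;  // safe to shrink the donor and grow the receiver by 'bytes'
    }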
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp
index 94e29209bac..da56f2cb683 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp
@@ -30,6 +30,7 @@
 #include "gc/shenandoah/shenandoahOldGC.hpp"
 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
 #include "gc/shenandoah/shenandoahGeneration.hpp"
+#include "gc/shenandoah/shenandoahOldGeneration.hpp"
 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
 #include "prims/jvmtiTagMap.hpp"
 #include "utilities/events.hpp"
@@ -146,5 +147,48 @@ bool ShenandoahOldGC::collect(GCCause::Cause cause) {
   // collection.
   vmop_entry_final_roots();
 
+  // We do not rebuild the free set following increments of old marking because no memory has been reclaimed.  However, we may
+  // need to transfer memory to OLD in order to efficiently support the mixed evacuations that might immediately follow.
+  size_t allocation_runway = heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(0);
+  heap->adjust_generation_sizes_for_next_cycle(allocation_runway, 0, 0);
+
+  bool success;
+  size_t region_xfer;
+  const char* region_destination;
+  ShenandoahYoungGeneration* young_gen = heap->young_generation();
+  ShenandoahGeneration* old_gen = heap->old_generation();
+  {
+    ShenandoahHeapLocker locker(heap->lock());
+
+    size_t old_region_surplus = heap->get_old_region_surplus();
+    size_t old_region_deficit = heap->get_old_region_deficit();
+    if (old_region_surplus) {
+      success = heap->generation_sizer()->transfer_to_young(old_region_surplus);
+      region_destination = "young";
+      region_xfer = old_region_surplus;
+    } else if (old_region_deficit) {
+      success = heap->generation_sizer()->transfer_to_old(old_region_deficit);
+      region_destination = "old";
+      region_xfer = old_region_deficit;
+      if (!success) {
+        ((ShenandoahOldHeuristics *) old_gen->heuristics())->trigger_cannot_expand();
+      }
+    } else {
+      region_destination = "none";
+      region_xfer = 0;
+      success = true;
+    }
+    heap->set_old_region_surplus(0);
+    heap->set_old_region_deficit(0);
+  }
+
+  // Report outside the heap lock
+  size_t young_available = young_gen->available();
+  size_t old_available = old_gen->available();
+  log_info(gc, ergo)("After old marking finished, %s " SIZE_FORMAT " regions to %s to prepare for next gc, old available: "
+                     SIZE_FORMAT "%s, young_available: " SIZE_FORMAT "%s",
+                     success ? "successfully transferred" : "failed to transfer", region_xfer, region_destination,
+                     byte_size_in_proper_unit(old_available), proper_unit_for_byte_size(old_available),
+                     byte_size_in_proper_unit(young_available), proper_unit_for_byte_size(young_available));
   return true;
 }
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp
index 331499816f8..f8aaf325cbd 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp
@@ -174,12 +174,30 @@ class ShenandoahConcurrentCoalesceAndFillTask : public WorkerTask {
 ShenandoahOldGeneration::ShenandoahOldGeneration(uint max_queues, size_t max_capacity, size_t soft_max_capacity)
   : ShenandoahGeneration(OLD, max_queues, max_capacity, soft_max_capacity),
     _coalesce_and_fill_region_array(NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, ShenandoahHeap::heap()->num_regions(), mtGC)),
-    _state(IDLE)
+    _state(IDLE),
+    _growth_before_compaction(INITIAL_GROWTH_BEFORE_COMPACTION)
 {
+  _live_bytes_after_last_mark = ShenandoahHeap::heap()->capacity() * INITIAL_LIVE_FRACTION / FRACTIONAL_DENOMINATOR;
   // Always clear references for old generation
   ref_processor()->set_soft_reference_policy(true);
 }
 
+size_t ShenandoahOldGeneration::get_live_bytes_after_last_mark() const {
+  return _live_bytes_after_last_mark;
+}
+
+void ShenandoahOldGeneration::set_live_bytes_after_last_mark(size_t bytes) {
+  _live_bytes_after_last_mark = bytes;
+  if (_growth_before_compaction > MINIMUM_GROWTH_BEFORE_COMPACTION) {
+    _growth_before_compaction /= 2;
+  }
+}
+
+size_t ShenandoahOldGeneration::usage_trigger_threshold() const {
+  size_t result = _live_bytes_after_last_mark + (_live_bytes_after_last_mark * _growth_before_compaction) / FRACTIONAL_DENOMINATOR;
+  return result;
+}
+
 bool ShenandoahOldGeneration::contains(ShenandoahHeapRegion* region) const {
   // TODO: Should this be region->is_old() instead?
   return !region->is_young();
@@ -255,6 +273,7 @@ bool ShenandoahOldGeneration::coalesce_and_fill() {
   uint nworkers = workers->active_workers();
 
   log_debug(gc)("Starting (or resuming) coalesce-and-fill of old heap regions");
+
   // This code will see the same set of regions to fill on each resumption as it did
   // on the initial run. That's okay because each region keeps track of its own coalesce
   // and fill state. Regions that were filled on a prior attempt will not try to fill again.
@@ -322,7 +341,11 @@ void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent
         ShenandoahPhaseTimings::final_rebuild_freeset :
         ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
     ShenandoahHeapLocker locker(heap->lock());
-    heap->free_set()->rebuild();
+    size_t cset_young_regions, cset_old_regions;
+    heap->free_set()->prepare_to_rebuild(cset_young_regions, cset_old_regions);
+    // This is just old-gen completion.  No future budgeting required here.  The only reason to rebuild the freeset here
+    // is in case there was any immediate old garbage identified.
+    heap->free_set()->rebuild(cset_young_regions, cset_old_regions);
   }
 }
 
@@ -405,7 +428,8 @@ void ShenandoahOldGeneration::validate_transition(State new_state) {
     case IDLE:
       // GC cancellation can send us back to IDLE from any state.
       assert(!heap->is_concurrent_old_mark_in_progress(), "Cannot become idle during old mark.");
-      assert(_old_heuristics->unprocessed_old_collection_candidates() == 0, "Cannot become idle with collection candidates");
+      assert(!heap->mode()->is_generational() ||
+             (_old_heuristics->unprocessed_old_collection_candidates() == 0), "Cannot become idle with collection candidates");
       assert(!heap->is_prepare_for_old_mark_in_progress(), "Cannot become idle while making old generation parseable.");
       assert(heap->young_generation()->old_gen_task_queues() == nullptr, "Cannot become idle when setup for bootstrapping.");
       break;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp
index 0e3fb429c26..1ac7e22c41a 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp
@@ -90,19 +90,45 @@ class ShenandoahOldGeneration : public ShenandoahGeneration {
 private:
   State _state;
 
+  static const size_t FRACTIONAL_DENOMINATOR = 65536;
+
+  // During initialization of the JVM, we search for the correct old-gen size by initially performing old-gen
+  // collection when old-gen usage is 50% more (INITIAL_GROWTH_BEFORE_COMPACTION) than the initial old-gen size
+  // estimate (3.125% of heap).  The next old-gen trigger occurs when old-gen grows 25% larger than its live
+  // memory at the end of the first old-gen collection.  Then we trigger again when old-gen grows 12.5%
+  // more than its live memory at the end of the previous old-gen collection.  Thereafter, we trigger each time
+  // old-gen grows more than 12.5% following the end of its previous old-gen collection.
+  static const size_t INITIAL_GROWTH_BEFORE_COMPACTION = FRACTIONAL_DENOMINATOR / 2;          //  50.0%
+  static const size_t MINIMUM_GROWTH_BEFORE_COMPACTION = FRACTIONAL_DENOMINATOR / 8;          //  12.5%
+
+  // INITIAL_LIVE_FRACTION represents the initial guess of how large old-gen should be.  We estimate that old-gen
+  // needs to consume 3.125% of the total heap size.  And we "pretend" that we start out with this amount of live
+  // old-gen memory.  The first old-collection trigger will occur when old-gen occupies 50% more than this initial
+  // approximation of the old-gen memory requirement, in other words when old-gen usage is 150% of 3.125%, which
+  // is 4.6875% of the total heap size.
+  static const uint16_t INITIAL_LIVE_FRACTION = FRACTIONAL_DENOMINATOR / 32;                    //   3.125%
+  size_t _live_bytes_after_last_mark;
+  size_t _growth_before_compaction; // How much growth in usage before we trigger old collection, per 65_536
+
+  void validate_transition(State new_state) NOT_DEBUG_RETURN;
+
 public:
   State state() const {
     return _state;
   }
 
+  void transition_to(State new_state);
+
+  size_t get_live_bytes_after_last_mark() const;
+  void set_live_bytes_after_last_mark(size_t new_live);
+
+  size_t usage_trigger_threshold() const;
+
   bool can_start_gc() {
     return _state == IDLE || _state == WAITING_FOR_FILL;
   }
 
   static const char* state_name(State state);
-
-  void transition_to(State new_state);
-  void validate_transition(State new_state) NOT_DEBUG_RETURN;
 };
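
To make the fixed-point trigger arithmetic concrete, here is a worked sketch assuming a 1 GB heap and the constants defined above. In the real heuristic the live bytes are re-measured after each old mark; they are held fixed here to isolate the halving of the growth factor.

    #include <cstdio>
    #include <cstddef>

    int main() {
      const size_t DENOM  = 65536;               // FRACTIONAL_DENOMINATOR
      const size_t ONE_GB = 1024u * 1024u * 1024u;

      size_t live   = ONE_GB / 32;               // initial guess: 3.125% of heap = 32 MB
      size_t growth = DENOM / 2;                 // INITIAL_GROWTH_BEFORE_COMPACTION: 50%

      for (int cycle = 0; cycle < 4; cycle++) {
        // usage_trigger_threshold(): live + live * growth / DENOM
        size_t threshold = live + (live * growth) / DENOM;
        printf("cycle %d: trigger at %zu MB (growth %.1f%%)\n",
               cycle, threshold / (1024 * 1024), 100.0 * growth / DENOM);
        if (growth > DENOM / 8) {                // halve toward the 12.5% floor
          growth /= 2;
        }
      }
      return 0;                                  // prints 48, 40, 36, 36 MB
    }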
 
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.cpp
index 7664b75d1d6..7d21fb27972 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRegulatorThread.cpp
@@ -140,7 +140,8 @@ void ShenandoahRegulatorThread::regulator_sleep() {
 }
 
 bool ShenandoahRegulatorThread::start_old_cycle() {
-  return _old_heuristics->should_start_gc() && _control_thread->request_concurrent_gc(OLD);
+  return !ShenandoahHeap::heap()->doing_mixed_evacuations() && !ShenandoahHeap::heap()->collection_set()->has_old_regions() &&
+    _old_heuristics->should_start_gc() && _control_thread->request_concurrent_gc(OLD);
 }
 
 bool ShenandoahRegulatorThread::start_young_cycle() {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
index dae7ab21b2d..61ea96999d0 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
@@ -403,24 +403,32 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure {
                   byte_size_in_proper_unit(stats.used()),       proper_unit_for_byte_size(stats.used()));
   }
 
-  static void validate_usage(const char* label, ShenandoahGeneration* generation, ShenandoahCalculateRegionStatsClosure& stats) {
+  static void validate_usage(const bool adjust_for_padding,
+                             const char* label, ShenandoahGeneration* generation, ShenandoahCalculateRegionStatsClosure& stats) {
+    ShenandoahHeap* heap = ShenandoahHeap::heap();
     size_t generation_used = generation->used();
+    size_t generation_used_regions = generation->used_regions();
+    if (adjust_for_padding && (generation->is_young() || generation->is_global())) {
+      size_t pad = heap->get_pad_for_promote_in_place();
+      generation_used += pad;
+    }
+
     guarantee(stats.used() == generation_used,
               "%s: generation (%s) used size must be consistent: generation-used: " SIZE_FORMAT "%s, regions-used: " SIZE_FORMAT "%s",
               label, generation->name(),
               byte_size_in_proper_unit(generation_used), proper_unit_for_byte_size(generation_used),
               byte_size_in_proper_unit(stats.used()),    proper_unit_for_byte_size(stats.used()));
 
-    guarantee(stats.regions() == generation->used_regions(),
+    guarantee(stats.regions() == generation_used_regions,
               "%s: generation (%s) used regions (" SIZE_FORMAT ") must equal regions that are in use (" SIZE_FORMAT ")",
               label, generation->name(), generation->used_regions(), stats.regions());
 
-// This check is disabled because of known issues with this feature. We expect this code to be updated by 05/2023.
-//    size_t capacity = generation->adjusted_capacity();
-//    guarantee(stats.span() <= capacity,
-//              "%s: generation (%s) size spanned by regions (" SIZE_FORMAT ") must not exceed current capacity (" SIZE_FORMAT "%s)",
-//              label, generation->name(), stats.regions(),
-//              byte_size_in_proper_unit(capacity), proper_unit_for_byte_size(capacity));
+    size_t generation_capacity = generation->max_capacity();
+    guarantee(stats.span() <= generation_capacity,
+              "%s: generation (%s) size spanned by regions (" SIZE_FORMAT ") must not exceed current capacity (" SIZE_FORMAT "%s)",
+              label, generation->name(), stats.regions(),
+              byte_size_in_proper_unit(generation_capacity), proper_unit_for_byte_size(generation_capacity));
 
     size_t humongous_waste = generation->get_humongous_waste();
     guarantee(stats.waste() == humongous_waste,
@@ -743,6 +751,7 @@ void ShenandoahVerifier::verify_at_safepoint(const char* label,
                                              VerifyForwarded forwarded, VerifyMarked marked,
                                              VerifyCollectionSet cset,
                                              VerifyLiveness liveness, VerifyRegions regions,
+                                             VerifySize sizeness,
                                              VerifyGCState gcstate) {
   guarantee(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "only when nothing else happens");
   guarantee(ShenandoahVerify, "only when enabled, and bitmap is initialized in ShenandoahHeap::initialize");
@@ -795,6 +804,7 @@ void ShenandoahVerifier::verify_at_safepoint(const char* label,
 
     if (enabled) {
       char actual = _heap->gc_state();
+      // Old generation marking is allowed in all states.
       if (!VerifyThreadGCState::verify_gc_state(actual, expected)) {
         fatal("%s: Global gc-state: expected %d, actual %d", label, expected, actual);
       }
@@ -813,13 +823,20 @@ void ShenandoahVerifier::verify_at_safepoint(const char* label,
 
     ShenandoahCalculateRegionStatsClosure cl;
     _heap->heap_region_iterate(&cl);
-    size_t heap_used = _heap->used();
-    guarantee(cl.used() == heap_used,
-              "%s: heap used size must be consistent: heap-used = " SIZE_FORMAT "%s, regions-used = " SIZE_FORMAT "%s",
-              label,
-              byte_size_in_proper_unit(heap_used), proper_unit_for_byte_size(heap_used),
-              byte_size_in_proper_unit(cl.used()), proper_unit_for_byte_size(cl.used()));
-
+    if (sizeness != _verify_size_disable) {
+      size_t heap_used = _heap->used();
+      if (_heap->mode()->is_generational() && (sizeness == _verify_size_adjusted_for_padding)) {
+        // Prior to evacuation, regular regions that are to be promoted in place are padded to prevent further allocations
+        heap_used += _heap->get_pad_for_promote_in_place();
+      }
+      guarantee(cl.used() == heap_used,
+                "%s: heap used size must be consistent: heap-used = " SIZE_FORMAT "%s, regions-used = " SIZE_FORMAT "%s",
+                label,
+                byte_size_in_proper_unit(heap_used), proper_unit_for_byte_size(heap_used),
+                byte_size_in_proper_unit(cl.used()), proper_unit_for_byte_size(cl.used()));
+    }
     size_t heap_committed = _heap->committed();
     guarantee(cl.committed() == heap_committed,
               "%s: heap committed size must be consistent: heap-committed = " SIZE_FORMAT "%s, regions-committed = " SIZE_FORMAT "%s",
@@ -868,10 +885,16 @@ void ShenandoahVerifier::verify_at_safepoint(const char* label,
       ShenandoahGenerationStatsClosure::log_usage(_heap->young_generation(),  cl.young);
       ShenandoahGenerationStatsClosure::log_usage(_heap->global_generation(), cl.global);
     }
-
-    ShenandoahGenerationStatsClosure::validate_usage(label, _heap->old_generation(),    cl.old);
-    ShenandoahGenerationStatsClosure::validate_usage(label, _heap->young_generation(),  cl.young);
-    ShenandoahGenerationStatsClosure::validate_usage(label, _heap->global_generation(), cl.global);
+    if (sizeness == _verify_size_adjusted_for_padding) {
+      ShenandoahGenerationStatsClosure::validate_usage(false, label, _heap->old_generation(), cl.old);
+      ShenandoahGenerationStatsClosure::validate_usage(true, label, _heap->young_generation(), cl.young);
+      ShenandoahGenerationStatsClosure::validate_usage(true, label, _heap->global_generation(), cl.global);
+    } else if (sizeness == _verify_size_exact) {
+      ShenandoahGenerationStatsClosure::validate_usage(false, label, _heap->old_generation(), cl.old);
+      ShenandoahGenerationStatsClosure::validate_usage(false, label, _heap->young_generation(), cl.young);
+      ShenandoahGenerationStatsClosure::validate_usage(false, label, _heap->global_generation(), cl.global);
+    }
+    // else: sizeness must equal _verify_size_disable
   }
 
   log_debug(gc)("Safepoint verification finished remembered set verification");
@@ -983,19 +1006,22 @@ void ShenandoahVerifier::verify_generic(VerifyOption vo) {
           _verify_cset_disable,        // cset may be inconsistent
           _verify_liveness_disable,    // no reliable liveness data
           _verify_regions_disable,     // no reliable region data
+          _verify_size_exact,          // expect generation and heap sizes to match exactly
           _verify_gcstate_disable      // no data about gcstate
   );
 }
 
 void ShenandoahVerifier::verify_before_concmark() {
     verify_at_safepoint(
-            "Before Mark",
-            _verify_remembered_before_marking,  // verify read-only remembered set from bottom() to top()
+          "Before Mark",
+          _verify_remembered_before_marking,
+                                       // verify read-only remembered set from bottom() to top()
           _verify_forwarded_none,      // UR should have fixed up
           _verify_marked_disable,      // do not verify marked: lots ot time wasted checking dead allocations
           _verify_cset_none,           // UR should have fixed this
           _verify_liveness_disable,    // no reliable liveness data
           _verify_regions_notrash,     // no trash regions
+          _verify_size_exact,          // expect generation and heap sizes to match exactly
           _verify_gcstate_stable       // there are no forwarded objects
   );
 }
@@ -1005,10 +1031,12 @@ void ShenandoahVerifier::verify_after_concmark() {
           "After Mark",
           _verify_remembered_disable,  // do not verify remembered set
           _verify_forwarded_none,      // no forwarded references
-          _verify_marked_complete_except_references, // bitmaps as precise as we can get, except dangling j.l.r.Refs
+          _verify_marked_complete_except_references,
+                                       // bitmaps as precise as we can get, except dangling j.l.r.Refs
           _verify_cset_none,           // no references to cset anymore
           _verify_liveness_complete,   // liveness data must be complete here
           _verify_regions_disable,     // trash regions not yet recycled
+          _verify_size_exact,          // expect generation and heap sizes to match exactly
           _verify_gcstate_stable_weakroots  // heap is still stable, weakroots are in progress
   );
 }
@@ -1022,6 +1050,8 @@ void ShenandoahVerifier::verify_before_evacuation() {
           _verify_cset_disable,                      // non-forwarded references to cset expected
           _verify_liveness_complete,                 // liveness data must be complete here
           _verify_regions_disable,                   // trash regions not yet recycled
+          _verify_size_adjusted_for_padding,         // expect generation and heap sizes to match after adjustments
+                                                     //  for promote in place padding
           _verify_gcstate_stable_weakroots           // heap is still stable, weakroots are in progress
   );
 }
@@ -1035,6 +1065,7 @@ void ShenandoahVerifier::verify_during_evacuation() {
           _verify_cset_disable,       // some cset references are not forwarded yet
           _verify_liveness_disable,   // liveness data might be already stale after pre-evacs
           _verify_regions_disable,    // trash regions not yet recycled
+          _verify_size_disable,       // we don't know how much of promote-in-place work has been completed
           _verify_gcstate_evacuation  // evacuation is in progress
   );
 }
@@ -1048,6 +1079,7 @@ void ShenandoahVerifier::verify_after_evacuation() {
           _verify_cset_forwarded,      // all cset refs are fully forwarded
           _verify_liveness_disable,    // no reliable liveness data anymore
           _verify_regions_notrash,     // trash regions have been recycled already
+          _verify_size_exact,          // expect generation and heap sizes to match exactly
           _verify_gcstate_forwarded    // evacuation produced some forwarded objects
   );
 }
@@ -1056,15 +1088,17 @@ void ShenandoahVerifier::verify_before_updaterefs() {
   verify_at_safepoint(
           "Before Updating References",
           _verify_remembered_before_updating_references,  // verify read-write remembered set
-          _verify_forwarded_allow,                     // forwarded references allowed
-          _verify_marked_complete,                     // bitmaps might be stale, but alloc-after-mark should be well
-          _verify_cset_forwarded,                      // all cset refs are fully forwarded
-          _verify_liveness_disable,                    // no reliable liveness data anymore
-          _verify_regions_notrash,                     // trash regions have been recycled already
-          _verify_gcstate_updating                     // evacuation should have produced some forwarded objects
+          _verify_forwarded_allow,     // forwarded references allowed
+          _verify_marked_complete,     // bitmaps might be stale, but alloc-after-mark should be well
+          _verify_cset_forwarded,      // all cset refs are fully forwarded
+          _verify_liveness_disable,    // no reliable liveness data anymore
+          _verify_regions_notrash,     // trash regions have been recycled already
+          _verify_size_exact,          // expect generation and heap sizes to match exactly
+          _verify_gcstate_updating     // evacuation should have produced some forwarded objects
   );
 }
 
+// We have not yet cleaned up (reclaimed) the collection set.
 void ShenandoahVerifier::verify_after_updaterefs() {
   verify_at_safepoint(
           "After Updating References",
@@ -1074,6 +1108,7 @@ void ShenandoahVerifier::verify_after_updaterefs() {
           _verify_cset_none,           // no cset references, all updated
           _verify_liveness_disable,    // no reliable liveness data anymore
           _verify_regions_nocset,      // no cset regions, trash regions have appeared
+          _verify_size_exact,          // expect generation and heap sizes to match exactly
           _verify_gcstate_stable       // update refs had cleaned up forwarded objects
   );
 }
@@ -1087,6 +1122,7 @@ void ShenandoahVerifier::verify_after_degenerated() {
           _verify_cset_none,           // no cset references
           _verify_liveness_disable,    // no reliable liveness data anymore
           _verify_regions_notrash_nocset, // no trash, no cset
+          _verify_size_exact,          // expect generation and heap sizes to match exactly
           _verify_gcstate_stable       // degenerated refs had cleaned up forwarded objects
   );
 }
@@ -1100,6 +1136,7 @@ void ShenandoahVerifier::verify_before_fullgc() {
           _verify_cset_disable,        // cset might be foobared
           _verify_liveness_disable,    // no reliable liveness data anymore
           _verify_regions_disable,     // no reliable region data here
+          _verify_size_disable,        // if we degenerate during evacuation, usage not valid: padding and deferred accounting
           _verify_gcstate_disable      // no reliable gcstate data
   );
 }
@@ -1113,6 +1150,7 @@ void ShenandoahVerifier::verify_after_fullgc() {
           _verify_cset_none,           // no cset references
           _verify_liveness_disable,    // no reliable liveness data anymore
           _verify_regions_notrash_nocset, // no trash, no cset
+          _verify_size_exact,          // expect generation and heap sizes to match exactly
           _verify_gcstate_stable        // full gc cleaned up everything
   );
 }
@@ -1314,7 +1352,7 @@ void ShenandoahVerifier::verify_rem_set_before_mark() {
         }
         // else, this humongous object is not marked so no need to verify its internal pointers
         if (!scanner->verify_registration(obj_addr, ctx)) {
-          ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, obj_addr, nullptr,
+          ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, nullptr, nullptr,
                                            "Verify init-mark remembered set violation", "object not properly registered", __FILE__, __LINE__);
         }
       } else if (!r->is_humongous()) {
@@ -1330,7 +1368,7 @@ void ShenandoahVerifier::verify_rem_set_before_mark() {
             }
             // else, object's start is marked dirty and obj is not an objArray, so any interesting pointers are covered
             if (!scanner->verify_registration(obj_addr, ctx)) {
-              ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, obj_addr, nullptr,
+              ShenandoahAsserts::print_failure(ShenandoahAsserts::_safe_all, obj, nullptr, nullptr,
                                                "Verify init-mark remembered set violation", "object not properly registered", __FILE__, __LINE__);
             }
             obj_addr += obj->size();
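
The size checks above all answer one question: what value should the per-region usage tally equal? A minimal sketch of that selection, with hypothetical inputs; it mirrors the structure of the checks, not the verifier's actual code:

    #include <cstddef>

    enum VerifySize { size_disable, size_exact, size_adjusted_for_padding };

    // Under the padding-adjusted mode, pad fills inserted for promote-in-place
    // are added to the heap's used() before comparing against the region tally.
    bool usage_consistent(VerifySize mode, size_t heap_used, size_t promote_pad,
                          size_t regions_used_tally) {
      if (mode == size_disable) {
        return true;  // size verification skipped entirely
      }
      size_t expected = heap_used;
      if (mode == size_adjusted_for_padding) {
        expected += promote_pad;
      }
      return regions_used_tally == expected;
    }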
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp
index 284746be541..67b35644bf5 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.hpp
@@ -141,6 +141,17 @@ class ShenandoahVerifier : public CHeapObj<mtGC> {
     _verify_regions_notrash_nocset
   } VerifyRegions;
 
+  typedef enum {
+    // Disable size verification
+    _verify_size_disable,
+
+    // Enforce exact consistency
+    _verify_size_exact,
+
+    // Expect promote-in-place adjustments: padding inserted to temporarily prevent further allocation in regular regions
+    _verify_size_adjusted_for_padding
+  } VerifySize;
+
   typedef enum {
     // Disable gc-state verification
     _verify_gcstate_disable,
@@ -189,6 +200,7 @@ class ShenandoahVerifier : public CHeapObj<mtGC> {
                            VerifyCollectionSet cset,
                            VerifyLiveness liveness,
                            VerifyRegions regions,
+                           VerifySize sizeness,
                            VerifyGCState gcstate);
 
 public:
diff --git a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp
index a7f3311b603..4d5d029f4f7 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoah_globals.hpp
@@ -97,7 +97,7 @@
           "collector accepts. In percents of heap region size.")            \
           range(0,100)                                                      \
                                                                             \
-  product(uintx, ShenandoahOldGarbageThreshold, 10, EXPERIMENTAL,           \
+  product(uintx, ShenandoahOldGarbageThreshold, 15, EXPERIMENTAL,           \
           "How much garbage an old region has to contain before it would "  \
           "be taken for collection.")                                       \
           range(0,100)                                                      \
@@ -128,13 +128,6 @@
           "size.")                                                          \
           range(0,100)                                                      \
                                                                             \
-  product(uintx, ShenandoahOldMinFreeThreshold, 5, EXPERIMENTAL,            \
-          "Percentage of free old generation heap memory below which most " \
-          "heuristics trigger collection independent of other triggers. "   \
-          "Provides a safety margin for many heuristics. In percents of "   \
-          "(soft) max heap size.")                                          \
-          range(0,100)                                                      \
-                                                                            \
   product(uintx, ShenandoahAllocationThreshold, 0, EXPERIMENTAL,            \
           "How many new allocations should happen since the last GC cycle " \
           "before some heuristics trigger the collection. In percents of "  \
@@ -210,7 +203,7 @@
           "Heuristics may trigger collections more frequently. Time is in " \
           "milliseconds. Setting this to 0 disables the feature.")          \
                                                                             \
-  product(uintx, ShenandoahGuaranteedYoungGCInterval, 5*60*1000, EXPERIMENTAL, \
+  product(uintx, ShenandoahGuaranteedYoungGCInterval, 5*60*1000,  EXPERIMENTAL,  \
           "Run a collection of the young generation at least this often. "  \
           "Heuristics may trigger collections more frequently. Time is in " \
           "milliseconds. Setting this to 0 disables the feature.")          \
@@ -300,17 +293,20 @@
           "failures, which will trigger stop-the-world Full GC passes.")    \
           range(1.0,100.0)                                                  \
                                                                             \
-  product(double, ShenandoahGenerationalEvacWaste, 2.0, EXPERIMENTAL,       \
-          "For generational mode, how much waste evacuations produce "      \
-          "within the reserved space.  Larger values make evacuations "     \
-          "more resilient against evacuation conflicts, at expense of "     \
-          "evacuating less on each GC cycle.  Smaller values increase "     \
-          "the risk of evacuation failures, which will trigger "            \
-          "stop-the-world Full GC passes.  The default value for "          \
-          "generational mode is 2.0.  The reason for the higher default "   \
-          "value in generational mode is because generational mode "        \
-          "enforces the evacuation budget, triggering degenerated GC "      \
-          "which upgrades to full GC whenever the budget is exceeded.")     \
+  product(double, ShenandoahOldEvacWaste, 1.4, EXPERIMENTAL,                \
+          "How much waste evacuations produce within the reserved space. "  \
+          "Larger values make evacuations more resilient against "          \
+          "evacuation conflicts, at expense of evacuating less on each "    \
+          "GC cycle.  Smaller values increase the risk of evacuation "      \
+          "failures, which will trigger stop-the-world Full GC passes.")    \
+          range(1.0,100.0)                                                  \
+                                                                            \
+  product(double, ShenandoahPromoEvacWaste, 1.2, EXPERIMENTAL,              \
+          "How much waste promotions produce within the reserved space. "   \
+          "Larger values make evacuations more resilient against "          \
+          "evacuation conflicts, at expense of promoting less on each "     \
+          "GC cycle.  Smaller values increase the risk of evacuation "      \
+          "failures, which will trigger stop-the-world Full GC passes.")    \
           range(1.0,100.0)                                                  \
                                                                             \
   product(uintx, ShenandoahMaxEvacLABRatio, 0, EXPERIMENTAL,                \
@@ -339,26 +335,16 @@
           "reserve/waste is incorrect, at the risk that application "       \
           "runs out of memory too early.")                                  \
                                                                             \
-  product(uintx, ShenandoahOldEvacReserve, 2, EXPERIMENTAL,                 \
-          "How much of old-generation heap to reserve for old-generation "  \
-          "evacuations.  Larger values allow GC to evacuate more live "     \
-          "old-generation objects on every cycle, while potentially "       \
-          "creating greater impact on the cadence at which the young- "     \
-          "generation allocation pool is replenished.  During mixed "       \
-          "evacuations, the bound on amount of old-generation heap "        \
-          "regions included in the collecdtion set is the smaller "         \
-          "of the quantities specified by this parameter and the "          \
-          "size of ShenandoahEvacReserve as adjusted by the value of "      \
-          "ShenandoahOldEvacRatioPercent.  In percents of total "           \
-          "old-generation heap size.")                                      \
-          range(1,100)                                                      \
-                                                                            \
-  product(uintx, ShenandoahOldEvacRatioPercent, 12, EXPERIMENTAL,           \
+  product(uintx, ShenandoahOldEvacRatioPercent, 75, EXPERIMENTAL,           \
           "The maximum proportion of evacuation from old-gen memory, as "   \
-          "a percent ratio.  The default value 12 denotes that no more "    \
-          "than one eighth (12%) of the collection set evacuation "         \
-          "workload may be comprised of old-gen heap regions.  A larger "   \
-          "value allows a smaller number of mixed evacuations to process "  \
+          "a percent ratio.  The default value 75 denotes that no more "    \
+          "than 75% of the collection set evacuation "                      \
+          "workload may be evacuate to old-gen heap regions.  This limits " \
+          "both the promotion of aged regions and the compaction of "       \
+          "existing old regions.  A value of 75 denotes that the normal "   \
+          "young-gen evacuation is increased by up to four fold. "          \
+          "A larger value allows quicker promotion and allows"              \
+          "a smaller number of mixed evacuations to process "               \
           "the entire list of old-gen collection candidates at the cost "   \
           "of an increased disruption of the normal cadence of young-gen "  \
           "collections.  A value of 100 allows a mixed evacuation to "      \
@@ -378,7 +364,7 @@
           "to be less than this.")                                          \
           range(0, 100)                                                     \
                                                                             \
-  product(uintx, ShenandoahMaxYoungPercentage, 80, EXPERIMENTAL,            \
+  product(uintx, ShenandoahMaxYoungPercentage, 100, EXPERIMENTAL,           \
           "The maximum percentage of the heap to use for the young "        \
           "generation. Heuristics will not adjust the young generation "    \
           "to be more than this.")                                          \
@@ -431,10 +417,14 @@
           "When running in passive mode, this can be toggled to measure "   \
           "either Degenerated GC or Full GC costs.")                        \
                                                                             \
-  product(uintx, ShenandoahFullGCThreshold, 3, EXPERIMENTAL,                \
+  product(uintx, ShenandoahFullGCThreshold, 64, EXPERIMENTAL,               \
           "How many back-to-back Degenerated GCs should happen before "     \
           "going to a Full GC.")                                            \
                                                                             \
+  product(uintx, ShenandoahOOMGCRetries, 3, EXPERIMENTAL,                   \
+          "How many GCs should happen before we throw OutOfMemoryError "    \
+          "for an allocation request, including at least one Full GC.")     \
+                                                                            \
   product(bool, ShenandoahImplicitGCInvokesConcurrent, false, EXPERIMENTAL, \
           "Should internally-caused GC requests invoke concurrent cycles, " \
           "should they do the stop-the-world (Degenerated / Full GC)? "     \
@@ -518,20 +508,6 @@
           "Fix references with load reference barrier. Disabling this "     \
           "might degrade performance.")                                     \
                                                                             \
-  product(uintx, ShenandoahBorrowPercent, 30, EXPERIMENTAL,                 \
-          "During evacuation and reference updating in generational "       \
-          "mode, new allocations are allowed to borrow from old-gen "       \
-          "memory up to ShenandoahBorrowPercent / 100 amount of the "       \
-          "young-generation content of the current collection set.  "       \
-          "Any memory borrowed from old-gen during evacuation and "         \
-          "update-references phases of GC will be repaid from the "         \
-          "abundance of young-gen memory produced when the collection "     \
-          "set is recycled at the end of updating references.  The "        \
-          "default value of 30 reserves 70% of the to-be-reclaimed "        \
-          "young collection set memory to be allocated during the "         \
-          "subsequent concurrent mark phase of GC.")                        \
-          range(0, 100)                                                     \
-                                                                            \
   product(uintx, ShenandoahOldCompactionReserve, 8, EXPERIMENTAL,           \
           "During generational GC, prevent promotions from filling "        \
           "this number of heap regions.  These regions are reserved "       \
diff --git a/test/hotspot/jtreg/ProblemList.txt b/test/hotspot/jtreg/ProblemList.txt
index 48495d2a1ea..14c04bfbd66 100644
--- a/test/hotspot/jtreg/ProblemList.txt
+++ b/test/hotspot/jtreg/ProblemList.txt
@@ -83,11 +83,6 @@ gc/stress/gclocker/TestGCLockerWithG1.java 8180622 generic-all
 gc/stress/TestJNIBlockFullGC/TestJNIBlockFullGC.java 8192647 generic-all
 gc/stress/TestStressG1Humongous.java 8286554 windows-x64
 
-gc/shenandoah/oom/TestThreadFailure.java 8306335 generic-all
-gc/shenandoah/oom/TestClassLoaderLeak.java 8306336 generic-all
-gc/stress/gclocker/TestGCLockerWithShenandoah.java#generational 8306341 generic-all
-gc/TestAllocHumongousFragment.java#generational 8306342 generic-all
-
 #############################################################################
 
 # :hotspot_runtime