diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp
index 7b097628296..aa8edb2ae6a 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahGenerationalHeuristics.cpp
@@ -171,7 +171,7 @@ void ShenandoahGenerationalHeuristics::choose_collection_set(ShenandoahCollectio
   if (doing_promote_in_place || (preselected_candidates > 0) || (immediate_percent <= ShenandoahImmediateThreshold)) {
     // Only young collections need to prime the collection set.
     if (_generation->is_young()) {
-      heap->old_heuristics()->prime_collection_set(collection_set);
+      heap->old_generation()->heuristics()->prime_collection_set(collection_set);
     }
 
     // Call the subclasses to add young-gen regions into the collection set.
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp
index de7f8757894..6770d5d09e2 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp
@@ -125,7 +125,7 @@ void ShenandoahYoungHeuristics::choose_young_collection_set(ShenandoahCollection
 
 bool ShenandoahYoungHeuristics::should_start_gc() {
   auto heap = ShenandoahGenerationalHeap::heap();
-  ShenandoahOldHeuristics* old_heuristics = heap->old_heuristics();
+  ShenandoahOldHeuristics* old_heuristics = heap->old_generation()->heuristics();
 
   // Checks that an old cycle has run for at least ShenandoahMinimumOldMarkTimeMs before allowing a young cycle.
   if (ShenandoahMinimumOldMarkTimeMs > 0 && heap->is_concurrent_old_mark_in_progress()) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAllocRequest.hpp b/src/hotspot/share/gc/shenandoah/shenandoahAllocRequest.hpp
index bd0fbe53dc0..6ac62e8e9ed 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahAllocRequest.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahAllocRequest.hpp
@@ -80,14 +80,17 @@ class ShenandoahAllocRequest : StackObj {
   // This is the generation which the request is targeting.
   ShenandoahAffiliation const _affiliation;
 
+  // True if this request is trying to copy any object from young to old (promote).
+  bool _is_promotion;
+
 #ifdef ASSERT
   // Check that this is set before being read.
   bool _actual_size_set;
 #endif
 
-  ShenandoahAllocRequest(size_t _min_size, size_t _requested_size, Type _alloc_type, ShenandoahAffiliation affiliation) :
+  ShenandoahAllocRequest(size_t _min_size, size_t _requested_size, Type _alloc_type, ShenandoahAffiliation affiliation, bool is_promotion = false) :
           _min_size(_min_size), _requested_size(_requested_size),
-          _actual_size(0), _waste(0), _alloc_type(_alloc_type), _affiliation(affiliation)
+          _actual_size(0), _waste(0), _alloc_type(_alloc_type), _affiliation(affiliation), _is_promotion(is_promotion)
 #ifdef ASSERT
           , _actual_size_set(false)
 #endif
@@ -106,7 +109,11 @@ class ShenandoahAllocRequest : StackObj {
     return ShenandoahAllocRequest(min_size, requested_size, _alloc_plab, ShenandoahAffiliation::OLD_GENERATION);
   }
 
-  static inline ShenandoahAllocRequest for_shared_gc(size_t requested_size, ShenandoahAffiliation affiliation) {
+  static inline ShenandoahAllocRequest for_shared_gc(size_t requested_size, ShenandoahAffiliation affiliation, bool is_promotion = false) {
+    if (is_promotion) {
+      assert(affiliation == ShenandoahAffiliation::OLD_GENERATION, "Should only promote to old generation");
+      return ShenandoahAllocRequest(0, requested_size, _alloc_shared_gc, affiliation, true);
+    }
     return ShenandoahAllocRequest(0, requested_size, _alloc_shared_gc, affiliation);
   }
 
@@ -212,6 +219,10 @@ class ShenandoahAllocRequest : StackObj {
   const char* affiliation_name() const {
     return shenandoah_affiliation_name(_affiliation);
   }
+
+  bool is_promotion() const {
+    return _is_promotion;
+  }
 };
 
 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHALLOCREQUEST_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp
index 2589648bae4..cf61e15b1c2 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp
@@ -31,6 +31,7 @@
 #include "gc/shenandoah/shenandoahBarrierSetStackChunk.hpp"
 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
 #include "gc/shenandoah/shenandoahStackWatermark.hpp"
 #ifdef COMPILER1
 #include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
@@ -135,7 +136,7 @@ void ShenandoahBarrierSet::on_thread_detach(Thread *thread) {
     // PLAB is aligned with the start of each card's memory range.
     // TODO: Assert this in retire_plab?
     if (plab != nullptr) {
-      _heap->retire_plab(plab);
+      ShenandoahGenerationalHeap::heap()->retire_plab(plab);
     }
 
     // SATB protocol requires to keep alive reachable oops from roots at the beginning of GC
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp
index 5fe476f22f8..730378fa5c9 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp
@@ -34,6 +34,7 @@
 #include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
 #include "gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp"
 #include "gc/shenandoah/shenandoahForwarding.inline.hpp"
+#include "gc/shenandoah/shenandoahGeneration.hpp"
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
@@ -258,7 +259,8 @@ inline void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_st
   shenandoah_assert_marked_if(nullptr, value,
                               !CompressedOops::is_null(value) &&
                               ShenandoahHeap::heap()->is_evacuation_in_progress() &&
-                              !(ShenandoahHeap::heap()->is_gc_generation_young() && ShenandoahHeap::heap()->heap_region_containing(value)->is_old()));
+                              !(ShenandoahHeap::heap()->active_generation()->is_young() &&
+                              ShenandoahHeap::heap()->heap_region_containing(value)->is_old()));
   shenandoah_assert_not_in_cset_if(addr, value, value != nullptr && !ShenandoahHeap::heap()->cancelled_gc());
   ShenandoahBarrierSet* const bs = ShenandoahBarrierSet::barrier_set();
   bs->iu_barrier(value);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
index 7ec11988e42..836fb6dcc06 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
@@ -291,7 +291,7 @@ class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
 
   void heap_region_do(ShenandoahHeapRegion *r) {
     // TODO: Add API to heap to skip free regions
-    if (r->affiliation() != FREE) {
+    if (r->is_affiliated()) {
       _ctx->capture_top_at_mark_start(r);
       r->clear_live_data();
     }
@@ -1196,15 +1196,16 @@ void ShenandoahFullGC::phase5_epilog() {
 
     heap->free_set()->rebuild(young_cset_regions, old_cset_regions);
 
-    // We defer generation resizing actions until after cset regions have been recycled.  We do this even following an
-    // abbreviated cycle.
-    if (heap->mode()->is_generational()) {
-      ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set();
-      ShenandoahGenerationalFullGC::rebuild_remembered_set(heap);
-    }
     heap->clear_cancelled_gc(true /* clear oom handler */);
   }
 
   _preserved_marks->restore(heap->workers());
   _preserved_marks->reclaim();
+
+  // We defer generation resizing actions until after cset regions have been recycled.  We do this even following an
+  // abbreviated cycle.
+  if (heap->mode()->is_generational()) {
+    ShenandoahGenerationalFullGC::balance_generations_after_rebuilding_free_set();
+    ShenandoahGenerationalFullGC::rebuild_remembered_set(heap);
+  }
 }
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
index f03aeb2e9ff..19deac9a174 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
@@ -32,6 +32,7 @@
 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
+#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
 #include "gc/shenandoah/shenandoahUtils.hpp"
 #include "gc/shenandoah/shenandoahVerifier.hpp"
@@ -243,11 +244,6 @@ void ShenandoahGeneration::parallel_region_iterate_free(ShenandoahHeapRegionClos
 
 void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap) {
 
-  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
-  size_t regions_available_to_loan = 0;
-  size_t minimum_evacuation_reserve = ShenandoahOldCompactionReserve * region_size_bytes;
-  size_t old_regions_loaned_for_young_evac = 0;
-
   ShenandoahOldGeneration* const old_generation = heap->old_generation();
   ShenandoahYoungGeneration* const young_generation = heap->young_generation();
 
@@ -261,9 +257,6 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
   // critical.  If we cannot promote, there may be degradation of young-gen memory because old objects
   // accumulate there until they can be promoted.  This increases the young-gen marking and evacuation work.
 
-  // Do not fill up old-gen memory with promotions.  Reserve some amount of memory for compaction purposes.
-  size_t young_evac_reserve_max = 0;
-
   // First priority is to reclaim the easy garbage out of young-gen.
 
   // maximum_young_evacuation_reserve is upper bound on memory to be evacuated out of young
@@ -300,7 +293,6 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
   // through ALL of old-gen).  If there is some memory available in old-gen, we will use this for promotions as promotions
   // do not add to the update-refs burden of GC.
 
-  ShenandoahOldHeuristics* const old_heuristics = heap->old_heuristics();
   size_t old_evacuation_reserve, old_promo_reserve;
   if (is_global()) {
     // Global GC is typically triggered by user invocation of System.gc(), and typically indicates that there is lots
@@ -318,7 +310,7 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
     // expanded based on an existing mixed evacuation workload at the end of the previous GC cycle.  We'll expand
     // the budget for evacuation of old during GLOBAL cset selection.
     old_evacuation_reserve = maximum_old_evacuation_reserve;
-  } else if (old_heuristics->unprocessed_old_collection_candidates() > 0) {
+  } else if (old_generation->has_unprocessed_collection_candidates()) {
     // We reserved all old-gen memory at end of previous GC to hold anticipated evacuations to old-gen.  If this is
     // mixed evacuation, reserve all of this memory for compaction of old-gen and do not promote.  Prioritize compaction
     // over promotion in order to defragment OLD so that it will be better prepared to efficiently receive promoted memory.
@@ -334,7 +326,7 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
   // We see too many old-evacuation failures if we force ourselves to evacuate into regions that are not initially empty.
   // So we limit the old-evacuation reserve to unfragmented memory.  Even so, old-evacuation is free to fill in nooks and
   // crannies within existing partially used regions and it generally tries to do so.
-  const size_t old_free_unfragmented = old_generation->free_unaffiliated_regions() * region_size_bytes;
+  const size_t old_free_unfragmented = old_generation->free_unaffiliated_regions() * ShenandoahHeapRegion::region_size_bytes();
   if (old_evacuation_reserve > old_free_unfragmented) {
     const size_t delta = old_evacuation_reserve - old_free_unfragmented;
     old_evacuation_reserve -= delta;
@@ -346,7 +338,7 @@ void ShenandoahGeneration::compute_evacuation_budgets(ShenandoahHeap* const heap
 
   // Preselect regions for promotion by evacuation (obtaining the live data to seed promoted_reserve),
   // and identify regions that will promote in place. These use the tenuring threshold.
-  size_t consumed_by_advance_promotion = select_aged_regions(old_promo_reserve);
+  const size_t consumed_by_advance_promotion = select_aged_regions(old_promo_reserve);
   assert(consumed_by_advance_promotion <= maximum_old_evacuation_reserve, "Cannot promote more than available old-gen memory");
 
   // Note that unused old_promo_reserve might not be entirely consumed_by_advance_promotion.  Do not transfer this
@@ -737,11 +729,7 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
         // coalesce those regions. Only the old regions which are not part of the collection set at this point are
         // eligible for coalescing. As implemented now, this has the side effect of possibly initiating mixed-evacuations
         // after a global cycle for old regions that were not included in this collection set.
-        assert(heap->old_generation()->is_mark_complete(), "Expected old generation mark to be complete after global cycle.");
-        heap->old_heuristics()->prepare_for_old_collections();
-        log_info(gc)("After choosing global collection set, mixed candidates: " UINT32_FORMAT ", coalescing candidates: " SIZE_FORMAT,
-                     heap->old_heuristics()->unprocessed_old_collection_candidates(),
-                     heap->old_heuristics()->coalesce_and_fill_candidates_count());
+        heap->old_generation()->prepare_for_mixed_collections_after_global_gc();
       }
     } else {
       _heuristics->choose_collection_set(collection_set);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
index b87bb02d564..ec853235288 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
@@ -26,17 +26,19 @@
 #define SHARE_VM_GC_SHENANDOAH_SHENANDOAHGENERATION_HPP
 
 #include "memory/allocation.hpp"
-#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
 #include "gc/shenandoah/heuristics/shenandoahSpaceInfo.hpp"
 #include "gc/shenandoah/shenandoahGenerationType.hpp"
 #include "gc/shenandoah/shenandoahLock.hpp"
 #include "gc/shenandoah/shenandoahMarkingContext.hpp"
 
+class ShenandoahCollectionSet;
+class ShenandoahHeap;
 class ShenandoahHeapRegion;
 class ShenandoahHeapRegionClosure;
-class ShenandoahReferenceProcessor;
-class ShenandoahHeap;
+class ShenandoahHeuristics;
 class ShenandoahMode;
+class ShenandoahReferenceProcessor;
+
 
 class ShenandoahGeneration : public CHeapObj<mtGC>, public ShenandoahSpaceInfo {
   friend class VMStructs;
@@ -117,7 +119,7 @@ class ShenandoahGeneration : public CHeapObj<mtGC>, public ShenandoahSpaceInfo {
 
   inline ShenandoahGenerationType type() const { return _type; }
 
-  inline ShenandoahHeuristics* heuristics() const { return _heuristics; }
+  virtual ShenandoahHeuristics* heuristics() const { return _heuristics; }
 
   ShenandoahReferenceProcessor* ref_processor() { return _ref_processor; }
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp
index da5106dce52..8ac6de0eb51 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp
@@ -156,7 +156,7 @@ void ShenandoahGenerationalControlThread::run_service() {
       // We should only be here if the regulator requested a cycle or if
       // there is an old generation mark in progress.
       if (cause == GCCause::_shenandoah_concurrent_gc) {
-        if (_requested_generation == OLD && heap->doing_mixed_evacuations()) {
+        if (_requested_generation == OLD && heap->old_generation()->is_doing_mixed_evacuations()) {
           // If a request to start an old cycle arrived while an old cycle was running, but _before_
           // it chose any regions for evacuation we don't want to start a new old cycle. Rather, we want
           // the heuristic to run a young collection so that we can evacuate some old regions.
@@ -512,7 +512,7 @@ void ShenandoahGenerationalControlThread::service_concurrent_old_cycle(Shenandoa
   }
 }
 
-bool ShenandoahGenerationalControlThread::resume_concurrent_old_cycle(ShenandoahGeneration* generation, GCCause::Cause cause) {
+bool ShenandoahGenerationalControlThread::resume_concurrent_old_cycle(ShenandoahOldGeneration* generation, GCCause::Cause cause) {
   assert(ShenandoahHeap::heap()->is_concurrent_old_mark_in_progress(), "Old mark should be in progress");
   log_debug(gc)("Resuming old generation with " UINT32_FORMAT " marking tasks queued", generation->task_queues()->tasks());
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.hpp
index 463e9650406..c6191f86160 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.hpp
@@ -33,6 +33,8 @@
 #include "gc/shenandoah/shenandoahPadding.hpp"
 #include "gc/shenandoah/shenandoahSharedVariables.hpp"
 
+class ShenandoahOldGeneration;
+
 class ShenandoahGenerationalControlThread: public ShenandoahController {
   friend class VMStructs;
 
@@ -82,7 +84,7 @@ class ShenandoahGenerationalControlThread: public ShenandoahController {
   bool check_cancellation_or_degen(ShenandoahGC::ShenandoahDegenPoint point);
 
   // Returns true if the old generation marking completed (i.e., final mark executed for old generation).
-  bool resume_concurrent_old_cycle(ShenandoahGeneration* generation, GCCause::Cause cause);
+  bool resume_concurrent_old_cycle(ShenandoahOldGeneration* generation, GCCause::Cause cause);
   void service_concurrent_cycle(ShenandoahGeneration* generation, GCCause::Cause cause, bool reset_old_bitmap_specially);
   void service_stw_full_cycle(GCCause::Cause cause);
   void service_stw_degenerated_cycle(GCCause::Cause cause, ShenandoahGC::ShenandoahDegenPoint point);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp
new file mode 100644
index 00000000000..93442843d92
--- /dev/null
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp
@@ -0,0 +1,285 @@
+/*
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "gc/shenandoah/shenandoahAsserts.hpp"
+#include "gc/shenandoah/shenandoahFreeSet.hpp"
+#include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
+#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
+#include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahOldGeneration.hpp"
+#include "gc/shenandoah/shenandoahPacer.hpp"
+#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
+#include "gc/shenandoah/shenandoahYoungGeneration.hpp"
+#include "gc/shenandoah/shenandoahUtils.hpp"
+
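+// Object closure that evacuates each marked object it visits, unless the object has already been forwarded.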
+class ShenandoahConcurrentEvacuator : public ObjectClosure {
+private:
+  ShenandoahGenerationalHeap* const _heap;
+  Thread* const _thread;
+public:
+  explicit ShenandoahConcurrentEvacuator(ShenandoahGenerationalHeap* heap) :
+          _heap(heap), _thread(Thread::current()) {}
+
+  void do_object(oop p) override {
+    shenandoah_assert_marked(nullptr, p);
+    if (!p->is_forwarded()) {
+      _heap->evacuate_object(p, _thread);
+    }
+  }
+};
+
+ShenandoahGenerationalEvacuationTask::ShenandoahGenerationalEvacuationTask(ShenandoahGenerationalHeap* heap,
+                                                                           ShenandoahRegionIterator* iterator,
+                                                                           bool concurrent) :
+  WorkerTask("Shenandoah Evacuation"),
+  _heap(heap),
+  _regions(iterator),
+  _concurrent(concurrent),
+  _tenuring_threshold(0)
+{
+  shenandoah_assert_generational();
+  _tenuring_threshold = _heap->age_census()->tenuring_threshold();
+}
+
+void ShenandoahGenerationalEvacuationTask::work(uint worker_id) {
+  if (_concurrent) {
+    ShenandoahConcurrentWorkerSession worker_session(worker_id);
+    ShenandoahSuspendibleThreadSetJoiner stsj;
+    ShenandoahEvacOOMScope oom_evac_scope;
+    do_work();
+  } else {
+    ShenandoahParallelWorkerSession worker_session(worker_id);
+    ShenandoahEvacOOMScope oom_evac_scope;
+    do_work();
+  }
+}
+
+void ShenandoahGenerationalEvacuationTask::do_work() {
+  ShenandoahConcurrentEvacuator cl(_heap);
+  ShenandoahHeapRegion* r;
+  ShenandoahMarkingContext* const ctx = _heap->marking_context();
+
+  while ((r = _regions->next()) != nullptr) {
+    log_debug(gc)("GenerationalEvacuationTask do_work(), looking at %s region " SIZE_FORMAT ", (age: %d) [%s, %s, %s]",
+            r->is_old()? "old": r->is_young()? "young": "free", r->index(), r->age(),
+            r->is_active()? "active": "inactive",
+            r->is_humongous()? (r->is_humongous_start()? "humongous_start": "humongous_continuation"): "regular",
+            r->is_cset()? "cset": "not-cset");
+
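+    // Collection-set regions have all of their live objects evacuated; other regions are only considered for promotion below.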
+    if (r->is_cset()) {
+      assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
+      _heap->marked_object_iterate(r, &cl);
+      if (ShenandoahPacing) {
+        _heap->pacer()->report_evac(r->used() >> LogHeapWordSize);
+      }
+    } else if (r->is_young() && r->is_active() && (r->age() >= _tenuring_threshold)) {
+      if (r->is_humongous_start()) {
+        // We promote humongous_start regions along with their affiliated continuations during evacuation rather than
+        // doing this work during a safepoint.  We cannot put humongous regions into the collection set because that
+        // triggers the load-reference barrier (LRB) to copy on reference fetch.
+        promote_humongous(r);
+      } else if (r->is_regular() && (r->get_top_before_promote() != nullptr)) {
+        // Likewise, we cannot put promote-in-place regions into the collection set because that would also trigger
+        // the LRB to copy on reference fetch.
+        promote_in_place(r);
+      }
+      // Aged humongous continuation regions are handled with their start region.  If an aged regular region has
+      // more garbage than ShenandoahOldGarbageThreshold, we'll promote by evacuation.  If there is room for evacuation
+      // in this cycle, the region will be in the collection set.  If there is not room, the region will be promoted
+      // by evacuation in some future GC cycle.
+
+      // If an aged regular region has received allocations during the current cycle, we do not promote because the
+      // newly allocated objects do not have appropriate age; this region's age will be reset to zero at end of cycle.
+    }
+    // else, region is free, or OLD, or not in collection set, or humongous_continuation,
+    // or is young humongous_start that is too young to be promoted
+    if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
+      break;
+    }
+  }
+}
+
+// When we promote a region in place, we can continue to use the established marking context to guide subsequent remembered
+// set scans of this region's content.  The region will be coalesced and filled prior to the next old-gen marking effort.
+// We identify the entirety of the region as DIRTY to force the next remembered set scan to identify the "interesting pointers"
+// contained herein.
+void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion* region) {
+  ShenandoahMarkingContext* const marking_context = _heap->marking_context();
+  HeapWord* const tams = marking_context->top_at_mark_start(region);
+
+  {
+    const size_t old_garbage_threshold = (ShenandoahHeapRegion::region_size_bytes() * ShenandoahOldGarbageThreshold) / 100;
+    assert(_heap->active_generation()->is_mark_complete(), "sanity");
+    assert(!_heap->is_concurrent_old_mark_in_progress(), "Cannot promote in place during old marking");
+    assert(region->garbage_before_padded_for_promote() < old_garbage_threshold, "Region " SIZE_FORMAT " has too much garbage for promotion", region->index());
+    assert(region->is_young(), "Only young regions can be promoted");
+    assert(region->is_regular(), "Use different service to promote humongous regions");
+    assert(region->age() >= _heap->age_census()->tenuring_threshold(), "Only promote regions that are sufficiently aged");
+    assert(region->get_top_before_promote() == tams, "Region " SIZE_FORMAT " has been used for allocations before promotion", region->index());
+  }
+
+  // Rebuild the remembered set information and mark the entire range as DIRTY.  We do NOT scan the content of this
+  // range to determine which cards need to be DIRTY.  That would force us to scan the region twice, once now, and
+  // once during the subsequent remembered set scan.  Instead, we blindly (conservatively) mark everything as DIRTY
+  // now and then sort out the CLEAN cards during the next remembered set scan.
+  //
+  // Rebuilding the remembered set consists of clearing all object registrations (reset_object_range()) here,
+  // then registering every live object and every coalesced range of free objects in the loop that follows.
+  _heap->card_scan()->reset_object_range(region->bottom(), region->end());
+  _heap->card_scan()->mark_range_as_dirty(region->bottom(), region->get_top_before_promote() - region->bottom());
+
+  // TODO: use an existing coalesce-and-fill function rather than replicating the code here.
+  HeapWord* obj_addr = region->bottom();
+  while (obj_addr < tams) {
+    oop obj = cast_to_oop(obj_addr);
+    if (marking_context->is_marked(obj)) {
+      assert(obj->klass() != nullptr, "klass should not be NULL");
+      // This thread is responsible for registering all objects in this region.  No need for lock.
+      _heap->card_scan()->register_object_without_lock(obj_addr);
+      obj_addr += obj->size();
+    } else {
+      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, tams);
+      assert(next_marked_obj <= tams, "next marked object cannot exceed tams");
+      size_t fill_size = next_marked_obj - obj_addr;
+      assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size");
+      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
+      _heap->card_scan()->register_object_without_lock(obj_addr);
+      obj_addr = next_marked_obj;
+    }
+  }
+  // We do not need to scan above TAMS because restored top equals tams
+  assert(obj_addr == tams, "Expect loop to terminate when obj_addr equals tams");
+
+  ShenandoahOldGeneration* const old_gen = _heap->old_generation();
+  ShenandoahYoungGeneration* const young_gen = _heap->young_generation();
+
+  {
+    ShenandoahHeapLocker locker(_heap->lock());
+
+    HeapWord* update_watermark = region->get_update_watermark();
+
+    // Now that this region is affiliated with old, we can allow it to receive allocations, though it may not be in the
+    // is_collector_free range.
+    region->restore_top_before_promote();
+
+    size_t region_used = region->used();
+
+    // The update_watermark was likely established while we had the artificially high value of top.  Make it sane now.
+    assert(update_watermark >= region->top(), "original top cannot exceed preserved update_watermark");
+    region->set_update_watermark(region->top());
+
+    // Unconditionally transfer one region from young to old. This represents the newly promoted region.
+    // This expands old and shrinks young by the size of one region.  Strictly, we do not "need" to expand old
+    // if there are already enough unaffiliated regions in old to account for this newly promoted region.
+    // However, if we do not transfer the capacities, we end up reducing the amount of memory that would have
+    // otherwise been available to hold old evacuations, because old available is max_capacity - used and now
+    // we would be trading a fully empty region for a partially used region.
+    young_gen->decrease_used(region_used);
+    young_gen->decrement_affiliated_region_count();
+
+    // transfer_to_old() increases capacity of old and decreases capacity of young
+    _heap->generation_sizer()->force_transfer_to_old(1);
+    region->set_affiliation(OLD_GENERATION);
+
+    old_gen->increment_affiliated_region_count();
+    old_gen->increase_used(region_used);
+
+    // add_old_collector_free_region() increases promoted_reserve() if available space exceeds plab_min_size()
+    _heap->free_set()->add_old_collector_free_region(region);
+  }
+}
+
+void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegion* region) {
+  ShenandoahMarkingContext* marking_context = _heap->marking_context();
+  oop obj = cast_to_oop(region->bottom());
+  assert(_heap->active_generation()->is_mark_complete(), "sanity");
+  assert(region->is_young(), "Only young regions can be promoted");
+  assert(region->is_humongous_start(), "Should not promote humongous continuation in isolation");
+  assert(region->age() >= _heap->age_census()->tenuring_threshold(), "Only promote regions that are sufficiently aged");
+  assert(marking_context->is_marked(obj), "promoted humongous object should be alive");
+
+  // TODO: Consider not promoting humongous objects that represent primitive arrays.  Leaving a primitive array
+  // (obj->is_typeArray()) in young-gen is harmless because these objects are never relocated and they are not
+  // scanned.  Leaving primitive arrays in young-gen memory allows their memory to be reclaimed more quickly when
+  // it becomes garbage.  Better to not make this change until sizes of young-gen and old-gen are completely
+  // adaptive, as leaving primitive arrays in young-gen might be perceived as an "astonishing result" by someone who
+  // has carefully analyzed the required sizes of an application's young-gen and old-gen.
+  const size_t used_bytes = obj->size() * HeapWordSize;
+  const size_t spanned_regions = ShenandoahHeapRegion::required_regions(used_bytes);
+  const size_t humongous_waste = spanned_regions * ShenandoahHeapRegion::region_size_bytes() - obj->size() * HeapWordSize;
+  const size_t index_limit = region->index() + spanned_regions;
+
+  ShenandoahGeneration* const old_generation = _heap->old_generation();
+  ShenandoahGeneration* const young_generation = _heap->young_generation();
+  {
+    // We need to grab the heap lock in order to avoid a race when changing the affiliations of spanned_regions from
+    // young to old.
+    ShenandoahHeapLocker locker(_heap->lock());
+
+    // We promote humongous objects unconditionally, without checking for availability.  We adjust
+    // usage totals, including humongous waste, after evacuation is done.
+    log_debug(gc)("promoting humongous region " SIZE_FORMAT ", spanning " SIZE_FORMAT, region->index(), spanned_regions);
+
+    young_generation->decrease_used(used_bytes);
+    young_generation->decrease_humongous_waste(humongous_waste);
+    young_generation->decrease_affiliated_region_count(spanned_regions);
+
+    // transfer_to_old() increases capacity of old and decreases capacity of young
+    _heap->generation_sizer()->force_transfer_to_old(spanned_regions);
+
+    // For this region and each humongous continuation region spanned by this humongous object, change
+    // affiliation to OLD_GENERATION and adjust the generation-use tallies.  The remnant of memory
+    // in the last humongous region that is not spanned by obj is currently not used.
+    for (size_t i = region->index(); i < index_limit; i++) {
+      ShenandoahHeapRegion* r = _heap->get_region(i);
+      log_debug(gc)("promoting humongous region " SIZE_FORMAT ", from " PTR_FORMAT " to " PTR_FORMAT,
+              r->index(), p2i(r->bottom()), p2i(r->top()));
+      // Card marks for the entire humongous object's range are set after the loop terminates, so there is no need to dirty the range here
+      r->set_affiliation(OLD_GENERATION);
+    }
+
+    old_generation->increase_affiliated_region_count(spanned_regions);
+    old_generation->increase_used(used_bytes);
+    old_generation->increase_humongous_waste(humongous_waste);
+  }
+
+  // Since this region may have served previously as OLD, it may hold obsolete object range info.
+  HeapWord* const humongous_bottom = region->bottom();
+  _heap->card_scan()->reset_object_range(humongous_bottom, humongous_bottom + spanned_regions * ShenandoahHeapRegion::region_size_words());
+  // Since the humongous region holds only one object, no lock is necessary for this register_object() invocation.
+  _heap->card_scan()->register_object_without_lock(humongous_bottom);
+
+  if (obj->is_typeArray()) {
+    // Primitive arrays don't need to be scanned.
+    log_debug(gc)("Clean cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
+            region->index(), p2i(humongous_bottom), p2i(humongous_bottom + obj->size()));
+    _heap->card_scan()->mark_range_as_clean(humongous_bottom, obj->size());
+  } else {
+    log_debug(gc)("Dirty cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
+            region->index(), p2i(humongous_bottom), p2i(humongous_bottom + obj->size()));
+    _heap->card_scan()->mark_range_as_dirty(humongous_bottom, obj->size());
+  }
+}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp
new file mode 100644
index 00000000000..1b8de87231d
--- /dev/null
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHGENERATIONALEVACUATIONTASK_HPP
+#define SHARE_GC_SHENANDOAH_SHENANDOAHGENERATIONALEVACUATIONTASK_HPP
+
+#include "gc/shared/workerThread.hpp"
+
+class ShenandoahGenerationalHeap;
+class ShenandoahHeapRegion;
+class ShenandoahRegionIterator;
+
+// Unlike ShenandoahEvacuationTask, this iterates over all regions rather than just the collection set.
+// This is needed in order to promote humongous start regions if age() >= tenure threshold.
+class ShenandoahGenerationalEvacuationTask : public WorkerTask {
+private:
+  ShenandoahGenerationalHeap* const _heap;
+  ShenandoahRegionIterator* _regions;
+  bool _concurrent;
+  uint _tenuring_threshold;
+
+public:
+  ShenandoahGenerationalEvacuationTask(ShenandoahGenerationalHeap* sh,
+                                       ShenandoahRegionIterator* iterator,
+                                       bool concurrent);
+  void work(uint worker_id) override;
+private:
+  void do_work();
+
+  void promote_in_place(ShenandoahHeapRegion* region);
+  void promote_humongous(ShenandoahHeapRegion* region);
+};
+
+#endif //SHARE_GC_SHENANDOAH_SHENANDOAHGENERATIONALEVACUATIONTASK_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
index c8188cbb387..00476afde0b 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
@@ -25,16 +25,22 @@
 #include "precompiled.hpp"
 
 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
+#include "gc/shenandoah/shenandoahFreeSet.hpp"
+#include "gc/shenandoah/shenandoahGenerationalControlThread.hpp"
+#include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahHeapRegion.hpp"
 #include "gc/shenandoah/shenandoahInitLogger.hpp"
 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
 #include "gc/shenandoah/shenandoahRegulatorThread.hpp"
+#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
-
+#include "gc/shenandoah/shenandoahUtils.hpp"
 #include "logging/log.hpp"
 
+
 class ShenandoahGenerationalInitLogger : public ShenandoahInitLogger {
 public:
   static void print() {
@@ -72,22 +78,29 @@ ShenandoahGenerationalHeap* ShenandoahGenerationalHeap::heap() {
   return checked_cast<ShenandoahGenerationalHeap*>(heap);
 }
 
-size_t ShenandoahGenerationalHeap::calculate_min_plab() const {
+size_t ShenandoahGenerationalHeap::calculate_min_plab() {
   return align_up(PLAB::min_size(), CardTable::card_size_in_words());
 }
 
-size_t ShenandoahGenerationalHeap::calculate_max_plab() const {
+size_t ShenandoahGenerationalHeap::calculate_max_plab() {
   size_t MaxTLABSizeWords = ShenandoahHeapRegion::max_tlab_size_words();
   return ((ShenandoahMaxEvacLABRatio > 0)?
           align_down(MIN2(MaxTLABSizeWords, PLAB::min_size() * ShenandoahMaxEvacLABRatio), CardTable::card_size_in_words()):
           align_down(MaxTLABSizeWords, CardTable::card_size_in_words()));
 }
 
+// Returns size in bytes
+size_t ShenandoahGenerationalHeap::unsafe_max_tlab_alloc(Thread *thread) const {
+  return MIN2(ShenandoahHeapRegion::max_tlab_size_bytes(), young_generation()->available());
+}
+
 ShenandoahGenerationalHeap::ShenandoahGenerationalHeap(ShenandoahCollectorPolicy* policy) :
   ShenandoahHeap(policy),
   _min_plab_size(calculate_min_plab()),
   _max_plab_size(calculate_max_plab()),
-  _regulator_thread(nullptr) {
+  _regulator_thread(nullptr),
+  _young_gen_memory_pool(nullptr),
+  _old_gen_memory_pool(nullptr) {
   assert(is_aligned(_min_plab_size, CardTable::card_size_in_words()), "min_plab_size must be aligned");
   assert(is_aligned(_max_plab_size, CardTable::card_size_in_words()), "max_plab_size must be aligned");
 }
@@ -133,6 +146,395 @@ void ShenandoahGenerationalHeap::stop() {
   ShenandoahHeap::stop();
 }
 
+oop ShenandoahGenerationalHeap::evacuate_object(oop p, Thread* thread) {
+  assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
+  if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
+    // This thread went through the OOM during evac protocol and it is safe to return
+    // the forward pointer. It must not attempt to evacuate anymore.
+    return ShenandoahBarrierSet::resolve_forwarded(p);
+  }
+
+  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
+
+  ShenandoahHeapRegion* r = heap_region_containing(p);
+  assert(!r->is_humongous(), "never evacuate humongous objects");
+
+  ShenandoahAffiliation target_gen = r->affiliation();
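+  // During young collections, aged objects in young regions are first offered to old-gen (promotion); if that fails, fall back to evacuation within young.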
+  if (active_generation()->is_young() && target_gen == YOUNG_GENERATION) {
+    markWord mark = p->mark();
+    if (mark.is_marked()) {
+      // Already forwarded.
+      return ShenandoahBarrierSet::resolve_forwarded(p);
+    }
+
+    if (mark.has_displaced_mark_helper()) {
+      // We don't want to deal with MT here just to ensure we read the right mark word.
+      // Skip the potential promotion attempt for this one.
+    } else if (r->age() + mark.age() >= age_census()->tenuring_threshold()) {
+      oop result = try_evacuate_object(p, thread, r, OLD_GENERATION);
+      if (result != nullptr) {
+        return result;
+      }
+      // If we failed to promote this aged object, we'll fall through to code below and evacuate to young-gen.
+    }
+  }
+  return try_evacuate_object(p, thread, r, target_gen);
+}
+
+// try_evacuate_object registers the object and dirties the associated remembered set information when evacuating
+// to OLD_GENERATION.
+oop ShenandoahGenerationalHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
+                                        ShenandoahAffiliation target_gen) {
+  bool alloc_from_lab = true;
+  bool has_plab = false;
+  HeapWord* copy = nullptr;
+  size_t size = p->size();
+  bool is_promotion = (target_gen == OLD_GENERATION) && from_region->is_young();
+
+#ifdef ASSERT
+  if (ShenandoahOOMDuringEvacALot &&
+      (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
+    copy = nullptr;
+  } else {
+#endif
+    if (UseTLAB) {
+      switch (target_gen) {
+        case YOUNG_GENERATION: {
+          copy = allocate_from_gclab(thread, size);
+          if ((copy == nullptr) && (size < ShenandoahThreadLocalData::gclab_size(thread))) {
+            // GCLAB allocation failed because we are bumping up against the limit on young evacuation reserve.  Try resetting
+            // the desired GCLAB size and retry GCLAB allocation to avoid cascading of shared memory allocations.
+            ShenandoahThreadLocalData::set_gclab_size(thread, PLAB::min_size());
+            copy = allocate_from_gclab(thread, size);
+            // If we still get nullptr, we'll try a shared allocation below.
+          }
+          break;
+        }
+        case OLD_GENERATION: {
+          assert(mode()->is_generational(), "OLD Generation only exists in generational mode");
+          PLAB* plab = ShenandoahThreadLocalData::plab(thread);
+          if (plab != nullptr) {
+            has_plab = true;
+          }
+          copy = allocate_from_plab(thread, size, is_promotion);
+          if ((copy == nullptr) && (size < ShenandoahThreadLocalData::plab_size(thread)) &&
+              ShenandoahThreadLocalData::plab_retries_enabled(thread)) {
+            // PLAB allocation failed because we are bumping up against the limit on old evacuation reserve or because
+            // the requested object does not fit within the current plab but the plab still has an "abundance" of memory,
+            // where abundance is defined as >= ShenGenHeap::plab_min_size().  In the former case, we try resetting the desired
+            // PLAB size and retry PLAB allocation to avoid cascading of shared memory allocations.
+
+            // In this situation, PLAB memory is precious.  We'll try to preserve our existing PLAB by forcing
+            // this particular allocation to be shared.
+            if (plab->words_remaining() < plab_min_size()) {
+              ShenandoahThreadLocalData::set_plab_size(thread, plab_min_size());
+              copy = allocate_from_plab(thread, size, is_promotion);
+              // If we still get nullptr, we'll try a shared allocation below.
+              if (copy == nullptr) {
+                // If retry fails, don't continue to retry until we have success (probably in next GC pass)
+                ShenandoahThreadLocalData::disable_plab_retries(thread);
+              }
+            }
+            // else, copy still equals nullptr.  This causes shared allocation below, preserving this plab for future needs.
+          }
+          break;
+        }
+        default: {
+          ShouldNotReachHere();
+          break;
+        }
+      }
+    }
+
+    if (copy == nullptr) {
+      // If we failed to allocate in LAB, we'll try a shared allocation.
+      if (!is_promotion || !has_plab || (size > PLAB::min_size())) {
+        ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen, is_promotion);
+        copy = allocate_memory(req);
+        alloc_from_lab = false;
+      }
+      // else, we leave copy equal to nullptr, signaling a promotion failure below if appropriate.
+      // We choose not to promote objects smaller than PLAB::min_size() by way of shared allocations, as this is too
+      // costly.  Instead, we'll simply "evacuate" to young-gen memory (using a GCLAB) and will promote in a future
+      // evacuation pass.  This condition is denoted by: is_promotion && has_plab && (size <= PLAB::min_size())
+    }
+#ifdef ASSERT
+  }
+#endif
+
+  if (copy == nullptr) {
+    if (target_gen == OLD_GENERATION) {
+      if (from_region->is_young()) {
+        // Signal that promotion failed. Will evacuate this old object somewhere in young gen.
+        old_generation()->handle_failed_promotion(thread, size);
+        return nullptr;
+      } else {
+        // Remember that evacuation to old gen failed. We'll want to trigger a full gc to recover from this
+        // after the evacuation threads have finished.
+        old_generation()->handle_failed_evacuation();
+      }
+    }
+
+    control_thread()->handle_alloc_failure_evac(size);
+
+    oom_evac_handler()->handle_out_of_memory_during_evacuation();
+
+    return ShenandoahBarrierSet::resolve_forwarded(p);
+  }
+
+  // Copy the object:
+  evac_tracker()->begin_evacuation(thread, size * HeapWordSize);
+  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
+
+  oop copy_val = cast_to_oop(copy);
+
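+  // During an aging cycle, bump the copy's age by the source region's age plus one.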
+  if (target_gen == YOUNG_GENERATION && is_aging_cycle()) {
+    ShenandoahHeap::increase_object_age(copy_val, from_region->age() + 1);
+  }
+
+  // Try to install the new forwarding pointer.
+  ContinuationGCSupport::relativize_stack_chunk(copy_val);
+
+  oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
+  if (result == copy_val) {
+    // Successfully evacuated. Our copy is now the public one!
+    evac_tracker()->end_evacuation(thread, size * HeapWordSize);
+    if (target_gen == OLD_GENERATION) {
+      old_generation()->handle_evacuation(copy, size, from_region->is_young());
+    } else {
+      // When copying to the old generation above, we don't care
+      // about recording object age in the census stats.
+      assert(target_gen == YOUNG_GENERATION, "Error");
+      // We record this census only when simulating pre-adaptive tenuring behavior, or
+      // when we have been asked to record the census at evacuation rather than at mark
+      if (ShenandoahGenerationalCensusAtEvac || !ShenandoahGenerationalAdaptiveTenuring) {
+        evac_tracker()->record_age(thread, size * HeapWordSize, ShenandoahHeap::get_object_age(copy_val));
+      }
+    }
+    shenandoah_assert_correct(nullptr, copy_val);
+    return copy_val;
+  } else {
+    // Failed to evacuate. We need to deal with the object that is left behind. Since this
+    // new allocation is certainly after TAMS, it will be considered live in the next cycle.
+    // But if it happens to contain references to evacuated regions, those references would
+    // not get updated for this stale copy during this cycle, and we will crash while scanning
+    // it the next cycle.
+    if (alloc_from_lab) {
+      // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
+      // object will overwrite this stale copy, or the filler object on LAB retirement will
+      // do this.
+      switch (target_gen) {
+        case YOUNG_GENERATION: {
+          ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
+          break;
+        }
+        case OLD_GENERATION: {
+          ShenandoahThreadLocalData::plab(thread)->undo_allocation(copy, size);
+          if (is_promotion) {
+            ShenandoahThreadLocalData::subtract_from_plab_promoted(thread, size * HeapWordSize);
+          } else {
+            ShenandoahThreadLocalData::subtract_from_plab_evacuated(thread, size * HeapWordSize);
+          }
+          break;
+        }
+        default: {
+          ShouldNotReachHere();
+          break;
+        }
+      }
+    } else {
+      // For non-LAB allocations, we have no way to retract the allocation, and
+      // have to explicitly overwrite the copy with the filler object. With that overwrite,
+      // we have to keep the fwdptr initialized and pointing to our (stale) copy.
+      assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
+      fill_with_object(copy, size);
+      shenandoah_assert_correct(nullptr, copy_val);
+      // For non-LAB allocations, the object has already been registered
+    }
+    shenandoah_assert_correct(nullptr, result);
+    return result;
+  }
+}
+
+inline HeapWord* ShenandoahGenerationalHeap::allocate_from_plab(Thread* thread, size_t size, bool is_promotion) {
+  assert(UseTLAB, "TLABs should be enabled");
+
+  PLAB* plab = ShenandoahThreadLocalData::plab(thread);
+  HeapWord* obj;
+
+  if (plab == nullptr) {
+    assert(!thread->is_Java_thread() && !thread->is_Worker_thread(), "Performance: thread should have PLAB: %s", thread->name());
+    // No PLABs in this thread, fallback to shared allocation
+    return nullptr;
+  } else if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
+    return nullptr;
+  }
+  // if plab->word_size() <= 0, thread's plab not yet initialized for this pass, so allow_plab_promotions() is not trustworthy
+  obj = plab->allocate(size);
+  if ((obj == nullptr) && (plab->words_remaining() < plab_min_size())) {
+    // allocate_from_plab_slow will establish allow_plab_promotions(thread) for future invocations
+    obj = allocate_from_plab_slow(thread, size, is_promotion);
+  }
+  // if plab->words_remaining() >= ShenGenHeap::heap()->plab_min_size(), just return nullptr so we can use a shared allocation
+  if (obj == nullptr) {
+    return nullptr;
+  }
+
+  if (is_promotion) {
+    ShenandoahThreadLocalData::add_to_plab_promoted(thread, size * HeapWordSize);
+  } else {
+    ShenandoahThreadLocalData::add_to_plab_evacuated(thread, size * HeapWordSize);
+  }
+  return obj;
+}
+
+// Establish a new PLAB and allocate size HeapWords within it.
+HeapWord* ShenandoahGenerationalHeap::allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion) {
+  // New object should fit the PLAB size
+
+  assert(mode()->is_generational(), "PLABs only relevant to generational GC");
+  const size_t plab_min_size = this->plab_min_size();
+  const size_t min_size = (size > plab_min_size)? align_up(size, CardTable::card_size_in_words()): plab_min_size;
+
+  // Figure out size of new PLAB, looking back at heuristics. Expand aggressively.  PLABs must align on size
+  // of card table in order to avoid the need for synchronization when registering newly allocated objects within
+  // the card table.
+  size_t cur_size = ShenandoahThreadLocalData::plab_size(thread);
+  if (cur_size == 0) {
+    cur_size = plab_min_size;
+  }
+
+  // Limit growth of PLABs to the smaller of ShenandoahMaxEvacLABRatio * the minimum size and ShenandoahHumongousThreshold.
+  // This limit is represented by generational_heap->plab_max_size().  Enforcing this limit enables more equitable
+  // distribution of available evacuation budget between the many threads that are coordinating in the evacuation effort.
+  size_t future_size = MIN2(cur_size * 2, plab_max_size());
+  assert(is_aligned(future_size, CardTable::card_size_in_words()), "Align by design, future_size: " SIZE_FORMAT
+          ", alignment: " SIZE_FORMAT ", cur_size: " SIZE_FORMAT ", max: " SIZE_FORMAT,
+         future_size, (size_t) CardTable::card_size_in_words(), cur_size, plab_max_size());
+
+  // Record new heuristic value even if we take any shortcut. This captures
+  // the case when moderately-sized objects always take a shortcut. At some point,
+  // heuristics should catch up with them.  Note that the requested cur_size may
+  // not be honored, but we remember that this is the preferred size.
+  ShenandoahThreadLocalData::set_plab_size(thread, future_size);
+  if (cur_size < size) {
+    // The PLAB to be allocated is still not large enough to hold the object. Fall back to shared allocation.
+    // This avoids retiring perfectly good PLABs in order to represent a single large object allocation.
+    return nullptr;
+  }
+
+  // If the current PLAB is nearly exhausted, retire it and allocate a new one.
+  PLAB* plab = ShenandoahThreadLocalData::plab(thread);
+  if (plab->words_remaining() < plab_min_size) {
+    // Retire current PLAB, and allocate a new one.
+    // CAUTION: retire_plab may register the remnant filler object with the remembered set scanner without a lock.  This
+    // is safe iff it is assured that each PLAB is a whole-number multiple of card-mark memory size and each PLAB is
+    // aligned with the start of a card's memory range.
+    retire_plab(plab, thread);
+
+    size_t actual_size = 0;
+    // allocate_new_plab resets plab_evacuated and plab_promoted and disables promotions if old-gen available is
+    // less than the remaining evacuation need.  It also adjusts plab_preallocated and expend_promoted if appropriate.
+    HeapWord* plab_buf = allocate_new_plab(min_size, cur_size, &actual_size);
+    if (plab_buf == nullptr) {
+      if (min_size == plab_min_size) {
+        // Disable plab promotions for this thread because we cannot even allocate a plab of minimal size.  This allows us
+        // to fail faster on subsequent promotion attempts.
+        ShenandoahThreadLocalData::disable_plab_promotions(thread);
+      }
+      return nullptr;
+    } else {
+      ShenandoahThreadLocalData::enable_plab_retries(thread);
+    }
+    // Since the allocated PLAB may have been down-sized for alignment, plab->allocate(size) below may still fail.
+    if (ZeroTLAB) {
+      // ... and clear it.
+      Copy::zero_to_words(plab_buf, actual_size);
+    } else {
+      // ...and zap just allocated object.
+#ifdef ASSERT
+      // Skip mangling the space corresponding to the object header to
+      // ensure that the returned space is not considered parsable by
+      // any concurrent GC thread.
+      size_t hdr_size = oopDesc::header_size();
+      Copy::fill_to_words(plab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
+#endif // ASSERT
+    }
+    assert(is_aligned(actual_size, CardTable::card_size_in_words()), "Align by design");
+    plab->set_buf(plab_buf, actual_size);
+    if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
+      return nullptr;
+    }
+    return plab->allocate(size);
+  } else {
+    // If there's still at least min_size() words available within the current plab, don't retire it.  Let's gnaw
+    // away on this plab as long as we can.  Meanwhile, return nullptr to force this particular allocation request
+    // to be satisfied with a shared allocation.  By packing more promotions into the previously allocated PLAB, we
+    // reduce the likelihood of evacuation failures, and we reduce the need for downsizing our PLABs.
+    return nullptr;
+  }
+}
+
+HeapWord* ShenandoahGenerationalHeap::allocate_new_plab(size_t min_size, size_t word_size, size_t* actual_size) {
+  // Align requested sizes to card-sized multiples.  Align down so that we don't violate max size of TLAB.
+  assert(is_aligned(min_size, CardTable::card_size_in_words()), "Align by design");
+  assert(word_size >= min_size, "Requested PLAB is too small");
+
+  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_plab(min_size, word_size);
+  // Note that allocate_memory() sets a thread-local flag to prohibit further promotions by this thread
+  // if we are at risk of infringing on the old-gen evacuation budget.
+  HeapWord* res = allocate_memory(req);
+  if (res != nullptr) {
+    *actual_size = req.actual_size();
+  } else {
+    *actual_size = 0;
+  }
+  assert(is_aligned(res, CardTable::card_size_in_words()), "Align by design");
+  return res;
+}
+
+// TODO: It is probably most efficient to register all objects (both promotions and evacuations) that were allocated within
+// this plab at the time we retire the plab.  A tight registration loop will run within both code and data caches.  This change
+// would allow smaller and faster in-line implementation of alloc_from_plab().  Since plabs are aligned on card-table boundaries,
+// this object registration loop can be performed without acquiring a lock.
+void ShenandoahGenerationalHeap::retire_plab(PLAB* plab, Thread* thread) {
+  // We don't enforce limits on plab_evacuated.  We let it consume all available old-gen memory in order to reduce
+  // probability of an evacuation failure.  We do enforce limits on promotion, to make sure that excessive promotion
+  // does not result in an old-gen evacuation failure.  Note that a failed promotion is relatively harmless.  Any
+  // object that fails to promote in the current cycle will be eligible for promotion in a subsequent cycle.
+
+  // When the plab was instantiated, the entire buffer was treated as if it would be dedicated to promotions.
+  // Now that we are retiring the buffer, we adjust for the reality that not all of the plab was used for promotions.
+  //  1. Some of the plab may have been dedicated to evacuations.
+  //  2. Some of the plab may have been abandoned due to waste (at the end of the plab).
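+  //
+  // For example (hypothetical numbers): if this plab's preallocated promotion budget was 32 KiB and only
+  // 24 KiB was actually promoted, the remaining 8 KiB is handed back to the old generation's promotion
+  // budget below via unexpend_promoted().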
+  size_t not_promoted =
+          ShenandoahThreadLocalData::get_plab_preallocated_promoted(thread) - ShenandoahThreadLocalData::get_plab_promoted(thread);
+  ShenandoahThreadLocalData::reset_plab_promoted(thread);
+  ShenandoahThreadLocalData::reset_plab_evacuated(thread);
+  ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
+  if (not_promoted > 0) {
+    old_generation()->unexpend_promoted(not_promoted);
+  }
+  const size_t original_waste = plab->waste();
+  HeapWord* const top = plab->top();
+
+  // plab->retire() overwrites unused memory between plab->top() and plab->hard_end() with a dummy object to make memory parsable.
+  // It adds the size of this unused memory, in words, to plab->waste().
+  plab->retire();
+  if (top != nullptr && plab->waste() > original_waste && is_in_old(top)) {
+    // If retiring the plab created a filler object, then we need to register it with our card scanner so it can
+    // safely walk the region backing the plab.
+    log_debug(gc)("retire_plab() is registering remnant of size " SIZE_FORMAT " at " PTR_FORMAT,
+                  plab->waste() - original_waste, p2i(top));
+    card_scan()->register_object_without_lock(top);
+  }
+}
+
+void ShenandoahGenerationalHeap::retire_plab(PLAB* plab) {
+  Thread* thread = Thread::current();
+  retire_plab(plab, thread);
+}
+
 ShenandoahGenerationalHeap::TransferResult ShenandoahGenerationalHeap::balance_generations() {
   shenandoah_assert_heaplocked_or_safepoint();
 
@@ -201,13 +603,11 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t old_xfer_
 
   // Decide how much old space we should reserve for a mixed collection
   size_t reserve_for_mixed = 0;
-  const size_t mixed_candidates = old_heuristics()->unprocessed_old_collection_candidates();
-  const bool doing_mixed = (mixed_candidates > 0);
-  if (doing_mixed) {
+  if (old_generation()->has_unprocessed_collection_candidates()) {
     // We want this much memory to be unfragmented in order to reliably evacuate old.  This is conservative because we
     // may not evacuate the entirety of unprocessed candidates in a single mixed evacuation.
     const size_t max_evac_need = (size_t)
-            (old_heuristics()->unprocessed_old_collection_candidates_live_memory() * ShenandoahOldEvacWaste);
+            (old_generation()->unprocessed_collection_candidates_live_memory() * ShenandoahOldEvacWaste);
     assert(old_available >= old_generation()->free_unaffiliated_regions() * region_size_bytes,
            "Unaffiliated available must be less than total available");
     const size_t old_fragmented_available =
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp
index df88f843adc..98693a7f90c 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.hpp
@@ -31,24 +31,37 @@ class ShenandoahRegulatorThread;
 class ShenandoahGenerationalControlThread;
 
 class ShenandoahGenerationalHeap : public ShenandoahHeap {
-private:
-  const size_t _min_plab_size;
-  const size_t _max_plab_size;
-
-  size_t calculate_min_plab() const;
-  size_t calculate_max_plab() const;
-
 public:
   explicit ShenandoahGenerationalHeap(ShenandoahCollectorPolicy* policy);
 
-
   static ShenandoahGenerationalHeap* heap();
 
-  inline size_t plab_min_size() const { return _min_plab_size; }
-  inline size_t plab_max_size() const { return _max_plab_size; }
-
   void print_init_logger() const override;
+  size_t unsafe_max_tlab_alloc(Thread *thread) const override;
+
+  // ---------- Evacuations and Promotions
+  //
+  oop evacuate_object(oop p, Thread* thread) override;
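+  // When evacuating to OLD_GENERATION, the copied object must also be registered with the remembered set
+  // and the relevant cards dirtied.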
+  oop try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region, ShenandoahAffiliation target_gen);
 
+  size_t plab_min_size() const { return _min_plab_size; }
+  size_t plab_max_size() const { return _max_plab_size; }
+
+  void retire_plab(PLAB* plab);
+  void retire_plab(PLAB* plab, Thread* thread);
+
+private:
+  HeapWord* allocate_from_plab(Thread* thread, size_t size, bool is_promotion);
+  HeapWord* allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion);
+  HeapWord* allocate_new_plab(size_t min_size, size_t word_size, size_t* actual_size);
+
+  const size_t _min_plab_size;
+  const size_t _max_plab_size;
+
+  static size_t calculate_min_plab();
+  static size_t calculate_max_plab();
+
+public:
   // ---------- Serviceability
   //
   void initialize_serviceability() override;
@@ -60,7 +73,7 @@ class ShenandoahGenerationalHeap : public ShenandoahHeap {
 
   void stop() override;
 
-  // Used for logging the result of a region transfer outside of the heap lock
+  // Used for logging the result of a region transfer outside the heap lock
   struct TransferResult {
     bool success;
     size_t region_count;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
index 77834cb0736..83682ba5beb 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -47,10 +47,10 @@
 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
-#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
 #include "gc/shenandoah/shenandoahControlThread.hpp"
 #include "gc/shenandoah/shenandoahFreeSet.hpp"
+#include "gc/shenandoah/shenandoahGenerationalEvacuationTask.hpp"
 #include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
 #include "gc/shenandoah/shenandoahGlobalGeneration.hpp"
 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
@@ -693,26 +693,6 @@ ShenandoahHeuristics* ShenandoahHeap::heuristics() {
   return _global_generation->heuristics();
 }
 
-ShenandoahOldHeuristics* ShenandoahHeap::old_heuristics() {
-  return (ShenandoahOldHeuristics*) _old_generation->heuristics();
-}
-
-ShenandoahYoungHeuristics* ShenandoahHeap::young_heuristics() {
-  return (ShenandoahYoungHeuristics*) _young_generation->heuristics();
-}
-
-bool ShenandoahHeap::doing_mixed_evacuations() {
-  return _old_generation->state() == ShenandoahOldGeneration::EVACUATING;
-}
-
-bool ShenandoahHeap::is_old_bitmap_stable() const {
-  return _old_generation->is_mark_complete();
-}
-
-bool ShenandoahHeap::is_gc_generation_young() const {
-  return _gc_generation != nullptr && _gc_generation->is_young();
-}
-
 size_t ShenandoahHeap::used() const {
   return global_generation()->used();
 }
@@ -992,154 +972,23 @@ HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size)
   return gclab->allocate(size);
 }
 
-// Establish a new PLAB and allocate size HeapWords within it.
-HeapWord* ShenandoahHeap::allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion) {
-  // New object should fit the PLAB size
-
-  assert(mode()->is_generational(), "PLABs only relevant to generational GC");
-  ShenandoahGenerationalHeap* generational_heap = (ShenandoahGenerationalHeap*) this;
-  const size_t plab_min_size = generational_heap->plab_min_size();
-  const size_t min_size = (size > plab_min_size)? align_up(size, CardTable::card_size_in_words()): plab_min_size;
-
-  // Figure out size of new PLAB, looking back at heuristics. Expand aggressively.  PLABs must align on size
-  // of card table in order to avoid the need for synchronization when registering newly allocated objects within
-  // the card table.
-  size_t cur_size = ShenandoahThreadLocalData::plab_size(thread);
-  if (cur_size == 0) {
-    cur_size = plab_min_size;
-  }
-
-  // Limit growth of PLABs to the smaller of ShenandoahMaxEvacLABRatio * the minimum size and ShenandoahHumongousThreshold.
-  // This minimum value is represented by generational_heap->plab_max_size().  Enforcing this limit enables more equitable
-  // distribution of available evacuation budget between the many threads that are coordinating in the evacuation effort.
-  size_t future_size = MIN2(cur_size * 2, generational_heap->plab_max_size());
-  assert(is_aligned(future_size, CardTable::card_size_in_words()), "Align by design, future_size: " SIZE_FORMAT
-         ", alignment: " SIZE_FORMAT ", cur_size: " SIZE_FORMAT ", max: " SIZE_FORMAT,
-         future_size, (size_t) CardTable::card_size_in_words(), cur_size, generational_heap->plab_max_size());
-
-  // Record new heuristic value even if we take any shortcut. This captures
-  // the case when moderately-sized objects always take a shortcut. At some point,
-  // heuristics should catch up with them.  Note that the requested cur_size may
-  // not be honored, but we remember that this is the preferred size.
-  ShenandoahThreadLocalData::set_plab_size(thread, future_size);
-  if (cur_size < size) {
-    // The PLAB to be allocated is still not large enough to hold the object. Fall back to shared allocation.
-    // This avoids retiring perfectly good PLABs in order to represent a single large object allocation.
-    return nullptr;
-  }
-
-  // Retire current PLAB, and allocate a new one.
-  PLAB* plab = ShenandoahThreadLocalData::plab(thread);
-  if (plab->words_remaining() < plab_min_size) {
-    // Retire current PLAB, and allocate a new one.
-    // CAUTION: retire_plab may register the remnant filler object with the remembered set scanner without a lock.  This
-    // is safe iff it is assured that each PLAB is a whole-number multiple of card-mark memory size and each PLAB is
-    // aligned with the start of a card's memory range.
-    retire_plab(plab, thread);
-
-    size_t actual_size = 0;
-    // allocate_new_plab resets plab_evacuated and plab_promoted and disables promotions if old-gen available is
-    // less than the remaining evacuation need.  It also adjusts plab_preallocated and expend_promoted if appropriate.
-    HeapWord* plab_buf = allocate_new_plab(min_size, cur_size, &actual_size);
-    if (plab_buf == nullptr) {
-      if (min_size == plab_min_size) {
-        // Disable plab promotions for this thread because we cannot even allocate a plab of minimal size.  This allows us
-        // to fail faster on subsequent promotion attempts.
-        ShenandoahThreadLocalData::disable_plab_promotions(thread);
-      }
-      return NULL;
-    } else {
-      ShenandoahThreadLocalData::enable_plab_retries(thread);
-    }
-    // Since the allocated PLAB may have been down-sized for alignment, plab->allocate(size) below may still fail.
-    if (ZeroTLAB) {
-      // ..and clear it.
-      Copy::zero_to_words(plab_buf, actual_size);
-    } else {
-      // ...and zap just allocated object.
-#ifdef ASSERT
-      // Skip mangling the space corresponding to the object header to
-      // ensure that the returned space is not considered parsable by
-      // any concurrent GC thread.
-      size_t hdr_size = oopDesc::header_size();
-      Copy::fill_to_words(plab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
-#endif // ASSERT
-    }
-    assert(is_aligned(actual_size, CardTable::card_size_in_words()), "Align by design");
-    plab->set_buf(plab_buf, actual_size);
-    if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
-      return nullptr;
-    }
-    return plab->allocate(size);
-  } else {
-    // If there's still at least min_size() words available within the current plab, don't retire it.  Let's gnaw
-    // away on this plab as long as we can.  Meanwhile, return nullptr to force this particular allocation request
-    // to be satisfied with a shared allocation.  By packing more promotions into the previously allocated PLAB, we
-    // reduce the likelihood of evacuation failures, and we we reduce the need for downsizing our PLABs.
-    return nullptr;
-  }
-}
-
-// TODO: It is probably most efficient to register all objects (both promotions and evacuations) that were allocated within
-// this plab at the time we retire the plab.  A tight registration loop will run within both code and data caches.  This change
-// would allow smaller and faster in-line implementation of alloc_from_plab().  Since plabs are aligned on card-table boundaries,
-// this object registration loop can be performed without acquiring a lock.
-void ShenandoahHeap::retire_plab(PLAB* plab, Thread* thread) {
-  // We don't enforce limits on plab_evacuated.  We let it consume all available old-gen memory in order to reduce
-  // probability of an evacuation failure.  We do enforce limits on promotion, to make sure that excessive promotion
-  // does not result in an old-gen evacuation failure.  Note that a failed promotion is relatively harmless.  Any
-  // object that fails to promote in the current cycle will be eligible for promotion in a subsequent cycle.
-
-  // When the plab was instantiated, its entirety was treated as if the entire buffer was going to be dedicated to
-  // promotions.  Now that we are retiring the buffer, we adjust for the reality that the plab is not entirely promotions.
-  //  1. Some of the plab may have been dedicated to evacuations.
-  //  2. Some of the plab may have been abandoned due to waste (at the end of the plab).
-  size_t not_promoted =
-    ShenandoahThreadLocalData::get_plab_preallocated_promoted(thread) - ShenandoahThreadLocalData::get_plab_promoted(thread);
-  ShenandoahThreadLocalData::reset_plab_promoted(thread);
-  ShenandoahThreadLocalData::reset_plab_evacuated(thread);
-  ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
-  if (not_promoted > 0) {
-    old_generation()->unexpend_promoted(not_promoted);
-  }
-  const size_t original_waste = plab->waste();
-  HeapWord* const top = plab->top();
-
-  // plab->retire() overwrites unused memory between plab->top() and plab->hard_end() with a dummy object to make memory parsable.
-  // It adds the size of this unused memory, in words, to plab->waste().
-  plab->retire();
-  if (top != nullptr && plab->waste() > original_waste && is_in_old(top)) {
-    // If retiring the plab created a filler object, then we need to register it with our card scanner so it can
-    // safely walk the region backing the plab.
-    log_debug(gc)("retire_plab() is registering remnant of size " SIZE_FORMAT " at " PTR_FORMAT,
-                  plab->waste() - original_waste, p2i(top));
-    card_scan()->register_object_without_lock(top);
-  }
-}
-
-void ShenandoahHeap::retire_plab(PLAB* plab) {
-  Thread* thread = Thread::current();
-  retire_plab(plab, thread);
-}
-
 void ShenandoahHeap::cancel_old_gc() {
   shenandoah_assert_safepoint();
-  assert(_old_generation != nullptr, "Should only have mixed collections in generation mode.");
-  if (_old_generation->state() == ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP) {
-    assert(!old_generation()->is_concurrent_mark_in_progress(), "Cannot be marking in IDLE");
-    assert(!old_heuristics()->has_coalesce_and_fill_candidates(), "Cannot have coalesce and fill candidates in IDLE");
-    assert(!old_heuristics()->unprocessed_old_collection_candidates(), "Cannot have mixed collection candidates in IDLE");
-    assert(!young_generation()->is_bootstrap_cycle(), "Cannot have old mark queues if IDLE");
+  assert(old_generation() != nullptr, "Should only have mixed collections in generational mode.");
+  if (old_generation()->state() == ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP) {
+#ifdef ASSERT
+    old_generation()->validate_waiting_for_bootstrap();
+#endif
   } else {
     log_info(gc)("Terminating old gc cycle.");
     // Stop marking
     old_generation()->cancel_marking();
     // Stop tracking old regions
-    old_heuristics()->abandon_collection_candidates();
+    old_generation()->abandon_collection_candidates();
     // Remove old generation access to young generation mark queues
     young_generation()->set_old_gen_task_queues(nullptr);
     // Transition to IDLE now.
-    _old_generation->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
+    old_generation()->transition_to(ShenandoahOldGeneration::WAITING_FOR_BOOTSTRAP);
   }
 }
 
@@ -1148,7 +997,7 @@ HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
                                             size_t requested_size,
                                             size_t* actual_size) {
   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
-  HeapWord* res = allocate_memory(req, false);
+  HeapWord* res = allocate_memory(req);
   if (res != nullptr) {
     *actual_size = req.actual_size();
   } else {
@@ -1161,7 +1010,7 @@ HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
                                              size_t word_size,
                                              size_t* actual_size) {
   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
-  HeapWord* res = allocate_memory(req, false);
+  HeapWord* res = allocate_memory(req);
   if (res != nullptr) {
     *actual_size = req.actual_size();
   } else {
@@ -1170,27 +1019,10 @@ HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
   return res;
 }
 
-HeapWord* ShenandoahHeap::allocate_new_plab(size_t min_size, size_t word_size, size_t* actual_size) {
-  // Align requested sizes to card-sized multiples.  Align down so that we don't violate max size of TLAB.
-  assert(is_aligned(min_size, CardTable::card_size_in_words()), "Align by design");
-  assert(word_size >= min_size, "Requested PLAB is too small");
-
-  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_plab(min_size, word_size);
-  // Note that allocate_memory() sets a thread-local flag to prohibit further promotions by this thread
-  // if we are at risk of infringing on the old-gen evacuation budget.
-  HeapWord* res = allocate_memory(req, false);
-  if (res != nullptr) {
-    *actual_size = req.actual_size();
-  } else {
-    *actual_size = 0;
-  }
-  assert(is_aligned(res, CardTable::card_size_in_words()), "Align by design");
-  return res;
-}
 
 // is_promotion is true iff this allocation is known for sure to hold the result of young-gen evacuation
 // to old-gen.  plab allocates are not known as such, since they may hold old-gen evacuations.
-HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req, bool is_promotion) {
+HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
   intptr_t pacer_epoch = 0;
   bool in_new_region = false;
   HeapWord* result = nullptr;
@@ -1202,7 +1034,7 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req, bool is_p
     }
 
     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
-      result = allocate_memory_under_lock(req, in_new_region, is_promotion);
+      result = allocate_memory_under_lock(req, in_new_region);
     }
 
     // Check that gc overhead is not exceeded.
@@ -1228,7 +1060,7 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req, bool is_p
     while (result == nullptr
         && (get_gc_no_progress_count() == 0 || original_count == shenandoah_policy()->full_gc_count())) {
       control_thread()->handle_alloc_failure(req, true);
-      result = allocate_memory_under_lock(req, in_new_region, is_promotion);
+      result = allocate_memory_under_lock(req, in_new_region);
     }
 
     if (log_is_enabled(Debug, gc, alloc)) {
@@ -1239,7 +1071,7 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req, bool is_p
 
   } else {
     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
-    result = allocate_memory_under_lock(req, in_new_region, is_promotion);
+    result = allocate_memory_under_lock(req, in_new_region);
     // Do not call handle_alloc_failure() here, because we cannot block.
     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
   }
@@ -1277,7 +1109,7 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req, bool is_p
   return result;
 }
 
-HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region, bool is_promotion) {
+HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
   bool try_smaller_lab_size = false;
   size_t smaller_lab_size;
   {
@@ -1329,7 +1161,7 @@ HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req
             promotion_avail = promotion_avail - (promotion_expended + requested_bytes);
             promotion_eligible = true;
           }
-        } else if (is_promotion) {
+        } else if (req.is_promotion()) {
           // This is a shared alloc for promotion
           size_t promotion_avail = old_generation()->get_promoted_reserve();
           size_t promotion_expended = old_generation()->get_promoted_expended();
@@ -1380,7 +1212,7 @@ HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req
               ShenandoahThreadLocalData::disable_plab_promotions(thread);
               ShenandoahThreadLocalData::set_plab_preallocated_promoted(thread, 0);
             }
-          } else if (is_promotion) {
+          } else if (req.is_promotion()) {
             // Shared promotion.  Assume size is requested_bytes.
             old_generation()->expend_promoted(requested_bytes);
           }
@@ -1404,7 +1236,7 @@ HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req
         // The thread allocating b and the thread allocating c can "race" in various ways, resulting in confusion, such as
         // last-start representing object b while first-start represents object c.  This is why we need to require all
         // register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
-        ShenandoahHeap::heap()->card_scan()->register_object(result);
+        card_scan()->register_object(result);
       }
     } else {
       // The allocation failed.  If this was a plab allocation, We've already retired it and no longer have a plab.
@@ -1457,7 +1289,7 @@ HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req
   // Note that shrinking the preferred size gets us past the gatekeeper that checks whether there's available memory to
   // satisfy the allocation request.  The reality is the actual TLAB size is likely to be even smaller, because it will
   // depend on how much memory is available within mutator regions that are not yet fully used.
-  HeapWord* result = allocate_memory_under_lock(smaller_req, in_new_region, is_promotion);
+  HeapWord* result = allocate_memory_under_lock(smaller_req, in_new_region);
   if (result != nullptr) {
     req.set_actual_size(smaller_req.actual_size());
   }
@@ -1467,7 +1299,7 @@ HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req
 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                         bool*  gc_overhead_limit_was_exceeded) {
   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
-  return allocate_memory(req, false);
+  return allocate_memory(req);
 }
 
 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
@@ -1556,102 +1388,11 @@ class ShenandoahEvacuationTask : public WorkerTask {
     ShenandoahHeapRegion* r;
     while ((r =_cs->claim_next()) != nullptr) {
       assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
-
       _sh->marked_object_iterate(r, &cl);
 
       if (ShenandoahPacing) {
         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
       }
-      if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
-        break;
-      }
-    }
-  }
-};
-
-// Unlike ShenandoahEvacuationTask, this iterates over all regions rather than just the collection set.
-// This is needed in order to promote humongous start regions if age() >= tenure threshold.
-class ShenandoahGenerationalEvacuationTask : public WorkerTask {
-private:
-  ShenandoahHeap* const _sh;
-  ShenandoahRegionIterator *_regions;
-  bool _concurrent;
-  uint _tenuring_threshold;
-
-public:
-  ShenandoahGenerationalEvacuationTask(ShenandoahHeap* sh,
-                                       ShenandoahRegionIterator* iterator,
-                                       bool concurrent) :
-    WorkerTask("Shenandoah Evacuation"),
-    _sh(sh),
-    _regions(iterator),
-    _concurrent(concurrent),
-    _tenuring_threshold(0)
-  {
-    if (_sh->mode()->is_generational()) {
-      _tenuring_threshold = _sh->age_census()->tenuring_threshold();
-    }
-  }
-
-  void work(uint worker_id) {
-    if (_concurrent) {
-      ShenandoahConcurrentWorkerSession worker_session(worker_id);
-      ShenandoahSuspendibleThreadSetJoiner stsj;
-      ShenandoahEvacOOMScope oom_evac_scope;
-      do_work();
-    } else {
-      ShenandoahParallelWorkerSession worker_session(worker_id);
-      ShenandoahEvacOOMScope oom_evac_scope;
-      do_work();
-    }
-  }
-
-private:
-  void do_work() {
-    ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
-    ShenandoahHeapRegion* r;
-    ShenandoahMarkingContext* const ctx = ShenandoahHeap::heap()->marking_context();
-    size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
-    size_t old_garbage_threshold = (region_size_bytes * ShenandoahOldGarbageThreshold) / 100;
-    while ((r = _regions->next()) != nullptr) {
-      log_debug(gc)("GenerationalEvacuationTask do_work(), looking at %s region " SIZE_FORMAT ", (age: %d) [%s, %s, %s]",
-                    r->is_old()? "old": r->is_young()? "young": "free", r->index(), r->age(),
-                    r->is_active()? "active": "inactive",
-                    r->is_humongous()? (r->is_humongous_start()? "humongous_start": "humongous_continuation"): "regular",
-                    r->is_cset()? "cset": "not-cset");
-
-      if (r->is_cset()) {
-        assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
-        _sh->marked_object_iterate(r, &cl);
-        if (ShenandoahPacing) {
-          _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
-        }
-      } else if (r->is_young() && r->is_active() && (r->age() >= _tenuring_threshold)) {
-        HeapWord* tams = ctx->top_at_mark_start(r);
-        if (r->is_humongous_start()) {
-          // We promote humongous_start regions along with their affiliated continuations during evacuation rather than
-          // doing this work during a safepoint.  We cannot put humongous regions into the collection set because that
-          // triggers the load-reference barrier (LRB) to copy on reference fetch.
-          r->promote_humongous();
-        } else if (r->is_regular() && (r->get_top_before_promote() != nullptr)) {
-          assert(r->garbage_before_padded_for_promote() < old_garbage_threshold,
-                 "Region " SIZE_FORMAT " has too much garbage for promotion", r->index());
-          assert(r->get_top_before_promote() == tams,
-                 "Region " SIZE_FORMAT " has been used for allocations before promotion", r->index());
-          // Likewise, we cannot put promote-in-place regions into the collection set because that would also trigger
-          // the LRB to copy on reference fetch.
-          r->promote_in_place();
-        }
-        // Aged humongous continuation regions are handled with their start region.  If an aged regular region has
-        // more garbage than ShenandoahOldGarbageTrheshold, we'll promote by evacuation.  If there is room for evacuation
-        // in this cycle, the region will be in the collection set.  If there is not room, the region will be promoted
-        // by evacuation in some future GC cycle.
-
-        // If an aged regular region has received allocations during the current cycle, we do not promote because the
-        // newly allocated objects do not have appropriate age; this region's age will be reset to zero at end of cycle.
-      }
-      // else, region is free, or OLD, or not in collection set, or humongous_continuation,
-      // or is young humongous_start that is too young to be promoted
 
       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
         break;
@@ -1661,9 +1402,9 @@ class ShenandoahGenerationalEvacuationTask : public WorkerTask {
 };
 
 void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
-  if (ShenandoahHeap::heap()->mode()->is_generational()) {
+  if (mode()->is_generational()) {
     ShenandoahRegionIterator regions;
-    ShenandoahGenerationalEvacuationTask task(this, &regions, concurrent);
+    ShenandoahGenerationalEvacuationTask task(ShenandoahGenerationalHeap::heap(), &regions, concurrent);
     workers()->run_task(&task);
   } else {
     ShenandoahEvacuationTask task(this, _collection_set, concurrent);
@@ -1671,15 +1412,30 @@ void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
   }
 }
 
-// try_evacuate_object registers the object and dirties the associated remembered set information when evacuating
-// to OLD_GENERATION.
+oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
+  assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
+  if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
+    // This thread went through the OOM during evac protocol. It is safe to return
+    // the forward pointer. It must not attempt to evacuate any other objects.
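+    // resolve_forwarded returns the copy published by whichever thread won the race to install
+    // the forwarding pointer.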
+    return ShenandoahBarrierSet::resolve_forwarded(p);
+  }
+
+  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
+
+  ShenandoahHeapRegion* r = heap_region_containing(p);
+  assert(!r->is_humongous(), "never evacuate humongous objects");
+
+  ShenandoahAffiliation target_gen = r->affiliation();
+  return try_evacuate_object(p, thread, r, target_gen);
+}
+
 oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
                                                ShenandoahAffiliation target_gen) {
+  assert(target_gen == YOUNG_GENERATION, "Only expect evacuations to young in this mode");
+  assert(from_region->is_young(), "Only expect evacuations from young in this mode");
   bool alloc_from_lab = true;
-  bool has_plab = false;
   HeapWord* copy = nullptr;
   size_t size = p->size();
-  bool is_promotion = (target_gen == OLD_GENERATION) && from_region->is_young();
 
 #ifdef ASSERT
   if (ShenandoahOOMDuringEvacALot &&
@@ -1688,85 +1444,28 @@ oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapReg
   } else {
 #endif
     if (UseTLAB) {
-      switch (target_gen) {
-        case YOUNG_GENERATION: {
-          copy = allocate_from_gclab(thread, size);
-          if ((copy == nullptr) && (size < ShenandoahThreadLocalData::gclab_size(thread))) {
-            // GCLAB allocation failed because we are bumping up against the limit on young evacuation reserve.  Try resetting
-            // the desired GCLAB size and retry GCLAB allocation to avoid cascading of shared memory allocations.
-            ShenandoahThreadLocalData::set_gclab_size(thread, PLAB::min_size());
-            copy = allocate_from_gclab(thread, size);
-            // If we still get nullptr, we'll try a shared allocation below.
-          }
-          break;
-        }
-        case OLD_GENERATION: {
-          assert(mode()->is_generational(), "OLD Generation only exists in generational mode");
-          ShenandoahGenerationalHeap* gen_heap = (ShenandoahGenerationalHeap*) this;
-          PLAB* plab = ShenandoahThreadLocalData::plab(thread);
-          if (plab != nullptr) {
-            has_plab = true;
-          }
-          copy = allocate_from_plab(thread, size, is_promotion);
-          if ((copy == nullptr) && (size < ShenandoahThreadLocalData::plab_size(thread)) &&
-              ShenandoahThreadLocalData::plab_retries_enabled(thread)) {
-            // PLAB allocation failed because we are bumping up against the limit on old evacuation reserve or because
-            // the requested object does not fit within the current plab but the plab still has an "abundance" of memory,
-            // where abundance is defined as >= ShenGenHeap::plab_min_size().  In the former case, we try resetting the desired
-            // PLAB size and retry PLAB allocation to avoid cascading of shared memory allocations.
-
-            // In this situation, PLAB memory is precious.  We'll try to preserve our existing PLAB by forcing
-            // this particular allocation to be shared.
-            if (plab->words_remaining() < gen_heap->plab_min_size()) {
-              ShenandoahThreadLocalData::set_plab_size(thread, gen_heap->plab_min_size());
-              copy = allocate_from_plab(thread, size, is_promotion);
-              // If we still get nullptr, we'll try a shared allocation below.
-              if (copy == nullptr) {
-                // If retry fails, don't continue to retry until we have success (probably in next GC pass)
-                ShenandoahThreadLocalData::disable_plab_retries(thread);
-              }
-            }
-            // else, copy still equals nullptr.  this causes shared allocation below, preserving this plab for future needs.
-          }
-          break;
-        }
-        default: {
-          ShouldNotReachHere();
-          break;
-        }
+      copy = allocate_from_gclab(thread, size);
+      if ((copy == nullptr) && (size < ShenandoahThreadLocalData::gclab_size(thread))) {
+        // GCLAB allocation failed because we are bumping up against the limit on young evacuation reserve.  Try resetting
+        // the desired GCLAB size and retry GCLAB allocation to avoid cascading of shared memory allocations.
+        // TODO: is this right? using PLAB::min_size() here for gc lab size?
+        ShenandoahThreadLocalData::set_gclab_size(thread, PLAB::min_size());
+        copy = allocate_from_gclab(thread, size);
+        // If we still get nullptr, we'll try a shared allocation below.
       }
     }
 
     if (copy == nullptr) {
       // If we failed to allocate in LAB, we'll try a shared allocation.
-      if (!is_promotion || !has_plab || (size > PLAB::min_size())) {
-        ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
-        copy = allocate_memory(req, is_promotion);
-        alloc_from_lab = false;
-      }
-      // else, we leave copy equal to nullptr, signaling a promotion failure below if appropriate.
-      // We choose not to promote objects smaller than PLAB::min_size() by way of shared allocations, as this is too
-      // costly.  Instead, we'll simply "evacuate" to young-gen memory (using a GCLAB) and will promote in a future
-      // evacuation pass.  This condition is denoted by: is_promotion && has_plab && (size <= PLAB::min_size())
+      ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
+      copy = allocate_memory(req);
+      alloc_from_lab = false;
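+      // Note: a shared allocation cannot be rolled back like a LAB allocation; if the forwarding race is
+      // lost, the copy is overwritten with a filler object instead (see below).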
     }
 #ifdef ASSERT
   }
 #endif
 
   if (copy == nullptr) {
-    if (target_gen == OLD_GENERATION) {
-      assert(mode()->is_generational(), "Should only be here in generational mode.");
-      if (from_region->is_young()) {
-        // Signal that promotion failed. Will evacuate this old object somewhere in young gen.
-        old_generation()->handle_failed_promotion(thread, size);
-        return nullptr;
-      } else {
-        // Remember that evacuation to old gen failed. We'll want to trigger a full gc to recover from this
-        // after the evacuation threads have finished.
-        old_generation()->handle_failed_evacuation();
-      }
-    }
-
     control_thread()->handle_alloc_failure_evac(size);
 
     _oom_evac_handler.handle_out_of_memory_during_evacuation();
@@ -1780,10 +1479,6 @@ oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapReg
 
   oop copy_val = cast_to_oop(copy);
 
-  if (mode()->is_generational() && target_gen == YOUNG_GENERATION && is_aging_cycle()) {
-    ShenandoahHeap::increase_object_age(copy_val, from_region->age() + 1);
-  }
-
   // Try to install the new forwarding pointer.
   ContinuationGCSupport::relativize_stack_chunk(copy_val);
 
@@ -1791,20 +1486,6 @@ oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapReg
   if (result == copy_val) {
     // Successfully evacuated. Our copy is now the public one!
     _evac_tracker->end_evacuation(thread, size * HeapWordSize);
-    if (mode()->is_generational()) {
-      if (target_gen == OLD_GENERATION) {
-        old_generation()->handle_evacuation(copy, size, from_region->is_young());
-      } else {
-        // When copying to the old generation above, we don't care
-        // about recording object age in the census stats.
-        assert(target_gen == YOUNG_GENERATION, "Error");
-        // We record this census only when simulating pre-adaptive tenuring behavior, or
-        // when we have been asked to record the census at evacuation rather than at mark
-        if (ShenandoahGenerationalCensusAtEvac || !ShenandoahGenerationalAdaptiveTenuring) {
-          _evac_tracker->record_age(thread, size * HeapWordSize, ShenandoahHeap::get_object_age(copy_val));
-        }
-      }
-    }
     shenandoah_assert_correct(nullptr, copy_val);
     return copy_val;
   }  else {
@@ -1817,25 +1498,7 @@ oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapReg
       // For LAB allocations, it is enough to rollback the allocation ptr. Either the next
       // object will overwrite this stale copy, or the filler object on LAB retirement will
       // do this.
-      switch (target_gen) {
-        case YOUNG_GENERATION: {
-          ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
-          break;
-        }
-        case OLD_GENERATION: {
-          ShenandoahThreadLocalData::plab(thread)->undo_allocation(copy, size);
-          if (is_promotion) {
-            ShenandoahThreadLocalData::subtract_from_plab_promoted(thread, size * HeapWordSize);
-          } else {
-            ShenandoahThreadLocalData::subtract_from_plab_evacuated(thread, size * HeapWordSize);
-          }
-          break;
-        }
-        default: {
-          ShouldNotReachHere();
-          break;
-        }
-      }
+      ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
     } else {
       // For non-LAB allocations, we have no way to retract the allocation, and
       // have to explicitly overwrite the copy with the filler object. With that overwrite,
@@ -1908,9 +1571,11 @@ class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
     assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
     assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
 
-    PLAB* plab = ShenandoahThreadLocalData::plab(thread);
-    assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
-    assert(plab->words_remaining() == 0, "PLAB should not need retirement");
+    if (ShenandoahHeap::heap()->mode()->is_generational()) {
+      PLAB* plab = ShenandoahThreadLocalData::plab(thread);
+      assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
+      assert(plab->words_remaining() == 0, "PLAB should not need retirement");
+    }
   }
 };
 
@@ -1927,15 +1592,17 @@ class ShenandoahRetireGCLABClosure : public ThreadClosure {
       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
     }
 
-    PLAB* plab = ShenandoahThreadLocalData::plab(thread);
-    assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
+    if (ShenandoahHeap::heap()->mode()->is_generational()) {
+      PLAB* plab = ShenandoahThreadLocalData::plab(thread);
+      assert(plab != nullptr, "PLAB should be initialized for %s", thread->name());
 
-    // There are two reasons to retire all plabs between old-gen evacuation passes.
-    //  1. We need to make the plab memory parsable by remembered-set scanning.
-    //  2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region
-    ShenandoahHeap::heap()->retire_plab(plab, thread);
-    if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
-      ShenandoahThreadLocalData::set_plab_size(thread, 0);
+      // There are two reasons to retire all plabs between old-gen evacuation passes.
+      //  1. We need to make the plab memory parsable by remembered-set scanning.
+      //  2. We need to establish a trustworthy UpdateWaterMark value within each old-gen heap region
+      ShenandoahGenerationalHeap::heap()->retire_plab(plab, thread);
+      if (_resize && ShenandoahThreadLocalData::plab_size(thread) > 0) {
+        ShenandoahThreadLocalData::set_plab_size(thread, 0);
+      }
     }
   }
 };
@@ -1996,12 +1663,8 @@ void ShenandoahHeap::gclabs_retire(bool resize) {
 
 // Returns size in bytes
 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
-  if (mode()->is_generational()) {
-    return MIN2(ShenandoahHeapRegion::max_tlab_size_bytes(), young_generation()->available());
-  } else {
-    // Return the max allowed size, and let the allocation path figure out the safe size for current allocation.
-    return ShenandoahHeapRegion::max_tlab_size_bytes();
-  }
+  // Return the max allowed size, and let the allocation path figure out the safe size for current allocation.
+  return ShenandoahHeapRegion::max_tlab_size_bytes();
 }
 
 size_t ShenandoahHeap::max_tlab_size() const {
@@ -2840,7 +2503,8 @@ class ShenandoahUpdateHeapRefsTask : public WorkerTask {
     _regions(regions),
     _work_chunks(work_chunks)
   {
-    log_info(gc, remset)("Scan remembered set using bitmap: %s", BOOL_TO_STR(_heap->is_old_bitmap_stable()));
+    bool old_bitmap_stable = _heap->old_generation()->is_mark_complete();
+    log_info(gc, remset)("Scan remembered set using bitmap: %s", BOOL_TO_STR(old_bitmap_stable));
   }
 
   void work(uint worker_id) {
@@ -3061,11 +2725,11 @@ class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapR
   bool _is_generational;
 
 public:
-  ShenandoahFinalUpdateRefsUpdateRegionStateClosure(
-    ShenandoahMarkingContext* ctx) : _ctx(ctx), _lock(ShenandoahHeap::heap()->lock()),
-                                     _is_generational(ShenandoahHeap::heap()->mode()->is_generational()) { }
+  ShenandoahFinalUpdateRefsUpdateRegionStateClosure(ShenandoahMarkingContext* ctx) :
+    _ctx(ctx), _lock(ShenandoahHeap::heap()->lock()),
+    _is_generational(ShenandoahHeap::heap()->mode()->is_generational()) { }
 
-  void heap_region_do(ShenandoahHeapRegion* r) {
+  void heap_region_do(ShenandoahHeapRegion* r) override {
 
     // Maintenance of region age must follow evacuation in order to account for evacuation allocations within survivor
     // regions.  We consult region age during the subsequent evacuation to determine whether certain objects need to
@@ -3074,7 +2738,7 @@ class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapR
       HeapWord *tams = _ctx->top_at_mark_start(r);
       HeapWord *top = r->top();
 
-      // Allocations move the watermark when top moves.  However compacting
+      // Allocations move the watermark when top moves.  However, compacting
       // objects will sometimes lower top beneath the watermark, after which,
       // attempts to read the watermark will assert out (watermark should not be
       // higher than top).
@@ -3104,7 +2768,7 @@ class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapR
     }
   }
 
-  bool is_thread_safe() { return true; }
+  bool is_thread_safe() override { return true; }
 };
 
 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
@@ -3133,7 +2797,6 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) {
   ShenandoahGCPhase phase(concurrent ?
                           ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
                           ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
-  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
   ShenandoahHeapLocker locker(lock());
   size_t young_cset_regions, old_cset_regions;
   size_t first_old_region, last_old_region, old_region_count;
@@ -3154,7 +2817,7 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) {
 
     // The computation of bytes_of_allocation_runway_before_gc_trigger is quite conservative so consider all of this
     // available for transfer to old. Note that transfer of humongous regions does not impact available.
-    size_t allocation_runway = young_heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
+    size_t allocation_runway = young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(young_cset_regions);
     ShenandoahGenerationalHeap::heap()->compute_old_generation_balance(allocation_runway, old_cset_regions);
 
     // Total old_available may have been expanded to hold anticipated promotions.  We trigger if the fragmented available
@@ -3169,51 +2832,7 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) {
   _free_set->rebuild(young_cset_regions, old_cset_regions);
 
   if (mode()->is_generational() && (ShenandoahGenerationalHumongousReserve > 0)) {
-    size_t old_region_span = (first_old_region <= last_old_region)? (last_old_region + 1 - first_old_region): 0;
-    size_t allowed_old_gen_span = num_regions() - (ShenandoahGenerationalHumongousReserve * num_regions() / 100);
-
-    // Tolerate lower density if total span is small.  Here's the implementation:
-    //   if old_gen spans more than 100% and density < 75%, trigger old-defrag
-    //   else if old_gen spans more than 87.5% and density < 62.5%, trigger old-defrag
-    //   else if old_gen spans more than 75% and density < 50%, trigger old-defrag
-    //   else if old_gen spans more than 62.5% and density < 37.5%, trigger old-defrag
-    //   else if old_gen spans more than 50% and density < 25%, trigger old-defrag
-    //
-    // A previous implementation was more aggressive in triggering, resulting in degraded throughput when
-    // humongous allocation was not required.
-
-    ShenandoahGeneration* old_gen = old_generation();
-    size_t old_available = old_gen->available();
-    size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
-    size_t old_unaffiliated_available = old_gen->free_unaffiliated_regions() * region_size_bytes;
-    assert(old_available >= old_unaffiliated_available, "sanity");
-    size_t old_fragmented_available = old_available - old_unaffiliated_available;
-
-    size_t old_bytes_consumed = old_region_count * region_size_bytes - old_fragmented_available;
-    size_t old_bytes_spanned = old_region_span * region_size_bytes;
-    double old_density = ((double) old_bytes_consumed) / old_bytes_spanned;
-
-    uint eighths = 8;
-    for (uint i = 0; i < 5; i++) {
-      size_t span_threshold = eighths * allowed_old_gen_span / 8;
-      double density_threshold = (eighths - 2) / 8.0;
-      if ((old_region_span >= span_threshold) && (old_density < density_threshold)) {
-        old_heuristics()->trigger_old_is_fragmented(old_density, first_old_region, last_old_region);
-        break;
-      }
-      eighths--;
-    }
-
-    size_t old_used = old_generation()->used() + old_generation()->get_humongous_waste();
-    size_t trigger_threshold = old_generation()->usage_trigger_threshold();
-    // Detects unsigned arithmetic underflow
-    assert(old_used <= capacity(),
-           "Old used (" SIZE_FORMAT ", " SIZE_FORMAT") must not be more than heap capacity (" SIZE_FORMAT ")",
-           old_generation()->used(), old_generation()->get_humongous_waste(), capacity());
-
-    if (old_used > trigger_threshold) {
-      old_heuristics()->trigger_old_has_grown();
-    }
+    old_generation()->maybe_trigger_collection(first_old_region, last_old_region, old_region_count);
   }
 }
 
@@ -3478,3 +3097,15 @@ void ShenandoahHeap::log_heap_status(const char* msg) const {
     global_generation()->log_status(msg);
   }
 }
+
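+// Clears the remembered-set cards spanning the given region. This is a no-op outside generational mode.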
+void ShenandoahHeap::clear_cards_for(ShenandoahHeapRegion* region) {
+  if (mode()->is_generational()) {
+    _card_scan->mark_range_as_empty(region->bottom(), pointer_delta(region->end(), region->bottom()));
+  }
+}
+
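+// Marks the remembered-set card covering the given location as dirty. This is a no-op outside generational mode.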
+void ShenandoahHeap::mark_card_as_dirty(void* location) {
+  if (mode()->is_generational()) {
+    _card_scan->mark_card_as_dirty((HeapWord*)location);
+  }
+}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
index f4e4df97d24..a14a1630acb 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
@@ -168,12 +168,6 @@ class ShenandoahHeap : public CollectedHeap {
   }
 
   ShenandoahHeuristics* heuristics();
-  ShenandoahOldHeuristics* old_heuristics();
-  ShenandoahYoungHeuristics* young_heuristics();
-
-  bool doing_mixed_evacuations();
-  bool is_old_bitmap_stable() const;
-  bool is_gc_generation_young() const;
 
 // ---------- Initialization, termination, identification, printing routines
 //
@@ -511,6 +505,8 @@ class ShenandoahHeap : public CollectedHeap {
   ShenandoahPhaseTimings*      phase_timings()   const { return _phase_timings;     }
   ShenandoahEvacuationTracker* evac_tracker()    const { return _evac_tracker;      }
 
+  ShenandoahEvacOOMHandler* oom_evac_handler() { return &_oom_evac_handler; }
+
   void on_cycle_start(GCCause::Cause cause, ShenandoahGeneration* generation);
   void on_cycle_end(ShenandoahGeneration* generation);
 
@@ -635,19 +631,16 @@ class ShenandoahHeap : public CollectedHeap {
 
 // ---------- Allocation support
 //
-private:
-  HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region, bool is_promotion);
-
+protected:
   inline HeapWord* allocate_from_gclab(Thread* thread, size_t size);
+
+private:
+  HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region);
   HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
   HeapWord* allocate_new_gclab(size_t min_size, size_t word_size, size_t* actual_size);
 
-  inline HeapWord* allocate_from_plab(Thread* thread, size_t size, bool is_promotion);
-  HeapWord* allocate_from_plab_slow(Thread* thread, size_t size, bool is_promotion);
-  HeapWord* allocate_new_plab(size_t min_size, size_t word_size, size_t* actual_size);
-
 public:
-  HeapWord* allocate_memory(ShenandoahAllocRequest& request, bool is_promotion);
+  HeapWord* allocate_memory(ShenandoahAllocRequest& request);
   HeapWord* mem_allocate(size_t size, bool* what) override;
   MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                size_t size,
@@ -736,7 +729,7 @@ class ShenandoahHeap : public CollectedHeap {
 
   // Evacuates or promotes object src. Returns the evacuated object, either evacuated
   // by this thread, or by some other thread.
-  inline oop evacuate_object(oop src, Thread* thread);
+  virtual oop evacuate_object(oop src, Thread* thread);
 
   // Call before/after evacuation.
   inline void enter_evacuation(Thread* t);
@@ -751,8 +744,6 @@ class ShenandoahHeap : public CollectedHeap {
   inline RememberedScanner* card_scan() { return _card_scan; }
   void clear_cards_for(ShenandoahHeapRegion* region);
   void mark_card_as_dirty(void* location);
-  void retire_plab(PLAB* plab);
-  void retire_plab(PLAB* plab, Thread* thread);
   void cancel_old_gc();
 
 // ---------- Helper functions
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
index e642205d44f..ec1ff27debe 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
@@ -41,10 +41,9 @@
 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
-#include "gc/shenandoah/shenandoahGenerationalControlThread.hpp"
+#include "gc/shenandoah/shenandoahGeneration.hpp"
 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
-#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
 #include "gc/shenandoah/mode/shenandoahMode.hpp"
 #include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
@@ -298,79 +297,12 @@ inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size
   return allocate_from_gclab_slow(thread, size);
 }
 
-inline HeapWord* ShenandoahHeap::allocate_from_plab(Thread* thread, size_t size, bool is_promotion) {
-  assert(UseTLAB, "TLABs should be enabled");
-
-  PLAB* plab = ShenandoahThreadLocalData::plab(thread);
-  HeapWord* obj;
-
-  if (plab == nullptr) {
-    assert(!thread->is_Java_thread() && !thread->is_Worker_thread(), "Performance: thread should have PLAB: %s", thread->name());
-    // No PLABs in this thread, fallback to shared allocation
-    return nullptr;
-  } else if (is_promotion && !ShenandoahThreadLocalData::allow_plab_promotions(thread)) {
-    return nullptr;
-  }
-  // if plab->word_size() <= 0, thread's plab not yet initialized for this pass, so allow_plab_promotions() is not trustworthy
-  obj = plab->allocate(size);
-  if ((obj == nullptr) && (plab->words_remaining() < ShenandoahGenerationalHeap::heap()->plab_min_size())) {
-    // allocate_from_plab_slow will establish allow_plab_promotions(thread) for future invocations
-    obj = allocate_from_plab_slow(thread, size, is_promotion);
-  }
-  // if plab->words_remaining() >= ShenGenHeap::heap()->plab_min_size(), just return nullptr so we can use a shared allocation
-  if (obj == nullptr) {
-    return nullptr;
-  }
-
-  if (is_promotion) {
-    ShenandoahThreadLocalData::add_to_plab_promoted(thread, size * HeapWordSize);
-  } else {
-    ShenandoahThreadLocalData::add_to_plab_evacuated(thread, size * HeapWordSize);
-  }
-  return obj;
-}
-
 inline ShenandoahAgeCensus* ShenandoahHeap::age_census() const {
   assert(mode()->is_generational(), "Only in generational mode");
   assert(_age_census != nullptr, "Error: not initialized");
   return _age_census;
 }
 
-inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
-  assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
-  if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
-    // This thread went through the OOM during evac protocol and it is safe to return
-    // the forward pointer. It must not attempt to evacuate any more.
-    return ShenandoahBarrierSet::resolve_forwarded(p);
-  }
-
-  assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
-
-  ShenandoahHeapRegion* r = heap_region_containing(p);
-  assert(!r->is_humongous(), "never evacuate humongous objects");
-
-  ShenandoahAffiliation target_gen = r->affiliation();
-  if (mode()->is_generational() && ShenandoahHeap::heap()->is_gc_generation_young() &&
-      target_gen == YOUNG_GENERATION) {
-    markWord mark = p->mark();
-    if (mark.is_marked()) {
-      // Already forwarded.
-      return ShenandoahBarrierSet::resolve_forwarded(p);
-    }
-    if (mark.has_displaced_mark_helper()) {
-      // We don't want to deal with MT here just to ensure we read the right mark word.
-      // Skip the potential promotion attempt for this one.
-    } else if (r->age() + mark.age() >= age_census()->tenuring_threshold()) {
-      oop result = try_evacuate_object(p, thread, r, OLD_GENERATION);
-      if (result != nullptr) {
-        return result;
-      }
-      // If we failed to promote this aged object, we'll fall through to code below and evacuate to young-gen.
-    }
-  }
-  return try_evacuate_object(p, thread, r, target_gen);
-}
-
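
A hedged sketch of the tenuring decision embedded in the removed evacuate_object(): during a young generational cycle, an object whose region age plus mark-word age reaches the tenuring threshold is first tried in old (promotion); if that fails, or the object is not old enough, it is evacuated within young. The enum and function below are illustrative, not HotSpot types.

enum class Gen { Young, Old };

Gen choose_target_generation(bool young_generational_cycle,
                             unsigned region_age, unsigned object_age,
                             unsigned tenuring_threshold) {
  if (young_generational_cycle && region_age + object_age >= tenuring_threshold) {
    return Gen::Old;    // attempt promotion; on failure the caller retries within young
  }
  return Gen::Young;
}
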
 void ShenandoahHeap::increase_object_age(oop obj, uint additional_age) {
   // This operates on new copy of an object. This means that the object's mark-word
   // is thread-local and therefore safe to access. However, when the mark is
@@ -468,7 +400,7 @@ inline bool ShenandoahHeap::is_in_old(const void* p) const {
 }
 
 inline bool ShenandoahHeap::is_old(oop obj) const {
-  return is_gc_generation_young() && is_in_old(obj);
+  return active_generation()->is_young() && is_in_old(obj);
 }
 
 inline ShenandoahAffiliation ShenandoahHeap::region_affiliation(const ShenandoahHeapRegion *r) {
@@ -728,16 +660,4 @@ inline ShenandoahMarkingContext* ShenandoahHeap::marking_context() const {
   return _marking_context;
 }
 
-inline void ShenandoahHeap::clear_cards_for(ShenandoahHeapRegion* region) {
-  if (mode()->is_generational()) {
-    _card_scan->mark_range_as_empty(region->bottom(), pointer_delta(region->end(), region->bottom()));
-  }
-}
-
-inline void ShenandoahHeap::mark_card_as_dirty(void* location) {
-  if (mode()->is_generational()) {
-    _card_scan->mark_card_as_dirty((HeapWord*)location);
-  }
-}
-
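
The two card-table helpers removed here are thin generational-mode guards around the remembered-set scanner. As a self-contained illustration (the constants follow HotSpot's default 512-byte cards and dirty value 0, but are assumptions of this sketch), dirtying a card amounts to indexing a byte table by the store location:

#include <cstddef>
#include <cstdint>

constexpr int     kCardShift = 9;   // 512-byte cards (assumed default)
constexpr uint8_t kDirtyCard = 0;   // dirty value (assumed)

// One byte per 2^kCardShift bytes of heap; dirty the byte covering the store location.
void mark_card_as_dirty_sketch(uint8_t* card_table, const char* heap_base, const void* location) {
  size_t offset = static_cast<size_t>(static_cast<const char*>(location) - heap_base);
  card_table[offset >> kCardShift] = kDirtyCard;
}
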
 #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_INLINE_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
index e641f1ecccf..bd2c835e186 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
@@ -547,56 +547,6 @@ bool ShenandoahHeapRegion::oop_fill_and_coalesce() {
   return true;
 }
 
-void ShenandoahHeapRegion::global_oop_iterate_and_fill_dead(OopIterateClosure* blk) {
-  if (!is_active()) return;
-  if (is_humongous()) {
-    // No need to fill dead within humongous regions.  Either the entire region is dead, or the entire region is
-    // unchanged.  A humongous region holds no more than one humongous object.
-    oop_iterate_humongous(blk);
-  } else {
-    global_oop_iterate_objects_and_fill_dead(blk);
-  }
-}
-
-void ShenandoahHeapRegion::global_oop_iterate_objects_and_fill_dead(OopIterateClosure* blk) {
-  assert(!is_humongous(), "no humongous region here");
-  HeapWord* obj_addr = bottom();
-
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-  ShenandoahMarkingContext* marking_context = heap->marking_context();
-  RememberedScanner* rem_set_scanner = heap->card_scan();
-  // Objects allocated above TAMS are not marked, but are considered live for purposes of current GC efforts.
-  HeapWord* t = marking_context->top_at_mark_start(this);
-
-  assert(heap->active_generation()->is_mark_complete(), "sanity");
-
-  while (obj_addr < t) {
-    oop obj = cast_to_oop(obj_addr);
-    if (marking_context->is_marked(obj)) {
-      assert(obj->klass() != nullptr, "klass should not be nullptr");
-      // when promoting an entire region, we have to register the marked objects as well
-      obj_addr += obj->oop_iterate_size(blk);
-    } else {
-      // Object is not marked.  Coalesce and fill dead object with dead neighbors.
-      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, t);
-      assert(next_marked_obj <= t, "next marked object cannot exceed top");
-      size_t fill_size = next_marked_obj - obj_addr;
-      assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size");
-      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
-      // coalesce_objects() unregisters all but first object subsumed within coalesced range.
-      rem_set_scanner->coalesce_objects(obj_addr, fill_size);
-      obj_addr = next_marked_obj;
-    }
-  }
-
-  // Any object above TAMS and below top() is considered live.
-  t = top();
-  while (obj_addr < t) {
-    oop obj = cast_to_oop(obj_addr);
-    obj_addr += obj->oop_iterate_size(blk);
-  }
-}
-
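
A simplified, runnable sketch of the coalesce-and-fill walk this removed function performed below TAMS: marked objects are visited and stepped over, while each run of unmarked (dead) objects is collapsed into a single filler span so the region stays parsable. SketchObject and the word-offset bookkeeping are illustrative stand-ins for the marking bitmap and heap addresses.

#include <cstddef>
#include <utility>
#include <vector>

struct SketchObject { size_t size_words; bool marked; };

// Walk a region laid out as a sequence of objects and return the filler spans
// (start offset, length in words) that replace each run of dead objects.
std::vector<std::pair<size_t, size_t>> coalesce_dead(const std::vector<SketchObject>& objs) {
  std::vector<std::pair<size_t, size_t>> fillers;
  size_t addr = 0;
  size_t i = 0;
  while (i < objs.size()) {
    if (objs[i].marked) {
      addr += objs[i].size_words;                       // live: visit and step over
      ++i;
    } else {
      size_t start = addr;
      size_t fill = 0;
      while (i < objs.size() && !objs[i].marked) {      // coalesce the whole dead run
        fill += objs[i].size_words;
        ++i;
      }
      fillers.emplace_back(start, fill);                // one filler replaces the run
      addr += fill;
    }
  }
  return fillers;
}
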
 // DO NOT CANCEL.  If this worker thread has accepted responsibility for scanning a particular range of addresses, it
 // must finish the work before it can be cancelled.
 void ShenandoahHeapRegion::oop_iterate_humongous_slice(OopIterateClosure* blk, bool dirty_only,
@@ -638,24 +588,6 @@ void ShenandoahHeapRegion::oop_iterate_humongous_slice(OopIterateClosure* blk, b
   }
 }
 
-void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk, HeapWord* start, size_t words) {
-  assert(is_humongous(), "only humongous region here");
-  // Find head.
-  ShenandoahHeapRegion* r = humongous_start_region();
-  assert(r->is_humongous_start(), "need humongous head here");
-  oop obj = cast_to_oop(r->bottom());
-  obj->oop_iterate(blk, MemRegion(start, start + words));
-}
-
-void ShenandoahHeapRegion::oop_iterate_humongous(OopIterateClosure* blk) {
-  assert(is_humongous(), "only humongous region here");
-  // Find head.
-  ShenandoahHeapRegion* r = humongous_start_region();
-  assert(r->is_humongous_start(), "need humongous head here");
-  oop obj = cast_to_oop(r->bottom());
-  obj->oop_iterate(blk, MemRegion(bottom(), top()));
-}
-
 ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
   ShenandoahHeap* heap = ShenandoahHeap::heap();
   assert(is_humongous(), "Must be a part of the humongous region");
@@ -981,169 +913,6 @@ void ShenandoahHeapRegion::set_affiliation(ShenandoahAffiliation new_affiliation
   heap->set_affiliation(this, new_affiliation);
 }
 
-// When we promote a region in place, we can continue to use the established marking context to guide subsequent remembered
-// set scans of this region's content.  The region will be coalesced and filled prior to the next old-gen marking effort.
-// We identify the entirety of the region as DIRTY to force the next remembered set scan to identify the "interesting pointers"
-// contained herein.
-void ShenandoahHeapRegion::promote_in_place() {
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-  ShenandoahMarkingContext* marking_context = heap->marking_context();
-  HeapWord* tams = marking_context->top_at_mark_start(this);
-  assert(heap->active_generation()->is_mark_complete(), "sanity");
-  assert(!heap->is_concurrent_old_mark_in_progress(), "Cannot promote in place during old marking");
-  assert(is_young(), "Only young regions can be promoted");
-  assert(is_regular(), "Use different service to promote humongous regions");
-  assert(age() >= heap->age_census()->tenuring_threshold(), "Only promote regions that are sufficiently aged");
-
-  ShenandoahOldGeneration* old_gen = heap->old_generation();
-  ShenandoahYoungGeneration* young_gen = heap->young_generation();
-  size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
-
-  assert(get_top_before_promote() == tams, "Cannot promote regions in place if top has advanced beyond TAMS");
-
-  // Rebuild the remembered set information and mark the entire range as DIRTY.  We do NOT scan the content of this
-  // range to determine which cards need to be DIRTY.  That would force us to scan the region twice, once now, and
-  // once during the subsequent remembered set scan.  Instead, we blindly (conservatively) mark everything as DIRTY
-  // now and then sort out the CLEAN pages during the next remembered set scan.
-  //
-  // Rebuilding the remembered set consists of clearing all object registrations (reset_object_range()) here,
-  // then registering every live object and every coalesced range of free objects in the loop that follows.
-  heap->card_scan()->reset_object_range(bottom(), end());
-  heap->card_scan()->mark_range_as_dirty(bottom(), get_top_before_promote() - bottom());
-
-  // TODO: use an existing coalesce-and-fill function rather than replicating the code here.
-  HeapWord* obj_addr = bottom();
-  while (obj_addr < tams) {
-    oop obj = cast_to_oop(obj_addr);
-    if (marking_context->is_marked(obj)) {
-      assert(obj->klass() != nullptr, "klass should not be NULL");
-      // This thread is responsible for registering all objects in this region.  No need for lock.
-      heap->card_scan()->register_object_without_lock(obj_addr);
-      obj_addr += obj->size();
-    } else {
-      HeapWord* next_marked_obj = marking_context->get_next_marked_addr(obj_addr, tams);
-      assert(next_marked_obj <= tams, "next marked object cannot exceed tams");
-      size_t fill_size = next_marked_obj - obj_addr;
-      assert(fill_size >= ShenandoahHeap::min_fill_size(), "previously allocated objects known to be larger than min_size");
-      ShenandoahHeap::fill_with_object(obj_addr, fill_size);
-      heap->card_scan()->register_object_without_lock(obj_addr);
-      obj_addr = next_marked_obj;
-    }
-  }
-  // We do not need to scan above TAMS because restored top equals tams
-  assert(obj_addr == tams, "Expect loop to terminate when obj_addr equals tams");
-
-  {
-    ShenandoahHeapLocker locker(heap->lock());
-
-    HeapWord* update_watermark = get_update_watermark();
-
-    // Now that this region is affiliated with old, we can allow it to receive allocations, though it may not be in the
-    // is_collector_free range.
-    restore_top_before_promote();
-
-    size_t region_capacity = free();
-    size_t region_used = used();
-
-    // The update_watermark was likely established while we had the artificially high value of top.  Make it sane now.
-    assert(update_watermark >= top(), "original top cannot exceed preserved update_watermark");
-    set_update_watermark(top());
-
-    // Unconditionally transfer one region from young to old to represent the newly promoted region.
-    // This expands old and shrinks new by the size of one region.  Strictly, we do not "need" to expand old
-    // if there are already enough unaffiliated regions in old to account for this newly promoted region.
-    // However, if we do not transfer the capacities, we end up reducing the amount of memory that would have
-    // otherwise been available to hold old evacuations, because old available is max_capacity - used and now
-    // we would be trading a fully empty region for a partially used region.
-
-    young_gen->decrease_used(region_used);
-    young_gen->decrement_affiliated_region_count();
-
-    // transfer_to_old() increases capacity of old and decreases capacity of young
-    heap->generation_sizer()->force_transfer_to_old(1);
-    set_affiliation(OLD_GENERATION);
-
-    old_gen->increment_affiliated_region_count();
-    old_gen->increase_used(region_used);
-
-    // add_old_collector_free_region() increases promoted_reserve() if available space exceeds plab_min_size()
-    heap->free_set()->add_old_collector_free_region(this);
-  }
-}
-
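
A minimal sketch of the accounting side of the removed promote_in_place(): the promoted region's used bytes and one affiliated region move from young to old, and one region of capacity is transferred so old-gen available space is not reduced by trading an empty region for a partially used one. GenAccounting is an illustrative stand-in for the generation objects.

#include <cstddef>

struct GenAccounting {
  size_t used;
  size_t affiliated_regions;
  size_t capacity;
};

// Move one region's worth of usage and capacity from young to old.
void promote_region_accounting(GenAccounting& young, GenAccounting& old_gen,
                               size_t region_used, size_t region_size_bytes) {
  young.used               -= region_used;
  young.affiliated_regions -= 1;
  young.capacity           -= region_size_bytes;     // the force_transfer_to_old(1) step
  old_gen.capacity           += region_size_bytes;
  old_gen.affiliated_regions += 1;
  old_gen.used               += region_used;
}
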
-void ShenandoahHeapRegion::promote_humongous() {
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-  ShenandoahMarkingContext* marking_context = heap->marking_context();
-  assert(heap->active_generation()->is_mark_complete(), "sanity");
-  assert(is_young(), "Only young regions can be promoted");
-  assert(is_humongous_start(), "Should not promote humongous continuation in isolation");
-  assert(age() >= heap->age_census()->tenuring_threshold(), "Only promote regions that are sufficiently aged");
-
-  ShenandoahGeneration* old_generation = heap->old_generation();
-  ShenandoahGeneration* young_generation = heap->young_generation();
-
-  oop obj = cast_to_oop(bottom());
-  assert(marking_context->is_marked(obj), "promoted humongous object should be alive");
-
-  // TODO: Consider not promoting humongous objects that represent primitive arrays.  Leaving a primitive array
-  // (obj->is_typeArray()) in young-gen is harmless because these objects are never relocated and they are not
-  // scanned.  Leaving primitive arrays in young-gen memory allows their memory to be reclaimed more quickly when
-  // it becomes garbage.  Better to not make this change until sizes of young-gen and old-gen are completely
-  // adaptive, as leaving primitive arrays in young-gen might be perceived as an "astonishing result" by someone
-  // has carefully analyzed the required sizes of an application's young-gen and old-gen.
-  size_t used_bytes = obj->size() * HeapWordSize;
-  size_t spanned_regions = ShenandoahHeapRegion::required_regions(used_bytes);
-  size_t humongous_waste = spanned_regions * ShenandoahHeapRegion::region_size_bytes() - obj->size() * HeapWordSize;
-  size_t index_limit = index() + spanned_regions;
-  {
-    // We need to grab the heap lock in order to avoid a race when changing the affiliations of spanned_regions from
-    // young to old.
-    ShenandoahHeapLocker locker(heap->lock());
-
-    // We promote humongous objects unconditionally, without checking for availability.  We adjust
-    // usage totals, including humongous waste, after evacuation is done.
-    log_debug(gc)("promoting humongous region " SIZE_FORMAT ", spanning " SIZE_FORMAT, index(), spanned_regions);
-
-    young_generation->decrease_used(used_bytes);
-    young_generation->decrease_humongous_waste(humongous_waste);
-    young_generation->decrease_affiliated_region_count(spanned_regions);
-
-    // transfer_to_old() increases capacity of old and decreases capacity of young
-    heap->generation_sizer()->force_transfer_to_old(spanned_regions);
-
-    // For this region and each humongous continuation region spanned by this humongous object, change
-    // affiliation to OLD_GENERATION and adjust the generation-use tallies.  The remnant of memory
-    // in the last humongous region that is not spanned by obj is currently not used.
-    for (size_t i = index(); i < index_limit; i++) {
-      ShenandoahHeapRegion* r = heap->get_region(i);
-      log_debug(gc)("promoting humongous region " SIZE_FORMAT ", from " PTR_FORMAT " to " PTR_FORMAT,
-                    r->index(), p2i(r->bottom()), p2i(r->top()));
-      // We mark the entire humongous object's range as dirty after loop terminates, so no need to dirty the range here
-      r->set_affiliation(OLD_GENERATION);
-    }
-
-    old_generation->increase_affiliated_region_count(spanned_regions);
-    old_generation->increase_used(used_bytes);
-    old_generation->increase_humongous_waste(humongous_waste);
-  }
-
-  // Since this region may have served previously as OLD, it may hold obsolete object range info.
-  heap->card_scan()->reset_object_range(bottom(), bottom() + spanned_regions * ShenandoahHeapRegion::region_size_words());
-  // Since the humongous region holds only one object, no lock is necessary for this register_object() invocation.
-  heap->card_scan()->register_object_without_lock(bottom());
-
-  if (obj->is_typeArray()) {
-    // Primitive arrays don't need to be scanned.
-    log_debug(gc)("Clean cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
-                  index(), p2i(bottom()), p2i(bottom() + obj->size()));
-    heap->card_scan()->mark_range_as_clean(bottom(), obj->size());
-  } else {
-    log_debug(gc)("Dirty cards for promoted humongous object (Region " SIZE_FORMAT ") from " PTR_FORMAT " to " PTR_FORMAT,
-                  index(), p2i(bottom()), p2i(bottom() + obj->size()));
-    heap->card_scan()->mark_range_as_dirty(bottom(), obj->size());
-  }
-}
-
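
The humongous-waste figure used by the removed promote_humongous() follows from whole-region spanning: a humongous object occupies ceil(size / region_size) regions, and the waste is the unused tail of the last region. A small illustrative helper:

#include <cstddef>

// Waste is the unused tail of the last region spanned by the humongous object.
size_t humongous_waste_bytes(size_t object_bytes, size_t region_size_bytes) {
  size_t spanned_regions = (object_bytes + region_size_bytes - 1) / region_size_bytes;
  return spanned_regions * region_size_bytes - object_bytes;
}
// Example: a 9 MB object with 4 MB regions spans 3 regions and wastes 3 MB.
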
 void ShenandoahHeapRegion::decrement_humongous_waste() const {
   assert(is_humongous(), "Should only use this for humongous regions");
   size_t waste_bytes = free();
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp
index 92b748bfa48..e9855827fd5 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp
@@ -403,13 +403,6 @@ class ShenandoahHeapRegion {
   // Like oop_fill_and_coalesce(), but without honoring cancellation requests.
   bool oop_fill_and_coalesce_without_cancel();
 
-  // During global collections, this service iterates through an old-gen heap region that is not part of collection
-  // set to fill and register ranges of dead memory.  Note that live objects were previously registered.  Some dead objects
-  // that are subsumed into coalesced ranges of dead memory need to be "unregistered".
-  void global_oop_iterate_and_fill_dead(OopIterateClosure* cl);
-  void oop_iterate_humongous(OopIterateClosure* cl);
-  void oop_iterate_humongous(OopIterateClosure* cl, HeapWord* start, size_t words);
-
   // Invoke closure on every reference contained within the humongous object that spans this humongous
   // region if the reference is contained within a DIRTY card and the reference is no more than words following
   // start within the humongous object.
@@ -476,19 +469,11 @@ class ShenandoahHeapRegion {
 
   CENSUS_NOISE(void clear_youth() { _youth = 0; })
 
-  // Register all objects.  Set all remembered set cards to dirty.
-  void promote_humongous();
-  void promote_in_place();
-
 private:
   void decrement_humongous_waste() const;
   void do_commit();
   void do_uncommit();
 
-  // This is an old-region that was not part of the collection set during a GLOBAL collection.  We coalesce the dead
-  // objects, but do not need to register the live objects as they are already registered.
-  void global_oop_iterate_objects_and_fill_dead(OopIterateClosure* cl);
-
   inline void internal_increase_live_data(size_t s);
 
   void set_state(RegionState to);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
index 3121b06ec47..c80c13bbd76 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
@@ -27,7 +27,6 @@
 #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_INLINE_HPP
 
 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
-
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
 #include "runtime/atomic.hpp"
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMark.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMark.inline.hpp
index 29c79be8555..604dd1077a0 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahMark.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMark.inline.hpp
@@ -306,9 +306,9 @@ inline void ShenandoahMark::mark_through_ref(T *p, ShenandoahObjToScanQueue* q,
       shenandoah_assert_marked(p, obj);
       // TODO: As implemented herein, GLOBAL collections reconstruct the card table during GLOBAL concurrent
       // marking. Note that the card table is cleaned at init_mark time so it needs to be reconstructed to support
-      // future young-gen collections.  It might be better to reconstruct card table in
-      // ShenandoahHeapRegion::global_oop_iterate_and_fill_dead.  We could either mark all live memory as dirty, or could
-      // use the GLOBAL update-refs scanning of pointers to determine precisely which cards to flag as dirty.
+      // future young-gen collections.  It might be better to reconstruct the card table in a different phase.  We
+      // could either mark all live memory as dirty, or use the GLOBAL update-refs scanning of pointers to determine
+      // precisely which cards to flag as dirty.
       if (GENERATION == YOUNG && heap->is_in_old(p)) {
         // Mark card as dirty because remembered set scanning still finds interesting pointer.
         heap->mark_card_as_dirty((HeapWord*)p);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.hpp b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.hpp
index 1a77a0beb00..947070c3ae1 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMarkingContext.hpp
@@ -33,6 +33,7 @@
 #include "oops/oopsHierarchy.hpp"
 
 class ShenandoahObjToScanQueueSet;
+class ShenandoahHeapRegion;
 
 /**
  * Encapsulate a marking bitmap with the top-at-mark-start and top-bitmaps array.
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp
index 3766dfdf5c5..766d5deaf42 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.cpp
@@ -38,8 +38,8 @@
 #include "utilities/events.hpp"
 
 
-ShenandoahOldGC::ShenandoahOldGC(ShenandoahGeneration* generation, ShenandoahSharedFlag& allow_preemption) :
-    ShenandoahConcurrentGC(generation, false), _allow_preemption(allow_preemption) {
+ShenandoahOldGC::ShenandoahOldGC(ShenandoahOldGeneration* generation, ShenandoahSharedFlag& allow_preemption) :
+    ShenandoahConcurrentGC(generation, false), _old_generation(generation), _allow_preemption(allow_preemption) {
 }
 
 // Final mark for old-gen is different than for young or old, so we
@@ -85,8 +85,8 @@ void ShenandoahOldGC::op_final_mark() {
 
 bool ShenandoahOldGC::collect(GCCause::Cause cause) {
   auto heap = ShenandoahGenerationalHeap::heap();
-  assert(!heap->doing_mixed_evacuations(), "Should not start an old gc with pending mixed evacuations");
-  assert(!heap->is_prepare_for_old_mark_in_progress(), "Old regions need to be parsable during concurrent mark.");
+  assert(!_old_generation->is_doing_mixed_evacuations(), "Should not start an old gc with pending mixed evacuations");
+  assert(!_old_generation->is_preparing_for_mark(), "Old regions need to be parsable during concurrent mark.");
 
   // Enable preemption of old generation mark.
   _allow_preemption.set();
@@ -149,7 +149,7 @@ bool ShenandoahOldGC::collect(GCCause::Cause cause) {
 
   // We do not rebuild_free following increments of old marking because memory has not been reclaimed. However, we may
   // need to transfer memory to OLD in order to efficiently support the mixed evacuations that might immediately follow.
-  size_t allocation_runway = heap->young_heuristics()->bytes_of_allocation_runway_before_gc_trigger(0);
+  size_t allocation_runway = heap->young_generation()->heuristics()->bytes_of_allocation_runway_before_gc_trigger(0);
   heap->compute_old_generation_balance(allocation_runway, 0);
 
   ShenandoahGenerationalHeap::TransferResult result;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.hpp
index e6ca77226d2..74932b835fb 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOldGC.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGC.hpp
@@ -29,18 +29,18 @@
 #include "gc/shenandoah/shenandoahConcurrentGC.hpp"
 #include "gc/shenandoah/shenandoahVerifier.hpp"
 
-class ShenandoahGeneration;
+class ShenandoahOldGeneration;
 
 class ShenandoahOldGC : public ShenandoahConcurrentGC {
  public:
-  ShenandoahOldGC(ShenandoahGeneration* generation, ShenandoahSharedFlag& allow_preemption);
+  ShenandoahOldGC(ShenandoahOldGeneration* generation, ShenandoahSharedFlag& allow_preemption);
   bool collect(GCCause::Cause cause);
 
  protected:
   virtual void op_final_mark();
 
  private:
-
+  ShenandoahOldGeneration* _old_generation;
   ShenandoahSharedFlag& _allow_preemption;
 };
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp
index cf28a8be45f..b76c2221655 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp
@@ -40,6 +40,7 @@
 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
+#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
 #include "gc/shenandoah/shenandoahUtils.hpp"
 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
@@ -301,7 +302,6 @@ bool ShenandoahOldGeneration::coalesce_and_fill() {
   ShenandoahHeap* const heap = ShenandoahHeap::heap();
   transition_to(FILLING);
 
-  ShenandoahOldHeuristics* old_heuristics = heap->old_heuristics();
   WorkerThreads* workers = heap->workers();
   uint nworkers = workers->active_workers();
 
@@ -310,13 +310,13 @@ bool ShenandoahOldGeneration::coalesce_and_fill() {
   // This code will see the same set of regions to fill on each resumption as it did
   // on the initial run. That's okay because each region keeps track of its own coalesce
   // and fill state. Regions that were filled on a prior attempt will not try to fill again.
-  uint coalesce_and_fill_regions_count = old_heuristics->get_coalesce_and_fill_candidates(_coalesce_and_fill_region_array);
+  uint coalesce_and_fill_regions_count = heuristics()->get_coalesce_and_fill_candidates(_coalesce_and_fill_region_array);
   assert(coalesce_and_fill_regions_count <= heap->num_regions(), "Sanity");
   ShenandoahConcurrentCoalesceAndFillTask task(nworkers, _coalesce_and_fill_region_array, coalesce_and_fill_regions_count);
 
   workers->run_task(&task);
   if (task.is_completed()) {
-    old_heuristics->abandon_collection_candidates();
+    abandon_collection_candidates();
     return true;
   } else {
     // Coalesce-and-fill has been preempted. We'll finish that effort in the future.  Do not invoke
@@ -431,7 +431,7 @@ void ShenandoahOldGeneration::transition_to(State new_state) {
 //               |   |          +-----------------+     |
 //               |   |            |                     |
 //               |   |            | Filling Complete    | <-> A global collection may
-//               |   |            v                     |     may move the old generation
+//               |   |            v                     |     move the old generation
 //               |   |          +-----------------+     |     directly from waiting for
 //               |   +--------> |     WAITING     |     |     bootstrap to filling or
 //               |   |    +---- |  FOR BOOTSTRAP  | ----+     evacuating.
@@ -465,14 +465,12 @@ void ShenandoahOldGeneration::validate_transition(State new_state) {
   switch (new_state) {
     case FILLING:
       assert(_state != BOOTSTRAPPING, "Cannot begin making old regions parsable after bootstrapping");
-      assert(heap->is_old_bitmap_stable(), "Cannot begin filling without first completing marking, state is '%s'", state_name(_state));
+      assert(is_mark_complete(), "Cannot begin filling without first completing marking, state is '%s'", state_name(_state));
       assert(_old_heuristics->has_coalesce_and_fill_candidates(), "Cannot begin filling without something to fill.");
       break;
     case WAITING_FOR_BOOTSTRAP:
       // GC cancellation can send us back here from any state.
-      assert(!heap->is_concurrent_old_mark_in_progress(), "Cannot become ready for bootstrap during old mark.");
-      assert(_old_heuristics->unprocessed_old_collection_candidates() == 0, "Cannot become ready for bootstrap with collection candidates");
-      assert(heap->young_generation()->old_gen_task_queues() == nullptr, "Cannot become ready for bootstrap when still setup for bootstrapping.");
+      validate_waiting_for_bootstrap();
       break;
     case BOOTSTRAPPING:
       assert(_state == WAITING_FOR_BOOTSTRAP, "Cannot reset bitmap without making old regions parsable, state is '%s'", state_name(_state));
@@ -492,6 +490,17 @@ void ShenandoahOldGeneration::validate_transition(State new_state) {
       fatal("Unknown new state");
   }
 }
+
+bool ShenandoahOldGeneration::validate_waiting_for_bootstrap() {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  assert(!heap->is_concurrent_old_mark_in_progress(), "Cannot become ready for bootstrap during old mark.");
+  assert(heap->young_generation()->old_gen_task_queues() == nullptr, "Cannot become ready for bootstrap when still setup for bootstrapping.");
+  assert(!is_concurrent_mark_in_progress(), "Cannot be marking in IDLE");
+  assert(!heap->young_generation()->is_bootstrap_cycle(), "Cannot have old mark queues if IDLE");
+  assert(!heuristics()->has_coalesce_and_fill_candidates(), "Cannot have coalesce and fill candidates in IDLE");
+  assert(heuristics()->unprocessed_old_collection_candidates() == 0, "Cannot have mixed collection candidates in IDLE");
+  return true;
+}
 #endif
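
The new validate_waiting_for_bootstrap() follows the common HotSpot idiom of a debug-only validator that performs its checks via assert() and returns true, so call sites can wrap the whole thing in a single assert. A stripped-down sketch of the pattern, with hypothetical parameters in place of the real heap and heuristics queries:

#include <cassert>

bool validate_waiting_for_bootstrap_sketch(bool old_mark_in_progress, int candidate_count) {
  assert(!old_mark_in_progress && "Cannot become ready for bootstrap during old mark");
  assert(candidate_count == 0 && "Cannot have collection candidates when waiting for bootstrap");
  return true;   // enables: assert(validate_waiting_for_bootstrap_sketch(...));
}
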
 
 ShenandoahHeuristics* ShenandoahOldGeneration::initialize_heuristics(ShenandoahMode* gc_mode) {
@@ -570,6 +579,74 @@ void ShenandoahOldGeneration::handle_evacuation(HeapWord* obj, size_t words, boo
   }
 }
 
+bool ShenandoahOldGeneration::has_unprocessed_collection_candidates() {
+  return _old_heuristics->unprocessed_old_collection_candidates() > 0;
+}
+
+size_t ShenandoahOldGeneration::unprocessed_collection_candidates_live_memory() {
+  return _old_heuristics->unprocessed_old_collection_candidates_live_memory();
+}
+
+void ShenandoahOldGeneration::abandon_collection_candidates() {
+  _old_heuristics->abandon_collection_candidates();
+}
+
+void ShenandoahOldGeneration::prepare_for_mixed_collections_after_global_gc() {
+  assert(is_mark_complete(), "Expected old generation mark to be complete after global cycle.");
+  _old_heuristics->prepare_for_old_collections();
+  log_info(gc)("After choosing global collection set, mixed candidates: " UINT32_FORMAT ", coalescing candidates: " SIZE_FORMAT,
+               _old_heuristics->unprocessed_old_collection_candidates(),
+               _old_heuristics->coalesce_and_fill_candidates_count());
+}
+
+void ShenandoahOldGeneration::maybe_trigger_collection(size_t first_old_region, size_t last_old_region, size_t old_region_count) {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  const size_t old_region_span = (first_old_region <= last_old_region)? (last_old_region + 1 - first_old_region): 0;
+  const size_t allowed_old_gen_span = heap->num_regions() - (ShenandoahGenerationalHumongousReserve * heap->num_regions() / 100);
+
+  // Tolerate lower density if total span is small.  Here's the implementation:
+  //   if old_gen spans at least 100% and density < 75%, trigger old-defrag
+  //   else if old_gen spans at least 87.5% and density < 62.5%, trigger old-defrag
+  //   else if old_gen spans at least 75% and density < 50%, trigger old-defrag
+  //   else if old_gen spans at least 62.5% and density < 37.5%, trigger old-defrag
+  //   else if old_gen spans at least 50% and density < 25%, trigger old-defrag
+  //
+  // A previous implementation was more aggressive in triggering, resulting in degraded throughput when
+  // humongous allocation was not required.
+
+  const size_t old_available = available();
+  const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+  const size_t old_unaffiliated_available = free_unaffiliated_regions() * region_size_bytes;
+  assert(old_available >= old_unaffiliated_available, "sanity");
+  const size_t old_fragmented_available = old_available - old_unaffiliated_available;
+
+  const size_t old_bytes_consumed = old_region_count * region_size_bytes - old_fragmented_available;
+  const size_t old_bytes_spanned = old_region_span * region_size_bytes;
+  const double old_density = ((double) old_bytes_consumed) / old_bytes_spanned;
+
+  uint eighths = 8;
+  for (uint i = 0; i < 5; i++) {
+    size_t span_threshold = eighths * allowed_old_gen_span / 8;
+    double density_threshold = (eighths - 2) / 8.0;
+    if ((old_region_span >= span_threshold) && (old_density < density_threshold)) {
+      heuristics()->trigger_old_is_fragmented(old_density, first_old_region, last_old_region);
+      break;
+    }
+    eighths--;
+  }
+
+  const size_t old_used = used() + get_humongous_waste();
+  const size_t trigger_threshold = usage_trigger_threshold();
+  // Detects unsigned arithmetic underflow
+  assert(old_used <= heap->free_set()->capacity(),
+         "Old used (" SIZE_FORMAT ", " SIZE_FORMAT") must not be more than heap capacity (" SIZE_FORMAT ")",
+         used(), get_humongous_waste(), heap->free_set()->capacity());
+
+  if (old_used > trigger_threshold) {
+    heuristics()->trigger_old_has_grown();
+  }
+}
+
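
The trigger ladder added above checks five (span, density) rungs, from (100%, 75%) down to (50%, 25%) of the allowed old-gen span, in steps of one eighth. The standalone function below reproduces just that decision so the thresholds can be read in isolation; the inputs are hypothetical.

#include <cstddef>

// Same rung structure as maybe_trigger_collection(): span >= eighths/8 of the allowed
// span combined with density < (eighths - 2)/8, for eighths = 8 down to 4.
bool should_trigger_old_defrag(size_t old_region_span, size_t allowed_old_gen_span,
                               double old_density) {
  unsigned eighths = 8;
  for (unsigned i = 0; i < 5; i++) {
    size_t span_threshold    = eighths * allowed_old_gen_span / 8;
    double density_threshold = (eighths - 2) / 8.0;
    if (old_region_span >= span_threshold && old_density < density_threshold) {
      return true;
    }
    eighths--;
  }
  return false;
}
// Example: spanning 90% of the allowed range at 60% density trips the second rung
// (span >= 87.5%, density < 62.5%).
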
 void ShenandoahOldGeneration::parallel_region_iterate_free(ShenandoahHeapRegionClosure* cl) {
   // Iterate over old and free regions (exclude young).
   ShenandoahExcludeRegionClosure<YOUNG_GENERATION> exclude_cl(cl);
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp
index 68f831c2e14..e00de6ae76b 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.hpp
@@ -25,12 +25,12 @@
 #ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHOLDGENERATION_HPP
 #define SHARE_VM_GC_SHENANDOAH_SHENANDOAHOLDGENERATION_HPP
 
+#include "gc/shenandoah/heuristics/shenandoahOldHeuristics.hpp"
 #include "gc/shenandoah/shenandoahGeneration.hpp"
 #include "gc/shenandoah/shenandoahSharedVariables.hpp"
 
 class ShenandoahHeapRegion;
 class ShenandoahHeapRegionClosure;
-class ShenandoahOldHeuristics;
 
 class ShenandoahOldGeneration : public ShenandoahGeneration {
 private:
@@ -87,6 +87,10 @@ class ShenandoahOldGeneration : public ShenandoahGeneration {
     return "OLD";
   }
 
+  ShenandoahOldHeuristics* heuristics() const override {
+    return _old_heuristics;
+  }
+
   // See description in field declaration
   void set_promoted_reserve(size_t new_val);
   size_t get_promoted_reserve() const;
@@ -152,6 +156,7 @@ class ShenandoahOldGeneration : public ShenandoahGeneration {
   bool is_concurrent_mark_in_progress() override;
 
   bool entry_coalesce_and_fill();
+  void prepare_for_mixed_collections_after_global_gc();
   void prepare_gc() override;
   void prepare_regions_and_collection_set(bool concurrent) override;
   void record_success_concurrent(bool abbreviated) override;
@@ -177,11 +182,33 @@ class ShenandoahOldGeneration : public ShenandoahGeneration {
   // the performance impact would be too severe.
   void transfer_pointers_from_satb();
 
+  // True if there are old regions waiting to be selected for a mixed collection
+  bool has_unprocessed_collection_candidates();
+
+  bool is_doing_mixed_evacuations() const {
+    return state() == EVACUATING;
+  }
+
+  bool is_preparing_for_mark() const {
+    return state() == FILLING;
+  }
+
+  // Amount of live memory (bytes) in regions waiting for mixed collections
+  size_t unprocessed_collection_candidates_live_memory();
+
+  // Abandon any regions waiting for mixed collections
+  void abandon_collection_candidates();
+
+  void maybe_trigger_collection(size_t first_old_region, size_t last_old_region, size_t old_region_count);
 public:
   enum State {
     FILLING, WAITING_FOR_BOOTSTRAP, BOOTSTRAPPING, MARKING, EVACUATING
   };
 
+#ifdef ASSERT
+  bool validate_waiting_for_bootstrap();
+#endif
+
 private:
   State _state;
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp b/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp
index f643835bebe..fb807b150f8 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahReferenceProcessor.cpp
@@ -30,6 +30,7 @@
 #include "gc/shenandoah/shenandoahGeneration.hpp"
 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
+#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
 #include "gc/shenandoah/shenandoahUtils.hpp"
 #include "runtime/atomic.hpp"
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp
index 9766660138a..707b22b79f6 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahRootVerifier.cpp
@@ -31,6 +31,7 @@
 #include "code/codeCache.hpp"
 #include "gc/shenandoah/shenandoahAsserts.hpp"
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
+#include "gc/shenandoah/shenandoahGeneration.hpp"
 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
 #include "gc/shenandoah/shenandoahRootVerifier.hpp"
 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
@@ -70,7 +71,7 @@ void ShenandoahRootVerifier::roots_do(OopIterateClosure* oops) {
   }
 
   ShenandoahHeap* heap = ShenandoahHeap::heap();
-  if (heap->mode()->is_generational() && heap->is_gc_generation_young()) {
+  if (heap->mode()->is_generational() && heap->active_generation()->is_young()) {
     shenandoah_assert_safepoint();
     heap->card_scan()->roots_do(oops);
   }
@@ -93,7 +94,7 @@ void ShenandoahRootVerifier::strong_roots_do(OopIterateClosure* oops) {
   }
 
   ShenandoahHeap* heap = ShenandoahHeap::heap();
-  if (heap->mode()->is_generational() && heap->is_gc_generation_young()) {
+  if (heap->mode()->is_generational() && heap->active_generation()->is_young()) {
     heap->card_scan()->roots_do(oops);
   }
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp
index 93f4d703dd7..636c983dec1 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.cpp
@@ -25,6 +25,7 @@
 
 #include "precompiled.hpp"
 
+#include "gc/shenandoah/shenandoahOldGeneration.hpp"
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
 #include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
@@ -137,7 +138,8 @@ ShenandoahScanRememberedTask::ShenandoahScanRememberedTask(ShenandoahObjToScanQu
                                                            ShenandoahRegionChunkIterator* work_list, bool is_concurrent) :
   WorkerTask("Scan Remembered Set"),
   _queue_set(queue_set), _old_queue_set(old_queue_set), _rp(rp), _work_list(work_list), _is_concurrent(is_concurrent) {
-  log_info(gc, remset)("Scan remembered set using bitmap: %s", BOOL_TO_STR(ShenandoahHeap::heap()->is_old_bitmap_stable()));
+  bool old_bitmap_stable = ShenandoahHeap::heap()->old_generation()->is_mark_complete();
+  log_info(gc, remset)("Scan remembered set using bitmap: %s", BOOL_TO_STR(old_bitmap_stable));
 }
 
 void ShenandoahScanRememberedTask::work(uint worker_id) {
@@ -445,7 +447,6 @@ void ShenandoahReconstructRememberedSetTask::work(uint worker_id) {
         HeapWord* t = r->top();
         while (obj_addr < t) {
           oop obj = cast_to_oop(obj_addr);
-          size_t size = obj->size();
           scanner->register_object_without_lock(obj_addr);
           obj_addr += obj->oop_iterate_size(&dirty_cards_for_cross_generational_pointers);
         }
@@ -454,4 +455,4 @@ void ShenandoahReconstructRememberedSetTask::work(uint worker_id) {
     // else, this region is FREE or YOUNG or inactive and we can ignore it.
     r = _regions->next();
   }
-}
\ No newline at end of file
+}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp
index 7c7b030da0d..fbf123670fe 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp
@@ -33,8 +33,10 @@
 #include "gc/shenandoah/shenandoahCardTable.hpp"
 #include "gc/shenandoah/shenandoahHeap.hpp"
 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
+#include "gc/shenandoah/shenandoahOldGeneration.hpp"
 #include "gc/shenandoah/shenandoahScanRemembered.hpp"
 #include "gc/shenandoah/mode/shenandoahMode.hpp"
+#include "logging/log.hpp"
 
 inline size_t
 ShenandoahDirectCardMarkRememberedSet::last_valid_index() const {
@@ -583,7 +585,7 @@ void ShenandoahScanRemembered<RememberedSet>::process_clusters(size_t first_clus
   // the old generation marking. These include objects allocated since the
   // start of old generation marking (being those above TAMS).
   const ShenandoahHeap* heap = ShenandoahHeap::heap();
-  const ShenandoahMarkingContext* ctx = heap->is_old_bitmap_stable() ?
+  const ShenandoahMarkingContext* ctx = heap->old_generation()->is_mark_complete() ?
                                         heap->marking_context() : nullptr;
 
   // The region we will scan is the half-open interval [start_addr, end_addr),
@@ -890,7 +892,8 @@ ShenandoahScanRemembered<RememberedSet>::addr_for_cluster(size_t cluster_no) {
 template<typename RememberedSet>
 void ShenandoahScanRemembered<RememberedSet>::roots_do(OopIterateClosure* cl) {
   ShenandoahHeap* heap = ShenandoahHeap::heap();
-  log_info(gc, remset)("Scan remembered set using bitmap: %s", BOOL_TO_STR(heap->is_old_bitmap_stable()));
+  bool old_bitmap_stable = heap->old_generation()->is_mark_complete();
+  log_info(gc, remset)("Scan remembered set using bitmap: %s", BOOL_TO_STR(old_bitmap_stable));
   for (size_t i = 0, n = heap->num_regions(); i < n; ++i) {
     ShenandoahHeapRegion* region = heap->get_region(i);
     if (region->is_old() && region->is_active() && !region->is_cset()) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.cpp b/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.cpp
index 42e99ae0026..1a143922f40 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.cpp
@@ -26,6 +26,8 @@
 #include "precompiled.hpp"
 
 #include "gc/shenandoah/mode/shenandoahMode.hpp"
+#include "gc/shenandoah/shenandoahEvacTracker.hpp"
+#include "gc/shenandoah/shenandoahGenerationalHeap.hpp"
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 #include "gc/shenandoah/shenandoahThreadLocalData.hpp"
 
@@ -54,7 +56,7 @@ ShenandoahThreadLocalData::~ShenandoahThreadLocalData() {
     delete _gclab;
   }
   if (_plab != nullptr) {
-    ShenandoahHeap::heap()->retire_plab(_plab);
+    ShenandoahGenerationalHeap::heap()->retire_plab(_plab);
     delete _plab;
   }
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.hpp b/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.hpp
index 3ac615592c4..a715474c447 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahThreadLocalData.hpp
@@ -118,8 +118,10 @@ class ShenandoahThreadLocalData {
 
     // In theory, plabs are only needed if heap->mode()->is_generational().  However, some threads are
     // instantiated before we are able to answer that question.
-    data(thread)->_plab = new PLAB(align_up(PLAB::min_size(), CardTable::card_size_in_words()));
-    data(thread)->_plab_size = 0;
+    if (ShenandoahHeap::heap()->mode()->is_generational()) {
+      data(thread)->_plab = new PLAB(align_up(PLAB::min_size(), CardTable::card_size_in_words()));
+      data(thread)->_plab_size = 0;
+    }
   }
 
   static PLAB* gclab(Thread* thread) {
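
The PLAB created above is sized by rounding the minimum PLAB size up to a whole number of card-table cards, so buffer boundaries coincide with card boundaries. A small sketch of that rounding, assuming 8-byte heap words and 512-byte cards (HotSpot defaults, but assumptions of this sketch):

#include <cstddef>

constexpr size_t kHeapWordSize    = 8;                    // assumed 64-bit heap words
constexpr size_t kCardSizeInWords = 512 / kHeapWordSize;  // assumed 512-byte cards

size_t align_up_to_card_words(size_t words) {
  return (words + kCardSizeInWords - 1) / kCardSizeInWords * kCardSizeInWords;
}
// Example: a 70-word minimum PLAB becomes 128 words (two 64-word cards).
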
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
index c70764bde76..3bc53354b67 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
@@ -33,6 +33,7 @@
 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
+#include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
 #include "gc/shenandoah/shenandoahUtils.hpp"
 #include "gc/shenandoah/shenandoahVerifier.hpp"
@@ -188,7 +189,7 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure {
           // fallthrough for fast failure for un-live regions:
         case ShenandoahVerifier::_verify_liveness_conservative:
           check(ShenandoahAsserts::_safe_oop, obj, obj_reg->has_live() ||
-                (obj_reg->is_old() && ShenandoahHeap::heap()->is_gc_generation_young()),
+                (obj_reg->is_old() && _heap->active_generation()->is_young()),
                    "Object must belong to region with live data");
           break;
         default:
@@ -1352,9 +1353,10 @@ void ShenandoahVerifier::verify_rem_set_before_mark() {
   ShenandoahVerifyRemSetClosure check_interesting_pointers(true);
   ShenandoahMarkingContext* ctx;
 
-  log_debug(gc)("Verifying remembered set at %s mark", _heap->doing_mixed_evacuations()? "mixed": "young");
+  ShenandoahOldGeneration* old_generation = _heap->old_generation();
+  log_debug(gc)("Verifying remembered set at %s mark", old_generation->is_doing_mixed_evacuations() ? "mixed" : "young");
 
-  if (_heap->is_old_bitmap_stable() || _heap->active_generation()->is_global()) {
+  if (old_generation->is_mark_complete() || _heap->active_generation()->is_global()) {
     ctx = _heap->complete_marking_context();
   } else {
     ctx = nullptr;
@@ -1446,7 +1448,7 @@ void ShenandoahVerifier::verify_rem_set_before_update_ref() {
   ShenandoahRegionIterator iterator;
   ShenandoahMarkingContext* ctx;
 
-  if (_heap->is_old_bitmap_stable() || _heap->active_generation()->is_global()) {
+  if (_heap->old_generation()->is_mark_complete() || _heap->active_generation()->is_global()) {
     ctx = _heap->complete_marking_context();
   } else {
     ctx = nullptr;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.cpp
index 5e6bca6ff28..53840a46448 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.cpp
@@ -25,11 +25,8 @@
 
 #include "gc/shenandoah/shenandoahFreeSet.hpp"
 #include "gc/shenandoah/shenandoahHeap.hpp"
-#include "gc/shenandoah/shenandoahHeap.inline.hpp"
 #include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
-#include "gc/shenandoah/shenandoahOldGeneration.hpp"
 #include "gc/shenandoah/shenandoahUtils.hpp"
-#include "gc/shenandoah/shenandoahVerifier.hpp"
 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
 #include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
 
@@ -66,6 +63,11 @@ void ShenandoahYoungGeneration::heap_region_iterate(ShenandoahHeapRegionClosure*
   ShenandoahHeap::heap()->heap_region_iterate(&young_regions_cl);
 }
 
+void ShenandoahYoungGeneration::parallel_region_iterate_free(ShenandoahHeapRegionClosure* cl) {
+  ShenandoahExcludeRegionClosure<OLD_GENERATION> exclude_cl(cl);
+  ShenandoahHeap::heap()->parallel_heap_region_iterate(&exclude_cl);
+}
+
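
The iteration moved above relies on an exclude-by-affiliation wrapper: the young generation walks all regions in parallel but forwards only those that are not OLD, so free and young regions are both visited. A simplified sketch of that wrapper pattern with stand-in types (the real closure works over ShenandoahHeapRegion):

#include <functional>

enum Affiliation { FREE, YOUNG_GENERATION, OLD_GENERATION };

struct Region { Affiliation affiliation; };

// Forward only regions whose affiliation differs from the excluded one.
template <Affiliation EXCLUDED>
struct ExcludeRegionClosureSketch {
  std::function<void(Region&)> inner;
  void heap_region_do(Region& r) {
    if (r.affiliation != EXCLUDED) {
      inner(r);    // for young: visits YOUNG and FREE regions, skips OLD
    }
  }
};
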
 bool ShenandoahYoungGeneration::is_concurrent_mark_in_progress() {
   return ShenandoahHeap::heap()->is_concurrent_young_mark_in_progress();
 }
@@ -82,7 +84,8 @@ bool ShenandoahYoungGeneration::contains(oop obj) const {
 }
 
 ShenandoahHeuristics* ShenandoahYoungGeneration::initialize_heuristics(ShenandoahMode* gc_mode) {
-  _heuristics = new ShenandoahYoungHeuristics(this);
+  _young_heuristics = new ShenandoahYoungHeuristics(this);
+  _heuristics = _young_heuristics;
   _heuristics->set_guaranteed_gc_interval(ShenandoahGuaranteedYoungGCInterval);
   confirm_heuristics_mode();
   return _heuristics;
@@ -100,7 +103,3 @@ size_t ShenandoahYoungGeneration::soft_available() const {
   return MIN2(available, ShenandoahHeap::heap()->free_set()->available());
 }
 
-void ShenandoahYoungGeneration::parallel_region_iterate_free(ShenandoahHeapRegionClosure* cl) {
-  ShenandoahExcludeRegionClosure<OLD_GENERATION> exclude_cl(cl);
-  ShenandoahHeap::heap()->parallel_heap_region_iterate(&exclude_cl);
-}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.hpp
index 23bdd0f005b..6fdff17e5db 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahYoungGeneration.hpp
@@ -26,10 +26,12 @@
 #define SHARE_VM_GC_SHENANDOAH_SHENANDOAHYOUNGGENERATION_HPP
 
 #include "gc/shenandoah/shenandoahGeneration.hpp"
+#include "gc/shenandoah/heuristics/shenandoahYoungHeuristics.hpp"
 
 class ShenandoahYoungGeneration : public ShenandoahGeneration {
 private:
   ShenandoahObjToScanQueueSet* _old_gen_task_queues;
+  ShenandoahYoungHeuristics* _young_heuristics;
 
 public:
   ShenandoahYoungGeneration(uint max_queues, size_t max_capacity, size_t max_soft_capacity);
@@ -40,6 +42,10 @@ class ShenandoahYoungGeneration : public ShenandoahGeneration {
     return "YOUNG";
   }
 
+  ShenandoahYoungHeuristics* heuristics() const override {
+    return _young_heuristics;
+  }
+
   void set_concurrent_mark_in_progress(bool in_progress) override;
   bool is_concurrent_mark_in_progress() override;
 
diff --git a/test/hotspot/gtest/gc/shenandoah/test_shenandoahOldHeuristic.cpp b/test/hotspot/gtest/gc/shenandoah/test_shenandoahOldHeuristic.cpp
index 65c95232d0f..7bcbbf909d9 100644
--- a/test/hotspot/gtest/gc/shenandoah/test_shenandoahOldHeuristic.cpp
+++ b/test/hotspot/gtest/gc/shenandoah/test_shenandoahOldHeuristic.cpp
@@ -80,7 +80,7 @@ class ShenandoahOldHeuristicTest : public ::testing::Test {
       _collection_set(nullptr) {
     SKIP_IF_NOT_SHENANDOAH();
     _heap = ShenandoahHeap::heap();
-    _heuristics = _heap->old_heuristics();
+    _heuristics = _heap->old_generation()->heuristics();
     _collection_set = _heap->collection_set();
     ShenandoahHeapLocker locker(_heap->lock());
     ShenandoahResetRegions reset;