diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
index a9bb5f9434c..f9fd188a907 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
@@ -977,7 +977,7 @@ class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
     // Make empty regions that have been allocated into regular
     if (r->is_empty() && live > 0) {
       if (!_is_generational) {
-        r->make_young_maybe();
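+        // Establish an affiliation for regions that are still FREE; in non-generational mode every affiliated region is young.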
+        r->make_affiliated_maybe();
       }
       // else, generational mode compaction has already established affiliation.
       r->make_regular_bypass();
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
index 382d97a4afe..84eb0ba8861 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
@@ -872,7 +872,7 @@ class ShenandoahGenerationalUpdateHeapRefsTask : public WorkerTask {
         if (is_mixed) {
           if (r->is_humongous()) {
             // Need to examine both dirty and clean cards during mixed evac.
-            r->oop_iterate_humongous_slice(&cl, false, start_of_range, assignment._chunk_size, true);
+            r->oop_iterate_humongous_slice_all(&cl, start_of_range, assignment._chunk_size);
           } else {
             // Since this is mixed evacuation, old regions that are candidates for collection have not been coalesced
             // and filled.  This will use mark bits to find objects that need to be updated.
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
index 9745abdb42a..0aac650f71e 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
@@ -118,8 +118,9 @@ void ShenandoahHeapRegion::make_regular_allocation(ShenandoahAffiliation affilia
 }
 
 // Change affiliation to YOUNG_GENERATION if _state is not _pinned_cset, _regular, or _pinned.  This implements
-// behavior previously performed as a side effect of make_regular_bypass().  This is used by Full GC
-void ShenandoahHeapRegion::make_young_maybe() {
+// behavior previously performed as a side effect of make_regular_bypass().  This is used by Full GC in non-generational
+// mode to transition regions from FREE. Note that in non-generational mode, every non-free region is young.
+void ShenandoahHeapRegion::make_affiliated_maybe() {
   shenandoah_assert_heaplocked();
   assert(!ShenandoahHeap::heap()->mode()->is_generational(), "Only call if non-generational");
   switch (_state) {
@@ -137,7 +138,7 @@ void ShenandoahHeapRegion::make_young_maybe() {
    case _pinned:
      return;
    default:
-     assert(false, "Unexpected _state in make_young_maybe");
+     assert(false, "Unexpected _state in make_affiliated_maybe");
   }
 }
 
@@ -453,9 +454,6 @@ void ShenandoahHeapRegion::print_on(outputStream* st) const {
 // oop_iterate without closure, return true if completed without cancellation
 bool ShenandoahHeapRegion::oop_coalesce_and_fill(bool cancellable) {
 
-  // Consider yielding to cancel/preemption request after this many coalesce operations (skip marked, or coalesce free).
-  const size_t preemption_stride = 128;
-
   assert(!is_humongous(), "No need to fill or coalesce humongous regions");
   if (!is_active()) {
     end_preemptible_coalesce_and_fill();
@@ -479,7 +477,6 @@ bool ShenandoahHeapRegion::oop_coalesce_and_fill(bool cancellable) {
   // Resume coalesce and fill from this address
   HeapWord* obj_addr = resume_coalesce_and_fill();
 
-  size_t ops_before_preempt_check = preemption_stride;
   while (obj_addr < t) {
     oop obj = cast_to_oop(obj_addr);
     if (marking_context->is_marked(obj)) {
@@ -495,12 +492,9 @@ bool ShenandoahHeapRegion::oop_coalesce_and_fill(bool cancellable) {
       heap->old_generation()->card_scan()->coalesce_objects(obj_addr, fill_size);
       obj_addr = next_marked_obj;
     }
-    if (cancellable && ops_before_preempt_check-- == 0) {
-      if (heap->cancelled_gc()) {
-        suspend_coalesce_and_fill(obj_addr);
-        return false;
-      }
-      ops_before_preempt_check = preemption_stride;
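+    // Check for cancellation on every step now that the preemption stride is gone; cancelled_gc() is a cheap flag read.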
+    if (cancellable && heap->cancelled_gc()) {
+      suspend_coalesce_and_fill(obj_addr);
+      return false;
     }
   }
   // Mark that this region has been coalesced and filled
@@ -508,47 +502,51 @@ bool ShenandoahHeapRegion::oop_coalesce_and_fill(bool cancellable) {
   return true;
 }
 
-// DO NOT CANCEL.  If this worker thread has accepted responsibility for scanning a particular range of addresses, it
-// must finish the work before it can be cancelled.
-void ShenandoahHeapRegion::oop_iterate_humongous_slice(OopIterateClosure* blk, bool dirty_only,
-                                                       HeapWord* start, size_t words, bool write_table) {
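+// Shared helper: convert a card-aligned span of words into a whole number of cards.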
+static size_t get_card_count(size_t words) {
   assert(words % CardTable::card_size_in_words() == 0, "Humongous iteration must span whole number of cards");
-  assert(is_humongous(), "only humongous region here");
-  ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
-
-  // Find head.
-  ShenandoahHeapRegion* r = humongous_start_region();
-  assert(r->is_humongous_start(), "need humongous head here");
   assert(CardTable::card_size_in_words() * (words / CardTable::card_size_in_words()) == words,
          "slice must be integral number of cards");
+  return words / CardTable::card_size_in_words();
+}
+
+void ShenandoahHeapRegion::oop_iterate_humongous_slice_dirty(OopIterateClosure* blk,
+                                                             HeapWord* start, size_t words, bool write_table) const {
+  assert(is_humongous(), "only humongous region here");
 
+  ShenandoahHeapRegion* r = humongous_start_region();
   oop obj = cast_to_oop(r->bottom());
+  size_t num_cards = get_card_count(words);
+
+  ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
   ShenandoahScanRemembered* scanner = heap->old_generation()->card_scan();
   size_t card_index = scanner->card_index_for_addr(start);
-  size_t num_cards = words / CardTable::card_size_in_words();
-
-  if (dirty_only) {
-    if (write_table) {
-      while (num_cards-- > 0) {
-        if (scanner->is_write_card_dirty(card_index++)) {
-          obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words()));
-        }
-        start += CardTable::card_size_in_words();
-      }
-    } else {
-      while (num_cards-- > 0) {
-        if (scanner->is_card_dirty(card_index++)) {
-          obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words()));
-        }
-        start += CardTable::card_size_in_words();
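+  // The write table is the card table currently being updated by mutators; the read table is the snapshot scanned by the GC.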
+  if (write_table) {
+    while (num_cards-- > 0) {
+      if (scanner->is_write_card_dirty(card_index++)) {
+        obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words()));
       }
+      start += CardTable::card_size_in_words();
     }
   } else {
-    // Scan all data, regardless of whether cards are dirty
-    obj->oop_iterate(blk, MemRegion(start, start + num_cards * CardTable::card_size_in_words()));
+    while (num_cards-- > 0) {
+      if (scanner->is_card_dirty(card_index++)) {
+        obj->oop_iterate(blk, MemRegion(start, start + CardTable::card_size_in_words()));
+      }
+      start += CardTable::card_size_in_words();
+    }
   }
 }
 
+void ShenandoahHeapRegion::oop_iterate_humongous_slice_all(OopIterateClosure* cl, HeapWord* start, size_t words) const {
+  assert(is_humongous(), "only humongous region here");
+
+  ShenandoahHeapRegion* r = humongous_start_region();
+  oop obj = cast_to_oop(r->bottom());
+
+  // Scan all data, regardless of whether cards are dirty
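+  // Passing a MemRegion confines the closure to references that lie within [start, start + words).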
+  obj->oop_iterate(cl, MemRegion(start, start + words));
+}
+
 ShenandoahHeapRegion* ShenandoahHeapRegion::humongous_start_region() const {
   ShenandoahHeap* heap = ShenandoahHeap::heap();
   assert(is_humongous(), "Must be a part of the humongous region");
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp
index ffc207ef4c7..2be4370f7f6 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp
@@ -173,7 +173,7 @@ class ShenandoahHeapRegion {
 
   // Allowed transitions from the outside code:
   void make_regular_allocation(ShenandoahAffiliation affiliation);
-  void make_young_maybe();
+  void make_affiliated_maybe();
   void make_regular_bypass();
   void make_humongous_start();
   void make_humongous_cont();
@@ -400,15 +400,20 @@ class ShenandoahHeapRegion {
     return _coalesce_and_fill_boundary;
   }
 
-  // Coalesce contiguous spans of garbage objects by filling header and reregistering start locations with remembered set.
-  // This is used by old-gen GC following concurrent marking to make old-gen HeapRegions parsable.  Return true iff
-  // region is completely coalesced and filled.  Returns false if cancelled before task is complete.
+  // Coalesce contiguous spans of garbage objects by filling their headers and registering the start locations with the
+  // remembered set. This is used by old-gen GC following concurrent marking to make old-gen HeapRegions parsable. Old
+  // regions must be parsable because the mark bitmap is not reliable during concurrent old marking.
+  // Returns true iff the region is completely coalesced and filled; returns false if cancelled before the task is complete.
   bool oop_coalesce_and_fill(bool cancellable);
 
   // Invoke closure on every reference contained within the humongous object that spans this humongous
   // region if the reference is contained within a DIRTY card and the reference is no more than words following
   // start within the humongous object.
-  void oop_iterate_humongous_slice(OopIterateClosure* cl, bool dirty_only, HeapWord* start, size_t words, bool write_table);
+  void oop_iterate_humongous_slice_dirty(OopIterateClosure* cl, HeapWord* start, size_t words, bool write_table) const;
+
+  // Invoke the closure on every reference contained within the humongous object in the address range
+  // [start, start + words), regardless of whether the covering cards are dirty.
+  void oop_iterate_humongous_slice_all(OopIterateClosure* cl, HeapWord* start, size_t words) const;
 
   HeapWord* block_start(const void* p) const;
   size_t block_size(const HeapWord* p) const;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
index 17464a76d28..bda2a7efc4b 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
@@ -38,7 +38,7 @@ HeapWord* ShenandoahHeapRegion::allocate_aligned(size_t size, ShenandoahAllocReq
   assert(req.is_lab_alloc(), "allocate_aligned() only applies to LAB allocations");
   assert(is_object_aligned(size), "alloc size breaks alignment: " SIZE_FORMAT, size);
   assert(is_old(), "aligned allocations are only taken from OLD regions to support PLABs");
-  assert(is_aligned(alignment_in_bytes, HeapWordSize), "Expect hea word alignment");
+  assert(is_aligned(alignment_in_bytes, HeapWordSize), "Expect heap word alignment");
 
   HeapWord* orig_top = top();
   size_t alignment_in_words = alignment_in_bytes / HeapWordSize;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp
index 6b73d80b15b..e4491317586 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahScanRemembered.inline.hpp
@@ -308,7 +308,7 @@ ShenandoahScanRemembered::process_humongous_clusters(ShenandoahHeapRegion* r, si
   size_t first_card_index = first_cluster * ShenandoahCardCluster::CardsPerCluster;
   HeapWord* first_cluster_addr = _rs->addr_for_card_index(first_card_index);
   size_t spanned_words = count * ShenandoahCardCluster::CardsPerCluster * CardTable::card_size_in_words();
-  start_region->oop_iterate_humongous_slice(cl, true, first_cluster_addr, spanned_words, use_write_table);
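+  // Remembered-set scanning only needs the dirty cards; mixed-evac update-refs uses oop_iterate_humongous_slice_all instead.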
+  start_region->oop_iterate_humongous_slice_dirty(cl, first_cluster_addr, spanned_words, use_write_table);
 }