diff --git a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp
index a58c49399d8..f171eef0969 100644
--- a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp
+++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp
@@ -74,7 +74,6 @@ void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info,
   __ load(gc_state_addr, flag_val);
 
   // Create a mask to test if the marking bit is set.
-  // TODO: can we directly test if bit is set?
   LIR_Opr mask = LIR_OprFact::intConst(ShenandoahHeap::MARKING);
   LIR_Opr mask_reg = gen->new_register(T_INT);
   __ move(mask, mask_reg);
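
For reference, the predicate this LIR sequence evaluates, written as a plain C++ sketch (the function name is illustrative and not part of the patch):

    // Sketch: the pre-barrier slow path is entered only when the MARKING bit
    // of the thread-local gc_state byte is set.
    static bool pre_barrier_needed(int gc_state) {
      return (gc_state & ShenandoahHeap::MARKING) != 0;
    }
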
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp
index f6be8ef2284..f49cdfcd687 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp
@@ -111,10 +111,6 @@ class ShenandoahHeuristics : public CHeapObj<mtGC> {
 
   static int compare_by_garbage(RegionData a, RegionData b);
 
-  // TODO: We need to enhance this API to give visibility to accompanying old-gen evacuation effort.
-  // In the case that the old-gen evacuation effort is small or zero, the young-gen heuristics
-  // should feel free to dedicate increased efforts to young-gen evacuation.
-
   virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
                                                      RegionData* data, size_t data_size,
                                                      size_t free) = 0;
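
As a sketch of how a concrete heuristic can satisfy this pure virtual (other virtuals omitted; the RegionData accessors and the 50% live budget are illustrative assumptions, not the actual API):

    // Sketch of a possible override body, in class scope of a heuristics subclass.
    void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
                                               RegionData* data, size_t size,
                                               size_t actual_free) /* override */ {
      QuickSort::sort<RegionData>(data, size, compare_by_garbage); // garbage-rich first
      size_t live_budget = actual_free / 2;     // assumed: keep evacuation well inside free space
      for (size_t i = 0; i < size; i++) {
        size_t live = data[i].get_live_data();  // accessor name assumed
        if (live > live_budget) break;
        live_budget -= live;
        cset->add_region(data[i].get_region()); // accessor names assumed
      }
    }
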
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp
index b3f076c8af7..be12b92fa80 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp
@@ -367,10 +367,6 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() {
 
   _old_generation->set_live_bytes_after_last_mark(live_data);
 
-  // TODO: Consider not running mixed collects if we recovered some threshold percentage of memory from immediate garbage.
-  // This would be similar to young and global collections shortcutting evacuation, though we'd probably want a separate
-  // threshold for the old generation.
-
   // Unlike young, we are more interested in efficiently packing OLD-gen than in reclaiming garbage first.  We sort by live-data.
   // Some regular regions may have been promoted in place with no garbage but also with very little live data.  When we "compact"
   // old-gen, we want to pack these underutilized regions together so we can have more unaffiliated (unfragmented) free regions
@@ -378,11 +374,6 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() {
 
   QuickSort::sort<RegionData>(candidates, cand_idx, compare_by_live);
 
-  // Any old-gen region that contains (ShenandoahOldGarbageThreshold (default value 25)% garbage or more is to be
-  // added to the list of candidates for subsequent mixed evacuations.
-  //
-  // TODO: allow ShenandoahOldGarbageThreshold to be determined adaptively, by heuristics.
-
   const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 
   // The convention is to collect regions that have more than this amount of garbage.
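
The removed comment's 25% default makes the cutoff easy to state concretely; a worked sketch of the computation (the local name garbage_threshold is an assumption):

    // ShenandoahOldGarbageThreshold defaults to 25 (percent), per the removed comment.
    const size_t garbage_threshold = region_size_bytes * ShenandoahOldGarbageThreshold / 100;
    // Example: with 4 MB regions, 4M * 25 / 100 = 1M, so an old region needs at
    // least 1 MB of garbage to become a mixed-evacuation candidate.
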
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp
index ac96e40b54c..73e6087bc34 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahYoungHeuristics.cpp
@@ -209,15 +209,6 @@ size_t ShenandoahYoungHeuristics::bytes_of_allocation_runway_before_gc_trigger(s
   // but evac_slack_spiking is only relevant if is_spiking, as defined below.
 
   double avg_cycle_time = _gc_cycle_time_history->davg() + (_margin_of_error_sd * _gc_cycle_time_history->dsd());
-
-  // TODO: Consider making conservative adjustments to avg_cycle_time, such as: (avg_cycle_time *= 2) in cases where
-  // we expect a longer-than-normal GC duration.  This includes mixed evacuations, evacuation that perform promotion
-  // including promotion in place, and OLD GC bootstrap cycles.  It has been observed that these cycles sometimes
-  // require twice or more the duration of "normal" GC cycles.  We have experimented with this approach.  While it
-  // does appear to reduce the frequency of degenerated cycles due to late triggers, it also has the effect of reducing
-  // evacuation slack so that there is less memory available to be transferred to OLD.  The result is that we
-  // throttle promotion and it takes too long to move old objects out of the young generation.
-
   double avg_alloc_rate = _allocation_rate.upper_bound(_margin_of_error_sd);
   size_t evac_slack_avg;
   if (anticipated_available > avg_cycle_time * avg_alloc_rate + penalties + spike_headroom) {
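
To make the comparison above concrete, a worked example with assumed values (none of these numbers come from the patch):

    // Assumed: davg plus the margin-of-error adjustment yields a 0.25 s cycle
    // estimate, and the allocation-rate upper bound is 400 MB/s.
    double avg_cycle_time = 0.25;          // seconds
    double avg_alloc_rate = 400.0 * M;     // bytes per second
    size_t penalties      = 32 * M;        // degeneration penalties
    size_t spike_headroom = 64 * M;        // reserve for allocation spikes
    // Threshold: 0.25 * 400M + 32M + 64M = 196M. Only when anticipated_available
    // exceeds this does the branch above compute evac_slack_avg from the surplus.
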
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp
index f4f1a1817d7..5a2e7a1517d 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.cpp
@@ -440,23 +440,6 @@ void ShenandoahAsserts::assert_heaplocked_or_safepoint(const char* file, int lin
     return;
   }
 
-  if (ShenandoahSafepoint::is_at_shenandoah_safepoint() && Thread::current()->is_VM_thread()) {
-    return;
-  }
-
-  ShenandoahMessageBuffer msg("Heap lock must be owned by current thread, or be at safepoint");
-  report_vm_error(file, line, msg.buffer());
-}
-
-// Unlike assert_heaplocked_or_safepoint(), this does not require current thread in safepoint to be a VM thread
-// TODO: This should be more aptly named. Nothing in this method checks we are actually in Full GC.
-void ShenandoahAsserts::assert_heaplocked_or_fullgc_safepoint(const char* file, int line) {
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-
-  if (heap->lock()->owned_by_self()) {
-    return;
-  }
-
   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
     return;
   }
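
With the fullgc variant folded away, the surviving function presumably reads as below; every fragment is visible in this hunk, but the full assembly is inferred rather than shown:

    void ShenandoahAsserts::assert_heaplocked_or_safepoint(const char* file, int line) {
      ShenandoahHeap* heap = ShenandoahHeap::heap();
      if (heap->lock()->owned_by_self()) {
        return;
      }
      // Any thread at a Shenandoah safepoint now passes; the VM-thread
      // qualifier from the old variant is gone.
      if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
        return;
      }
      ShenandoahMessageBuffer msg("Heap lock must be owned by current thread, or be at safepoint");
      report_vm_error(file, line, msg.buffer());
    }
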
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.hpp b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.hpp
index c8b1b11b474..63a49de3810 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahAsserts.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahAsserts.hpp
@@ -73,7 +73,6 @@ class ShenandoahAsserts {
   static void assert_heaplocked(const char* file, int line);
   static void assert_not_heaplocked(const char* file, int line);
   static void assert_heaplocked_or_safepoint(const char* file, int line);
-  static void assert_heaplocked_or_fullgc_safepoint(const char* file, int line);
 
 #ifdef ASSERT
 #define shenandoah_assert_in_heap(interior_loc, obj) \
@@ -166,8 +165,6 @@ class ShenandoahAsserts {
 #define shenandoah_assert_heaplocked_or_safepoint() \
                     ShenandoahAsserts::assert_heaplocked_or_safepoint(__FILE__, __LINE__)
 
-#define shenandoah_assert_heaplocked_or_fullgc_safepoint() \
-                    ShenandoahAsserts::assert_heaplocked_or_fullgc_safepoint(__FILE__, __LINE__)
 #define shenandoah_assert_control_or_vm_thread() \
                     assert(Thread::current()->is_VM_thread() || Thread::current() == ShenandoahHeap::heap()->control_thread(), "Expected control thread or vm thread")
 // A stronger version of the above that checks that we are at a safepoint if the vm thread
@@ -238,7 +235,6 @@ class ShenandoahAsserts {
 #define shenandoah_assert_heaplocked()
 #define shenandoah_assert_not_heaplocked()
 #define shenandoah_assert_heaplocked_or_safepoint()
-#define shenandoah_assert_heaplocked_or_fullgc_safepoint()
 #define shenandoah_assert_control_or_vm_thread()
 #define shenandoah_assert_control_or_vm_thread_at_safepoint()
 #define shenandoah_assert_generational()
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp
index 52863721a21..a41e8ffdcd9 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.inline.hpp
@@ -426,7 +426,6 @@ void ShenandoahBarrierSet::arraycopy_barrier(T* src, T* dst, size_t count) {
 
   if (_heap->mode()->is_generational()) {
     assert(ShenandoahSATBBarrier, "Generational mode assumes SATB mode");
-    // TODO: Could we optimize here by checking that dst is in an old region?
     if ((gc_state & ShenandoahHeap::OLD_MARKING) != 0) {
       // Note that we can't do the arraycopy marking using the 'src' array when
       // SATB mode is enabled (so we can't do this as part of the iteration for
@@ -479,14 +478,6 @@ void ShenandoahBarrierSet::arraycopy_marking(T* src, T* dst, size_t count, bool
       // Non-generational, marking
       arraycopy_work<T, false, false, true>(array, count);
     }
-  } else {
-    // Incremental Update mode, marking
-    T* array = src;
-    HeapWord* array_addr = reinterpret_cast<HeapWord*>(array);
-    ShenandoahHeapRegion* r = _heap->heap_region_containing(array_addr);
-    if (array_addr < _heap->marking_context()->top_at_mark_start(r)) {
-      arraycopy_work<T, false, false, true>(array, count);
-    }
   }
 }
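
The comment in the first hunk notes that marking cannot use the 'src' array when SATB mode is enabled: conceptually, the barrier must keep the about-to-be-overwritten destination values reachable. A simplified sketch of that idea (enqueue_for_marking is a hypothetical stand-in, not a real API):

    // Snapshot-at-the-beginning: values overwritten in dst must stay reachable
    // until marking finishes, so they are enqueued before the copy clobbers them.
    static void satb_arraycopy_pre_sketch(oop* dst, size_t count) {
      for (size_t i = 0; i < count; i++) {
        oop prev = dst[i];             // value the copy is about to overwrite
        if (prev != nullptr) {
          enqueue_for_marking(prev);   // hypothetical SATB enqueue
        }
      }
    }
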
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
index 9949a8dd3ad..e4ff32a71d3 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahConcurrentGC.cpp
@@ -633,10 +633,6 @@ void ShenandoahConcurrentGC::op_init_mark() {
   if (_do_old_gc_bootstrap) {
     shenandoah_assert_generational();
     // Update region state for both young and old regions
-    // TODO: We should be able to pull this out of the safepoint for the bootstrap
-    // cycle. The top of an old region will only move when a GC cycle evacuates
-    // objects into it. When we start an old cycle, we know that nothing can touch
-    // the top of old regions.
     ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_region_states);
     ShenandoahInitMarkUpdateRegionStateClosure cl;
     heap->parallel_heap_region_iterate(&cl);
@@ -867,11 +863,9 @@ void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::do_oop(oop* p) {
     if (!_mark_context->is_marked(obj)) {
       shenandoah_assert_generations_reconciled();
       if (_heap->is_in_active_generation(obj)) {
-        // TODO: This worries me. Here we are asserting that an unmarked from-space object is 'correct'.
-        // Normally, I would call this a bogus assert, but there seems to be a legitimate use-case for
-        // accessing from-space objects during class unloading. However, the from-space object may have
-        // been "filled". We've made no effort to prevent old generation classes being unloaded by young
-        // gen (and vice-versa).
+        // Here we are asserting that an unmarked from-space object is 'correct'. There seems to be a legitimate
+        // use case for accessing from-space objects during concurrent class unloading. In all modes of Shenandoah,
+        // concurrent class unloading only happens during a global collection.
         shenandoah_assert_correct(p, obj);
         ShenandoahHeap::atomic_clear_oop(p, obj);
       }
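
The cleanup path nulls the dead root with a CAS so that a slot concurrently updated to point at a different object is left alone; a sketch of the presumed semantics (the real helper also handles narrowOop slots):

    // Presumed shape: clear *p only if it still refers to the object we examined.
    inline void atomic_clear_oop_sketch(oop* p, oop expected) {
      Atomic::cmpxchg(p, expected, oop(nullptr));  // result intentionally ignored
    }
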
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
index 63cd1cc7873..2025c87b91b 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
@@ -43,6 +43,7 @@
 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
 #include "gc/shenandoah/shenandoahMark.inline.hpp"
 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
+#include "gc/shenandoah/shenandoahHeapRegionClosures.hpp"
 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 #include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
@@ -181,7 +182,6 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
 
     // b. Cancel all concurrent marks, if in progress
     if (heap->is_concurrent_mark_in_progress()) {
-      // TODO: Send cancel_concurrent_mark upstream? Does it really not have it already?
       heap->cancel_concurrent_mark();
     }
     assert(!heap->is_concurrent_mark_in_progress(), "sanity");
@@ -214,7 +214,7 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
   }
 
   if (UseTLAB) {
-    // TODO: Do we need to explicitly retire PLABs?
+    // Note: in generational mode, this call also retires PLABs along with GCLABs.
     heap->gclabs_retire(ResizeTLAB);
     heap->tlabs_retire(ResizeTLAB);
   }
@@ -289,15 +289,12 @@ class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
 public:
   ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
 
-  void heap_region_do(ShenandoahHeapRegion *r) {
-    // TODO: Add API to heap to skip free regions
-    if (r->is_affiliated()) {
-      _ctx->capture_top_at_mark_start(r);
-      r->clear_live_data();
-    }
+  void heap_region_do(ShenandoahHeapRegion *r) override {
+    _ctx->capture_top_at_mark_start(r);
+    r->clear_live_data();
   }
 
-  bool is_thread_safe() { return true; }
+  bool is_thread_safe() override { return true; }
 };
 
 void ShenandoahFullGC::phase1_mark_heap() {
@@ -306,7 +303,8 @@ void ShenandoahFullGC::phase1_mark_heap() {
 
   ShenandoahHeap* heap = ShenandoahHeap::heap();
 
-  ShenandoahPrepareForMarkClosure cl;
+  ShenandoahPrepareForMarkClosure prepare_for_mark;
+  ShenandoahExcludeRegionClosure<FREE> cl(&prepare_for_mark);
   heap->parallel_heap_region_iterate(&cl);
 
   heap->set_unload_classes(heap->global_generation()->heuristics()->can_unload_classes());
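
Both rewritten call sites in this file delegate the "skip FREE regions" check to ShenandoahExcludeRegionClosure<FREE> from the newly included shenandoahHeapRegionClosures.hpp. The patch shows only its usage; a sketch of how such a filtering decorator can be written (member names are assumptions):

    template<ShenandoahAffiliation AFFILIATION>
    class ShenandoahExcludeRegionClosure : public ShenandoahHeapRegionClosure {
    private:
      ShenandoahHeapRegionClosure* _closure;   // the wrapped closure
    public:
      explicit ShenandoahExcludeRegionClosure(ShenandoahHeapRegionClosure* closure)
        : _closure(closure) {}
      void heap_region_do(ShenandoahHeapRegion* r) override {
        if (r->affiliation() != AFFILIATION) { // e.g. skip FREE regions entirely
          _closure->heap_region_do(r);
        }
      }
      bool is_thread_safe() override { return _closure->is_thread_safe(); }
    };
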
@@ -570,27 +568,18 @@ class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure
     _heap(ShenandoahHeap::heap()),
     _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
 
-  void heap_region_do(ShenandoahHeapRegion* r) {
-    if (!r->is_affiliated()) {
-      // Ignore free regions
-      // TODO: change iterators so they do not process FREE regions.
-      return;
-    }
-
+  void heap_region_do(ShenandoahHeapRegion* r) override {
     if (r->is_humongous_start()) {
       oop humongous_obj = cast_to_oop(r->bottom());
       if (!_ctx->is_marked(humongous_obj)) {
-        assert(!r->has_live(),
-               "Region " SIZE_FORMAT " is not marked, should not have live", r->index());
+        assert(!r->has_live(), "Region " SIZE_FORMAT " is not marked, should not have live", r->index());
         _heap->trash_humongous_region_at(r);
       } else {
-        assert(r->has_live(),
-               "Region " SIZE_FORMAT " should have live", r->index());
+        assert(r->has_live(), "Region " SIZE_FORMAT " should have live", r->index());
       }
     } else if (r->is_humongous_continuation()) {
       // If we hit continuation, the non-live humongous starts should have been trashed already
-      assert(r->humongous_start_region()->has_live(),
-             "Region " SIZE_FORMAT " should have live", r->index());
+      assert(r->humongous_start_region()->has_live(), "Region " SIZE_FORMAT " should have live", r->index());
     } else if (r->is_regular()) {
       if (!r->has_live()) {
         r->make_trash_immediate();
@@ -758,8 +747,9 @@ void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet
 
   {
     // Trash the immediately collectible regions before computing addresses
-    ShenandoahTrashImmediateGarbageClosure tigcl;
-    heap->heap_region_iterate(&tigcl);
+    ShenandoahTrashImmediateGarbageClosure trash_immediate_garbage;
+    ShenandoahExcludeRegionClosure<FREE> cl(&trash_immediate_garbage);
+    heap->heap_region_iterate(&cl);
 
     // Make sure regions are in good state: committed, active, clean.
     // This is needed because we are potentially sliding the data through them.
@@ -773,8 +763,6 @@ void ShenandoahFullGC::phase2_calculate_target_addresses(ShenandoahHeapRegionSet
 
     distribute_slices(worker_slices);
 
-    // TODO: This is ResourceMark is missing upstream.
-    ResourceMark rm;
     ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
     heap->workers()->run_task(&task);
   }
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
index 3956b20412a..4b4662a7702 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
@@ -857,7 +857,7 @@ void ShenandoahGeneration::scan_remembered_set(bool is_concurrent) {
 }
 
 size_t ShenandoahGeneration::increment_affiliated_region_count() {
-  shenandoah_assert_heaplocked_or_fullgc_safepoint();
+  shenandoah_assert_heaplocked_or_safepoint();
   // During full gc, multiple GC worker threads may change region affiliations without a lock.  No lock is enforced
   // on read and write of _affiliated_region_count.  At the end of full gc, a single thread overwrites the count with
   // a coherent value.
@@ -866,31 +866,29 @@ size_t ShenandoahGeneration::increment_affiliated_region_count() {
 }
 
 size_t ShenandoahGeneration::decrement_affiliated_region_count() {
-  shenandoah_assert_heaplocked_or_fullgc_safepoint();
+  shenandoah_assert_heaplocked_or_safepoint();
   // During full gc, multiple GC worker threads may change region affiliations without a lock.  No lock is enforced
   // on read and write of _affiliated_region_count.  At the end of full gc, a single thread overwrites the count with
   // a coherent value.
   _affiliated_region_count--;
-  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
-  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
+  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
          (_used + _humongous_waste <= _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()),
          "used + humongous cannot exceed regions");
   return _affiliated_region_count;
 }
 
 size_t ShenandoahGeneration::increase_affiliated_region_count(size_t delta) {
-  shenandoah_assert_heaplocked_or_fullgc_safepoint();
+  shenandoah_assert_heaplocked_or_safepoint();
   _affiliated_region_count += delta;
   return _affiliated_region_count;
 }
 
 size_t ShenandoahGeneration::decrease_affiliated_region_count(size_t delta) {
-  shenandoah_assert_heaplocked_or_fullgc_safepoint();
+  shenandoah_assert_heaplocked_or_safepoint();
   assert(_affiliated_region_count >= delta, "Affiliated region count cannot be negative");
 
   _affiliated_region_count -= delta;
-  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
-  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
+  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
          (_used + _humongous_waste <= _affiliated_region_count * ShenandoahHeapRegion::region_size_bytes()),
          "used + humongous cannot exceed regions");
   return _affiliated_region_count;
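
The surviving asserts encode a single invariant: outside of full GC, used bytes plus humongous waste can never exceed the total capacity of the affiliated regions. A worked check with assumed numbers:

    // Assumed: 2 MB regions, 10 affiliated regions, 18 MB used, 1 MB humongous waste.
    size_t region_size_bytes = 2 * M;
    size_t affiliated_region_count = 10;
    size_t used = 18 * M, humongous_waste = 1 * M;
    // 18M + 1M <= 10 * 2M holds; 20M used + 1M waste would trip the assert.
    assert(used + humongous_waste <= affiliated_region_count * region_size_bytes,
           "used + humongous cannot exceed regions");
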
@@ -922,8 +920,7 @@ void ShenandoahGeneration::decrease_humongous_waste(size_t bytes) {
 }
 
 void ShenandoahGeneration::decrease_used(size_t bytes) {
-  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
-  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
+  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
          (_used >= bytes), "cannot reduce bytes used by generation below zero");
   Atomic::sub(&_used, bytes);
 }
@@ -970,15 +967,13 @@ size_t ShenandoahGeneration::increase_capacity(size_t increment) {
   // We do not enforce that new capacity >= heap->max_size_for(this).  The maximum generation size is treated as a rule of thumb
   // which may be violated during certain transitions, such as when we are forcing transfers for the purpose of promoting regions
   // in place.
-  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
-  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
+  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
          (_max_capacity + increment <= ShenandoahHeap::heap()->max_capacity()), "Generation cannot be larger than heap size");
   assert(increment % ShenandoahHeapRegion::region_size_bytes() == 0, "Generation capacity must be multiple of region size");
   _max_capacity += increment;
 
   // This detects arithmetic wraparound on _used
-  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
-  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
+  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
          (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() >= _used),
          "Affiliated regions must hold more than what is currently used");
   return _max_capacity;
@@ -1002,15 +997,12 @@ size_t ShenandoahGeneration::decrease_capacity(size_t decrement) {
   _max_capacity -= decrement;
 
   // This detects arithmetic wraparound on _used
-  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
-  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
+  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
          (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() >= _used),
          "Affiliated regions must hold more than what is currently used");
-  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
-  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
+  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
          (_used <= _max_capacity), "Cannot use more than capacity");
-  // TODO: REMOVE IS_GLOBAL() QUALIFIER AFTER WE FIX GLOBAL AFFILIATED REGION ACCOUNTING
-  assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
+  assert(ShenandoahHeap::heap()->is_full_gc_in_progress() ||
          (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() <= _max_capacity),
          "Cannot use more than capacity");
   return _max_capacity;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
index 688627f98a1..ada5ac6b0a1 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -1260,7 +1260,6 @@ oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapReg
       if ((copy == nullptr) && (size < ShenandoahThreadLocalData::gclab_size(thread))) {
         // GCLAB allocation failed because we are bumping up against the limit on young evacuation reserve.  Try resetting
         // the desired GCLAB size and retry GCLAB allocation to avoid cascading of shared memory allocations.
-        // TODO: is this right? using PLAB::min_size() here for gc lab size?
         ShenandoahThreadLocalData::set_gclab_size(thread, PLAB::min_size());
         copy = allocate_from_gclab(thread, size);
         // If we still get nullptr, we'll try a shared allocation below.
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
index 71deede2a6e..dff0809769e 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
@@ -424,7 +424,7 @@ inline void ShenandoahHeap::assert_lock_for_affiliation(ShenandoahAffiliation or
   // Note: during full GC, all transitions between states are possible.  During Full GC, we should be in a safepoint.
 
   if ((orig_affiliation == ShenandoahAffiliation::FREE) || (new_affiliation == ShenandoahAffiliation::FREE)) {
-    shenandoah_assert_heaplocked_or_fullgc_safepoint();
+    shenandoah_assert_heaplocked_or_safepoint();
   }
 }
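
A hypothetical call site, to illustrate the rule this assert enforces: only transitions into or out of FREE demand the heap lock or a safepoint (the surrounding function is invented for illustration):

    // Invented example: retiring a FREE region as YOUNG must hold the heap lock
    // or run at a safepoint; a YOUNG -> OLD change passes without either.
    void set_affiliation_sketch(ShenandoahHeapRegion* r, ShenandoahAffiliation next) {
      ShenandoahHeap::heap()->assert_lock_for_affiliation(r->affiliation(), next);
      // ... perform the affiliation change ...
    }
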