diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
index 8d4e47e72c9..23f2e518950 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp
@@ -34,452 +34,670 @@
 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
 #include "gc/shenandoah/shenandoahScanRemembered.inline.hpp"
 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
+#include "gc/shenandoah/shenandoahSimpleBitMap.hpp"
+#include "gc/shenandoah/shenandoahSimpleBitMap.inline.hpp"
 #include "logging/logStream.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/orderAccess.hpp"
 
-ShenandoahSetsOfFree::ShenandoahSetsOfFree(size_t max_regions, ShenandoahFreeSet* free_set) :
+static const char* partition_name(ShenandoahFreeSetPartitionId t) {
+  switch (t) {
+    case ShenandoahFreeSetPartitionId::NotFree: return "NotFree";
+    case ShenandoahFreeSetPartitionId::Mutator: return "Mutator";
+    case ShenandoahFreeSetPartitionId::Collector: return "Collector";
+    case ShenandoahFreeSetPartitionId::OldCollector: return "OldCollector";
+    default:
+      ShouldNotReachHere();
+      return "Unrecognized";
+  }
+}
+
+#ifndef PRODUCT
+void ShenandoahRegionPartitions::dump_bitmap() const {
+  log_info(gc)("Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "], Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT
+               "], Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
+               _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)],
+               _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)],
+               _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)],
+               _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)],
+               _leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)],
+               _rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)]);
+  log_info(gc)("Empty Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT
+               "], Empty Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT
+               "], Empty Old Collecto range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
+               _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
+               _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
+               _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
+               _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
+               _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
+               _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)]);
+
+  log_info(gc)("%6s: %18s %18s %18s %18s", "index", "Mutator Bits", "Collector Bits", "Old Collector Bits", "NotFree Bits");
+  dump_bitmap_range(0, _max-1);
+}
+
+void ShenandoahRegionPartitions::dump_bitmap_range(idx_t start_region_idx, idx_t end_region_idx) const {
+  assert((start_region_idx >= 0) && (start_region_idx < (idx_t) _max), "precondition");
+  assert((end_region_idx >= 0) && (end_region_idx < (idx_t) _max), "precondition");
+  idx_t aligned_start = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(start_region_idx);
+  idx_t aligned_end = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(end_region_idx);
+  idx_t alignment = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].alignment();
+  while (aligned_start <= aligned_end) {
+    dump_bitmap_row(aligned_start);
+    aligned_start += alignment;
+  }
+}
+
+void ShenandoahRegionPartitions::dump_bitmap_row(idx_t region_idx) const {
+  assert((region_idx >= 0) && (region_idx < (idx_t) _max), "precondition");
+  idx_t aligned_idx = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(region_idx);
+  uintx mutator_bits = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].bits_at(aligned_idx);
+  uintx collector_bits = _membership[int(ShenandoahFreeSetPartitionId::Collector)].bits_at(aligned_idx);
+  uintx old_collector_bits = _membership[int(ShenandoahFreeSetPartitionId::OldCollector)].bits_at(aligned_idx);
+  uintx free_bits = mutator_bits | collector_bits | old_collector_bits;
+  uintx notfree_bits =  ~free_bits;
+  log_info(gc)(SSIZE_FORMAT_W(6) ": " SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0,
+               aligned_idx, mutator_bits, collector_bits, old_collector_bits, notfree_bits);
+}
+#endif
+
+ShenandoahRegionPartitions::ShenandoahRegionPartitions(size_t max_regions, ShenandoahFreeSet* free_set) :
     _max(max_regions),
+    _region_size_bytes(ShenandoahHeapRegion::region_size_bytes()),
     _free_set(free_set),
-    _region_size_bytes(ShenandoahHeapRegion::region_size_bytes())
+    _membership{ ShenandoahSimpleBitMap(max_regions), ShenandoahSimpleBitMap(max_regions) , ShenandoahSimpleBitMap(max_regions) }
 {
-  _membership = NEW_C_HEAP_ARRAY(ShenandoahFreeMemoryType, max_regions, mtGC);
-  clear_internal();
+  make_all_regions_unavailable();
 }
 
-ShenandoahSetsOfFree::~ShenandoahSetsOfFree() {
-  FREE_C_HEAP_ARRAY(ShenandoahFreeMemoryType, _membership);
+inline bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) const {
+  return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
 }
 
+inline bool ShenandoahFreeSet::can_allocate_from(size_t idx) const {
+  ShenandoahHeapRegion* r = _heap->get_region(idx);
+  return can_allocate_from(r);
+}
 
-void ShenandoahSetsOfFree::clear_internal() {
-  for (size_t idx = 0; idx < _max; idx++) {
-    _membership[idx] = NotFree;
+inline size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) const {
+  if (r->is_trash()) {
+    // This would be recycled on allocation path
+    return ShenandoahHeapRegion::region_size_bytes();
+  } else {
+    return r->free();
   }
+}
+
+inline size_t ShenandoahFreeSet::alloc_capacity(size_t idx) const {
+  ShenandoahHeapRegion* r = _heap->get_region(idx);
+  return alloc_capacity(r);
+}
+
+inline bool ShenandoahFreeSet::has_alloc_capacity(ShenandoahHeapRegion *r) const {
+  return alloc_capacity(r) > 0;
+}
 
-  for (size_t idx = 0; idx < NumFreeSets; idx++) {
-    _leftmosts[idx] = _max;
-    _rightmosts[idx] = 0;
-    _leftmosts_empty[idx] = _max;
-    _rightmosts_empty[idx] = 0;
-    _capacity_of[idx] = 0;
-    _used_by[idx] = 0;
+inline idx_t ShenandoahRegionPartitions::leftmost(ShenandoahFreeSetPartitionId which_partition) const {
+  assert (which_partition < NumPartitions, "selected free partition must be valid");
+  idx_t idx = _leftmosts[int(which_partition)];
+  if (idx >= _max) {
+    return _max;
+  } else {
+    // Cannot assert that _membership[int(which_partition)].is_set(idx) because this helper method may be used
+    // to query the original value of leftmost when leftmost must be adjusted because the interval representing
+    // which_partition is shrinking after the region that used to be leftmost is retired.
+    return idx;
   }
+}
 
-  _left_to_right_bias[Mutator] = true;
-  _left_to_right_bias[Collector] = false;
-  _left_to_right_bias[OldCollector] = false;
+inline idx_t ShenandoahRegionPartitions::rightmost(ShenandoahFreeSetPartitionId which_partition) const {
+  assert (which_partition < NumPartitions, "selected free partition must be valid");
+  idx_t idx = _rightmosts[int(which_partition)];
+  // Cannot assert that _membership[int(which_partition)].is_set(idx) because this helper method may be used
+  // to query the original value of rightmost when rightmost must be adjusted because the interval representing
+  // which_partition is shrinking after the region that used to be rightmost is retired.
+  return idx;
+}
 
-  _region_counts[Mutator] = 0;
-  _region_counts[Collector] = 0;
-  _region_counts[OldCollector] = 0;
-  _region_counts[NotFree] = _max;
+void ShenandoahRegionPartitions::make_all_regions_unavailable() {
+  for (size_t partition_id = 0; partition_id < IntNumPartitions; partition_id++) {
+    _membership[partition_id].clear_all();
+    _leftmosts[partition_id] = _max;
+    _rightmosts[partition_id] = -1;
+    _leftmosts_empty[partition_id] = _max;
+    _rightmosts_empty[partition_id] = -1;
+    _capacity[partition_id] = 0;
+    _used[partition_id] = 0;
+  }
+  _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 }
 
-void ShenandoahSetsOfFree::clear_all() {
-  clear_internal();
+void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftmost, idx_t mutator_rightmost,
+                                                             idx_t mutator_leftmost_empty, idx_t mutator_rightmost_empty,
+                                                             size_t mutator_region_count, size_t mutator_used) {
+  _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost;
+  _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost;
+  _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost_empty;
+  _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost_empty;
+
+  _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count;
+  _used[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_used;
+  _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count * _region_size_bytes;
+
+  _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)] = _max;
+  _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)] = -1;
+  _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)] = _max;
+  _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)] = -1;
+
+  _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
+  _used[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
+  _capacity[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 }
 
-void ShenandoahSetsOfFree::increase_used(ShenandoahFreeMemoryType which_set, size_t bytes) {
-  assert (which_set > NotFree && which_set < NumFreeSets, "Set must correspond to a valid freeset");
-  _used_by[which_set] += bytes;
-  assert (_used_by[which_set] <= _capacity_of[which_set],
+void ShenandoahRegionPartitions::establish_old_collector_intervals(idx_t old_collector_leftmost, idx_t old_collector_rightmost,
+                                                                   idx_t old_collector_leftmost_empty,
+                                                                   idx_t old_collector_rightmost_empty,
+                                                                   size_t old_collector_region_count, size_t old_collector_used) {
+  _leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_leftmost;
+  _rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_rightmost;
+  _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_leftmost_empty;
+  _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_rightmost_empty;
+
+  _region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_region_count;
+  _used[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_used;
+  _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_region_count * _region_size_bytes;
+}
+
+void ShenandoahRegionPartitions::increase_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes) {
+  assert (which_partition < NumPartitions, "Partition must be valid");
+  _used[int(which_partition)] += bytes;
+  assert (_used[int(which_partition)] <= _capacity[int(which_partition)],
           "Must not use (" SIZE_FORMAT ") more than capacity (" SIZE_FORMAT ") after increase by " SIZE_FORMAT,
-          _used_by[which_set], _capacity_of[which_set], bytes);
+          _used[int(which_partition)], _capacity[int(which_partition)], bytes);
 }
 
-inline void ShenandoahSetsOfFree::shrink_bounds_if_touched(ShenandoahFreeMemoryType set, size_t idx) {
-  if (idx == _leftmosts[set]) {
-    while ((_leftmosts[set] < _max) && !in_free_set(_leftmosts[set], set)) {
-      _leftmosts[set]++;
+inline void ShenandoahRegionPartitions::shrink_interval_if_range_modifies_either_boundary(
+  ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) {
+  assert((low_idx <= high_idx) && (low_idx >= 0) && (high_idx < _max), "Range must span legal index values");
+  if (low_idx == leftmost(partition)) {
+    assert (!_membership[int(partition)].is_set(low_idx), "Do not shrink interval if region not removed");
+    if (high_idx + 1 == _max) {
+      _leftmosts[int(partition)] = _max;
+    } else {
+      _leftmosts[int(partition)] = find_index_of_next_available_region(partition, high_idx + 1);
     }
-    if (_leftmosts_empty[set] < _leftmosts[set]) {
+    if (_leftmosts_empty[int(partition)] < _leftmosts[int(partition)]) {
       // This gets us closer to where we need to be; we'll scan further when leftmosts_empty is requested.
-      _leftmosts_empty[set] = _leftmosts[set];
+      _leftmosts_empty[int(partition)] = _leftmosts[int(partition)];
     }
   }
-  if (idx == _rightmosts[set]) {
-    while (_rightmosts[set] > 0 && !in_free_set(_rightmosts[set], set)) {
-      _rightmosts[set]--;
+  if (high_idx == _rightmosts[int(partition)]) {
+    assert (!_membership[int(partition)].is_set(high_idx), "Do not shrink interval if region not removed");
+    if (low_idx == 0) {
+      _rightmosts[int(partition)] = -1;
+    } else {
+      _rightmosts[int(partition)] = find_index_of_previous_available_region(partition, low_idx - 1);
     }
-    if (_rightmosts_empty[set] > _rightmosts[set]) {
+    if (_rightmosts_empty[int(partition)] > _rightmosts[int(partition)]) {
       // This gets us closer to where we need to be; we'll scan further when rightmosts_empty is requested.
-      _rightmosts_empty[set] = _rightmosts[set];
+      _rightmosts_empty[int(partition)] = _rightmosts[int(partition)];
     }
   }
+  if (_leftmosts[int(partition)] > _rightmosts[int(partition)]) {
+    _leftmosts[int(partition)] = _max;
+    _rightmosts[int(partition)] = -1;
+    _leftmosts_empty[int(partition)] = _max;
+    _rightmosts_empty[int(partition)] = -1;
+  }
+}
+
+inline void ShenandoahRegionPartitions::shrink_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, idx_t idx) {
+  shrink_interval_if_range_modifies_either_boundary(partition, idx, idx);
 }
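
The boundary maintenance above can be pictured in isolation. Below is a minimal standalone sketch, using std::bitset and made-up names rather than ShenandoahSimpleBitMap and the real partition bookkeeping: a cached [leftmost, rightmost] interval is kept over a bitmap, a boundary whose bit is cleared advances to the nearest remaining set bit, and an emptied interval collapses to the canonical [max, -1] form.

#include <bitset>
#include <cstddef>

struct IntervalOverBitmap {
  static constexpr long kMax = 64;
  std::bitset<kMax> bits;
  long leftmost = kMax;          // canonical empty interval: leftmost > rightmost
  long rightmost = -1;

  void set(long idx) {
    bits.set((std::size_t) idx);
    if (idx < leftmost)  { leftmost = idx; }
    if (idx > rightmost) { rightmost = idx; }
  }

  void clear(long idx) {
    bits.reset((std::size_t) idx);
    if (idx == leftmost) {       // advance the left boundary to the next remaining set bit
      while (leftmost <= rightmost && !bits.test((std::size_t) leftmost)) { leftmost++; }
    }
    if (idx == rightmost) {      // retreat the right boundary to the previous remaining set bit
      while (rightmost >= leftmost && !bits.test((std::size_t) rightmost)) { rightmost--; }
    }
    if (leftmost > rightmost) {  // nothing left: collapse to the empty form [kMax, -1]
      leftmost = kMax;
      rightmost = -1;
    }
  }
};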
 
-inline void ShenandoahSetsOfFree::expand_bounds_maybe(ShenandoahFreeMemoryType set, size_t idx, size_t region_capacity) {
-  if (region_capacity == _region_size_bytes) {
-    if (_leftmosts_empty[set] > idx) {
-      _leftmosts_empty[set] = idx;
+inline void ShenandoahRegionPartitions::expand_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition,
+                                                                             idx_t idx, size_t region_available) {
+  if (_leftmosts[int(partition)] > idx) {
+    _leftmosts[int(partition)] = idx;
+  }
+  if (_rightmosts[int(partition)] < idx) {
+    _rightmosts[int(partition)] = idx;
+  }
+  if (region_available == _region_size_bytes) {
+    if (_leftmosts_empty[int(partition)] > idx) {
+      _leftmosts_empty[int(partition)] = idx;
     }
-    if (_rightmosts_empty[set] < idx) {
-      _rightmosts_empty[set] = idx;
+    if (_rightmosts_empty[int(partition)] < idx) {
+      _rightmosts_empty[int(partition)] = idx;
     }
   }
-  if (_leftmosts[set] > idx) {
-    _leftmosts[set] = idx;
+}
+
+void ShenandoahRegionPartitions::retire_range_from_partition(
+  ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) {
+
+  // Note: we may remove from free partition even if region is not entirely full, such as when available < PLAB::min_size()
+  assert ((low_idx < _max) && (high_idx < _max), "Both indices are sane: " SIZE_FORMAT " and " SIZE_FORMAT " < " SIZE_FORMAT,
+          low_idx, high_idx, _max);
+  assert (partition < NumPartitions, "Cannot remove from free partitions if not already free");
+
+  for (idx_t idx = low_idx; idx <= high_idx; idx++) {
+    assert (in_free_set(partition, idx), "Must be in partition to remove from partition");
+    _membership[int(partition)].clear_bit(idx);
   }
-  if (_rightmosts[set] < idx) {
-    _rightmosts[set] = idx;
+  _region_counts[int(partition)] -= high_idx + 1 - low_idx;
+  shrink_interval_if_range_modifies_either_boundary(partition, low_idx, high_idx);
+}
+
+void ShenandoahRegionPartitions::retire_from_partition(ShenandoahFreeSetPartitionId partition, idx_t idx, size_t used_bytes) {
+
+  // Note: we may remove from free partition even if region is not entirely full, such as when available < PLAB::min_size()
+  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
+  assert (partition < NumPartitions, "Cannot remove from free partitions if not already free");
+  assert (in_free_set(partition, idx), "Must be in partition to remove from partition");
+
+  if (used_bytes < _region_size_bytes) {
+    // Count the alignment pad remnant of memory as used when we retire this region
+    increase_used(partition, _region_size_bytes - used_bytes);
   }
+  _membership[int(partition)].clear_bit(idx);
+  shrink_interval_if_boundary_modified(partition, idx);
+  _region_counts[int(partition)]--;
 }
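
The used/capacity bookkeeping follows a simple convention, sketched here with assumed names rather than the real ShenandoahRegionPartitions fields: a region entering a partition adds a full region of capacity plus any already-consumed bytes as used, allocations add to used, and retiring a region counts whatever remnant it still had as used, which keeps used <= capacity and makes a retired region fully accounted for.

#include <cassert>
#include <cstddef>

struct PartitionAccounting {
  std::size_t region_size_bytes;
  std::size_t capacity = 0;
  std::size_t used = 0;

  void make_free(std::size_t available) {            // region enters the partition
    capacity += region_size_bytes;
    used += region_size_bytes - available;            // part of it may already be consumed
    assert(used <= capacity);
  }
  void allocate(std::size_t bytes) {                  // ordinary allocation within the partition
    used += bytes;
    assert(used <= capacity);
  }
  void retire(std::size_t region_used_bytes) {        // region leaves the partition
    if (region_used_bytes < region_size_bytes) {
      used += region_size_bytes - region_used_bytes;  // the unusable remnant counts as used
    }
    assert(used <= capacity);
  }
};

For a region of size S that already had U bytes used when it was freed and ends with V bytes used at retirement, the sequence make_free(S - U), allocate(V - U), retire(V) leaves that region contributing exactly S bytes to both used and capacity.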
 
-void ShenandoahSetsOfFree::remove_from_free_sets(size_t idx) {
+void ShenandoahRegionPartitions::make_free(idx_t idx, ShenandoahFreeSetPartitionId which_partition, size_t available) {
   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
-  ShenandoahFreeMemoryType orig_set = membership(idx);
-  assert (orig_set > NotFree && orig_set < NumFreeSets, "Cannot remove from free sets if not already free");
-  _membership[idx] = NotFree;
-  shrink_bounds_if_touched(orig_set, idx);
+  assert (membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Cannot make free if already free");
+  assert (which_partition < NumPartitions, "selected free partition must be valid");
+  assert (available <= _region_size_bytes, "Available cannot exceed region size");
+
+  _membership[int(which_partition)].set_bit(idx);
+  _capacity[int(which_partition)] += _region_size_bytes;
+  _used[int(which_partition)] += _region_size_bytes - available;
+  expand_interval_if_boundary_modified(which_partition, idx, available);
+  _region_counts[int(which_partition)]++;
+}
 
-  _region_counts[orig_set]--;
-  _region_counts[NotFree]++;
+bool ShenandoahRegionPartitions::is_mutator_partition(ShenandoahFreeSetPartitionId p) {
+  return (p == ShenandoahFreeSetPartitionId::Mutator);
 }
 
+bool ShenandoahRegionPartitions::is_young_collector_partition(ShenandoahFreeSetPartitionId p) {
+  return (p == ShenandoahFreeSetPartitionId::Collector);
+}
 
-void ShenandoahSetsOfFree::make_free(size_t idx, ShenandoahFreeMemoryType which_set, size_t region_capacity) {
-  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
-  assert (_membership[idx] == NotFree, "Cannot make free if already free");
-  assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
-  _membership[idx] = which_set;
-  _capacity_of[which_set] += region_capacity;
-  expand_bounds_maybe(which_set, idx, region_capacity);
+bool ShenandoahRegionPartitions::is_old_collector_partition(ShenandoahFreeSetPartitionId p) {
+  return (p == ShenandoahFreeSetPartitionId::OldCollector);
+}
 
-  _region_counts[NotFree]--;
-  _region_counts[which_set]++;
+bool ShenandoahRegionPartitions::available_implies_empty(size_t available_in_region) {
+  return (available_in_region == _region_size_bytes);
 }
 
-void ShenandoahSetsOfFree::move_to_set(size_t idx, ShenandoahFreeMemoryType new_set, size_t region_capacity) {
+
+void ShenandoahRegionPartitions::move_from_partition_to_partition(idx_t idx, ShenandoahFreeSetPartitionId orig_partition,
+                                                                  ShenandoahFreeSetPartitionId new_partition, size_t available) {
+  ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(idx);
   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
-  assert ((new_set > NotFree) && (new_set < NumFreeSets), "New set must be valid");
-  ShenandoahFreeMemoryType orig_set = _membership[idx];
-  assert ((orig_set > NotFree) && (orig_set < NumFreeSets), "Cannot move free unless already free");
+  assert (orig_partition < NumPartitions, "Original partition must be valid");
+  assert (new_partition < NumPartitions, "New partition must be valid");
+  assert (available <= _region_size_bytes, "Available cannot exceed region size");
+  assert (_membership[int(orig_partition)].is_set(idx), "Cannot move from partition unless in partition");
+  assert ((r != nullptr) && ((r->is_trash() && (available == _region_size_bytes)) ||
+                             (r->used() + available == _region_size_bytes)),
+          "Used: " SIZE_FORMAT " + available: " SIZE_FORMAT " should equal region size: " SIZE_FORMAT,
+          ShenandoahHeap::heap()->get_region(idx)->used(), available, _region_size_bytes);
+
   // Expected transitions:
-  //  During rebuild: Mutator => Collector
-  //                  Mutator empty => Collector
-  //  During flip_to_gc:
-  //                  Mutator empty => Collector
-  //                  Mutator empty => Old Collector
-  // At start of update refs:
-  //                  Collector => Mutator
-  //                  OldCollector Empty => Mutator
-  assert((region_capacity <= _region_size_bytes && ((orig_set == Mutator && new_set == Collector) || (orig_set == Collector && new_set == Mutator)))
-      || (region_capacity == _region_size_bytes && ((orig_set == Mutator && new_set == Collector) || (orig_set == OldCollector && new_set == Mutator) || new_set == OldCollector)),
-      "Unexpected movement between sets");
-
-  _membership[idx] = new_set;
-  _capacity_of[orig_set] -= region_capacity;
-  shrink_bounds_if_touched(orig_set, idx);
-
-  _capacity_of[new_set] += region_capacity;
-  expand_bounds_maybe(new_set, idx, region_capacity);
-
-  _region_counts[orig_set]--;
-  _region_counts[new_set]++;
-}
-
-inline ShenandoahFreeMemoryType ShenandoahSetsOfFree::membership(size_t idx) const {
-  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
-  return _membership[idx];
+  //  During rebuild:         Mutator => Collector
+  //                          Mutator empty => Collector
+  //                          Mutator empty => OldCollector
+  //  During flip_to_gc:      Mutator empty => Collector
+  //                          Mutator empty => OldCollector
+  // At start of update refs: Collector => Mutator
+  //                          OldCollector Empty => Mutator
+  assert ((is_mutator_partition(orig_partition) && is_young_collector_partition(new_partition)) ||
+          (is_mutator_partition(orig_partition) &&
+           available_implies_empty(available) && is_old_collector_partition(new_partition)) ||
+          (is_young_collector_partition(orig_partition) && is_mutator_partition(new_partition)) ||
+          (is_old_collector_partition(orig_partition)
+           && available_implies_empty(available) && is_mutator_partition(new_partition)),
+          "Unexpected movement between partitions, available: " SIZE_FORMAT ", _region_size_bytes: " SIZE_FORMAT
+          ", orig_partition: %s, new_partition: %s",
+          available, _region_size_bytes, partition_name(orig_partition), partition_name(new_partition));
+
+  size_t used = _region_size_bytes - available;
+  assert (_used[int(orig_partition)] >= used,
+          "Orig partition used: " SIZE_FORMAT " must exceed moved used: " SIZE_FORMAT " within region " SSIZE_FORMAT,
+          _used[int(orig_partition)], used, idx);
+
+  _membership[int(orig_partition)].clear_bit(idx);
+  _membership[int(new_partition)].set_bit(idx);
+
+  _capacity[int(orig_partition)] -= _region_size_bytes;
+  _used[int(orig_partition)] -= used;
+  shrink_interval_if_boundary_modified(orig_partition, idx);
+
+  _capacity[int(new_partition)] += _region_size_bytes;
+  _used[int(new_partition)] += used;
+  expand_interval_if_boundary_modified(new_partition, idx, available);
+
+  _region_counts[int(orig_partition)]--;
+  _region_counts[int(new_partition)]++;
 }
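
The transition assertion above boils down to a small rule set: regions flow from the Mutator partition into the Collector partitions (only fully empty ones into OldCollector), and back to the Mutator only from a Collector partition (again, only empty ones from OldCollector). A minimal sketch, with an assumed Partition enum standing in for ShenandoahFreeSetPartitionId:

enum class Partition { Mutator, Collector, OldCollector };

static bool legal_move(Partition from, Partition to, bool region_is_empty) {
  return (from == Partition::Mutator      && to == Partition::Collector) ||
         (from == Partition::Mutator      && to == Partition::OldCollector && region_is_empty) ||
         (from == Partition::Collector    && to == Partition::Mutator) ||
         (from == Partition::OldCollector && to == Partition::Mutator && region_is_empty);
}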
 
-  // Returns true iff region idx is in the test_set free_set.  Before returning true, asserts that the free
-  // set is not empty.  Requires that test_set != NotFree or NumFreeSets.
-inline bool ShenandoahSetsOfFree::in_free_set(size_t idx, ShenandoahFreeMemoryType test_set) const {
-  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
-  if (_membership[idx] == test_set) {
-    assert (test_set == NotFree || _free_set->alloc_capacity(idx) > 0, "Free regions must have alloc capacity");
-    return true;
-  } else {
-    return false;
-  }
+const char* ShenandoahRegionPartitions::partition_membership_name(idx_t idx) const {
+  return partition_name(membership(idx));
 }
 
-inline size_t ShenandoahSetsOfFree::leftmost(ShenandoahFreeMemoryType which_set) const {
-  assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
-  size_t idx = _leftmosts[which_set];
-  if (idx >= _max) {
-    return _max;
-  } else {
-    assert (in_free_set(idx, which_set), "left-most region must be free");
-    return idx;
+inline ShenandoahFreeSetPartitionId ShenandoahRegionPartitions::membership(idx_t idx) const {
+  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
+  ShenandoahFreeSetPartitionId result = ShenandoahFreeSetPartitionId::NotFree;
+  for (uint partition_id = 0; partition_id < UIntNumPartitions; partition_id++) {
+    if (_membership[partition_id].is_set(idx)) {
+      assert(result == ShenandoahFreeSetPartitionId::NotFree, "Region should reside in only one partition");
+      result = (ShenandoahFreeSetPartitionId) partition_id;
+    }
   }
+  return result;
 }
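
The membership query can be pictured as one bitmap per free partition, as in this standalone sketch where std::bitset stands in for ShenandoahSimpleBitMap: a free region has its bit set in exactly one map, and a region with no bit set anywhere is NotFree.

#include <bitset>
#include <cstddef>

enum class PartitionId { Mutator = 0, Collector = 1, OldCollector = 2, NotFree = 3 };

const std::size_t kMaxRegions = 64;

PartitionId membership_of(const std::bitset<kMaxRegions> maps[3], std::size_t region_idx) {
  PartitionId result = PartitionId::NotFree;
  for (int p = 0; p < 3; p++) {
    if (maps[p].test(region_idx)) {      // at most one partition may claim a region
      result = static_cast<PartitionId>(p);
    }
  }
  return result;
}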
 
-inline size_t ShenandoahSetsOfFree::rightmost(ShenandoahFreeMemoryType which_set) const {
-  assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
-  size_t idx = _rightmosts[which_set];
-  assert ((_leftmosts[which_set] == _max) || in_free_set(idx, which_set), "right-most region must be free");
-  return idx;
+#ifdef ASSERT
+inline bool ShenandoahRegionPartitions::partition_id_matches(idx_t idx, ShenandoahFreeSetPartitionId test_partition) const {
+  assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
+  assert (test_partition < ShenandoahFreeSetPartitionId::NotFree, "must be a valid partition");
+
+  return membership(idx) == test_partition;
 }
+#endif
 
-inline bool ShenandoahSetsOfFree::is_empty(ShenandoahFreeMemoryType which_set) const {
-  assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
-  return (leftmost(which_set) > rightmost(which_set));
+inline bool ShenandoahRegionPartitions::is_empty(ShenandoahFreeSetPartitionId which_partition) const {
+  assert (which_partition < NumPartitions, "selected free partition must be valid");
+  return (leftmost(which_partition) > rightmost(which_partition));
 }
 
-size_t ShenandoahSetsOfFree::leftmost_empty(ShenandoahFreeMemoryType which_set) {
-  assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
-  for (size_t idx = _leftmosts_empty[which_set]; idx < _max; idx++) {
-    if ((membership(idx) == which_set) && (_free_set->alloc_capacity(idx) == _region_size_bytes)) {
-      _leftmosts_empty[which_set] = idx;
-      return idx;
-    }
+inline idx_t ShenandoahRegionPartitions::find_index_of_next_available_region(
+  ShenandoahFreeSetPartitionId which_partition, idx_t start_index) const {
+  idx_t rightmost_idx = rightmost(which_partition);
+  idx_t leftmost_idx = leftmost(which_partition);
+  if ((rightmost_idx < leftmost_idx) || (start_index > rightmost_idx)) return _max;
+  if (start_index < leftmost_idx) {
+    start_index = leftmost_idx;
   }
-  _leftmosts_empty[which_set] = _max;
-  _rightmosts_empty[which_set] = 0;
-  return _max;
+  idx_t result = _membership[int(which_partition)].find_first_set_bit(start_index, rightmost_idx + 1);
+  if (result > rightmost_idx) {
+    result = _max;
+  }
+  assert (result >= start_index, "Requires progress");
+  return result;
 }
 
-inline size_t ShenandoahSetsOfFree::rightmost_empty(ShenandoahFreeMemoryType which_set) {
-  assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
-  for (intptr_t idx = _rightmosts_empty[which_set]; idx >= 0; idx--) {
-    if ((membership(idx) == which_set) && (_free_set->alloc_capacity(idx) == _region_size_bytes)) {
-      _rightmosts_empty[which_set] = idx;
-      return idx;
-    }
+inline idx_t ShenandoahRegionPartitions::find_index_of_previous_available_region(
+  ShenandoahFreeSetPartitionId which_partition, idx_t last_index) const {
+  idx_t rightmost_idx = rightmost(which_partition);
+  idx_t leftmost_idx = leftmost(which_partition);
+  // if (leftmost_idx == max) then (last_index < leftmost_idx)
+  if (last_index < leftmost_idx) return -1;
+  if (last_index > rightmost_idx) {
+    last_index = rightmost_idx;
   }
-  _leftmosts_empty[which_set] = _max;
-  _rightmosts_empty[which_set] = 0;
-  return 0;
+  idx_t result = _membership[int(which_partition)].find_last_set_bit(-1, last_index);
+  if (result < leftmost_idx) {
+    result = -1;
+  }
+  assert (result <= last_index, "Requires progress");
+  return result;
 }
 
-inline bool ShenandoahSetsOfFree::alloc_from_left_bias(ShenandoahFreeMemoryType which_set) {
-  assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
-  return _left_to_right_bias[which_set];
+inline idx_t ShenandoahRegionPartitions::find_index_of_next_available_cluster_of_regions(
+  ShenandoahFreeSetPartitionId which_partition, idx_t start_index, size_t cluster_size) const {
+  idx_t rightmost_idx = rightmost(which_partition);
+  idx_t leftmost_idx = leftmost(which_partition);
+  if ((rightmost_idx < leftmost_idx) || (start_index > rightmost_idx)) return _max;
+  idx_t result = _membership[int(which_partition)].find_first_consecutive_set_bits(start_index, rightmost_idx + 1, cluster_size);
+  if (result > rightmost_idx) {
+    result = _max;
+  }
+  assert (result >= start_index, "Requires progress");
+  return result;
 }
 
-void ShenandoahSetsOfFree::establish_alloc_bias(ShenandoahFreeMemoryType which_set) {
-  ShenandoahHeap* heap = ShenandoahHeap::heap();
-  shenandoah_assert_heaplocked();
-  assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
-
-  size_t middle = (_leftmosts[which_set] + _rightmosts[which_set]) / 2;
-  size_t available_in_first_half = 0;
-  size_t available_in_second_half = 0;
+inline idx_t ShenandoahRegionPartitions::find_index_of_previous_available_cluster_of_regions(
+  ShenandoahFreeSetPartitionId which_partition, idx_t last_index, size_t cluster_size) const {
+  idx_t leftmost_idx = leftmost(which_partition);
+  // if (leftmost_idx == max) then (last_index < leftmost_idx)
+  if (last_index < leftmost_idx) return -1;
+  idx_t result = _membership[int(which_partition)].find_last_consecutive_set_bits(leftmost_idx - 1, last_index, cluster_size);
+  if (result <= leftmost_idx) {
+    result = -1;
+  }
+  assert (result <= last_index, "Requires progress");
+  return result;
+}
 
-  for (size_t index = _leftmosts[which_set]; index < middle; index++) {
-    if (in_free_set(index, which_set)) {
-      ShenandoahHeapRegion* r = heap->get_region(index);
-      available_in_first_half += r->free();
-    }
+idx_t ShenandoahRegionPartitions::leftmost_empty(ShenandoahFreeSetPartitionId which_partition) {
+  assert (which_partition < NumPartitions, "selected free partition must be valid");
+  idx_t max_regions = _max;
+  if (_leftmosts_empty[int(which_partition)] == _max) {
+    return _max;
   }
-  for (size_t index = middle; index <= _rightmosts[which_set]; index++) {
-    if (in_free_set(index, which_set)) {
-      ShenandoahHeapRegion* r = heap->get_region(index);
-      available_in_second_half += r->free();
+  for (idx_t idx = find_index_of_next_available_region(which_partition, _leftmosts_empty[int(which_partition)]);
+       idx < max_regions; ) {
+    assert(in_free_set(which_partition, idx), "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
+    if (_free_set->alloc_capacity(idx) == _region_size_bytes) {
+      _leftmosts_empty[int(which_partition)] = idx;
+      return idx;
     }
+    idx = find_index_of_next_available_region(which_partition, idx + 1);
   }
+  _leftmosts_empty[int(which_partition)] = _max;
+  _rightmosts_empty[int(which_partition)] = -1;
+  return _max;
+}
 
-  // We desire to first consume the sparsely distributed regions in order that the remaining regions are densely packed.
-  // Densely packing regions reduces the effort to search for a region that has sufficient memory to satisfy a new allocation
-  // request.  Regions become sparsely distributed following a Full GC, which tends to slide all regions to the front of the
-  // heap rather than allowing survivor regions to remain at the high end of the heap where we intend for them to congregate.
-
-  // TODO: In the future, we may modify Full GC so that it slides old objects to the end of the heap and young objects to the
-  // front of the heap. If this is done, we can always search survivor Collector and OldCollector regions right to left.
-  _left_to_right_bias[which_set] = (available_in_second_half > available_in_first_half);
+idx_t ShenandoahRegionPartitions::rightmost_empty(ShenandoahFreeSetPartitionId which_partition) {
+  assert (which_partition < NumPartitions, "selected free partition must be valid");
+  if (_rightmosts_empty[int(which_partition)] < 0) {
+    return -1;
+  }
+  for (idx_t idx = find_index_of_previous_available_region(which_partition, _rightmosts_empty[int(which_partition)]);
+       idx >= 0; ) {
+    assert(in_free_set(which_partition, idx), "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
+    if (_free_set->alloc_capacity(idx) == _region_size_bytes) {
+      _rightmosts_empty[int(which_partition)] = idx;
+      return idx;
+    }
+    idx = find_index_of_previous_available_region(which_partition, idx - 1);
+  }
+  _leftmosts_empty[int(which_partition)] = _max;
+  _rightmosts_empty[int(which_partition)] = -1;
+  return -1;
 }
 
+
 #ifdef ASSERT
-void ShenandoahSetsOfFree::assert_bounds() {
+void ShenandoahRegionPartitions::assert_bounds() {
 
-  size_t leftmosts[NumFreeSets];
-  size_t rightmosts[NumFreeSets];
-  size_t empty_leftmosts[NumFreeSets];
-  size_t empty_rightmosts[NumFreeSets];
+  idx_t leftmosts[UIntNumPartitions];
+  idx_t rightmosts[UIntNumPartitions];
+  idx_t empty_leftmosts[UIntNumPartitions];
+  idx_t empty_rightmosts[UIntNumPartitions];
 
-  for (int i = 0; i < NumFreeSets; i++) {
+  for (uint i = 0; i < UIntNumPartitions; i++) {
     leftmosts[i] = _max;
     empty_leftmosts[i] = _max;
-    rightmosts[i] = 0;
-    empty_rightmosts[i] = 0;
+    rightmosts[i] = -1;
+    empty_rightmosts[i] = -1;
   }
 
-  for (size_t i = 0; i < _max; i++) {
-    ShenandoahFreeMemoryType set = membership(i);
-    switch (set) {
-      case NotFree:
+  for (idx_t i = 0; i < _max; i++) {
+    ShenandoahFreeSetPartitionId partition = membership(i);
+    switch (partition) {
+      case ShenandoahFreeSetPartitionId::NotFree:
         break;
 
-      case Mutator:
-      case Collector:
-      case OldCollector:
+      case ShenandoahFreeSetPartitionId::Mutator:
+      case ShenandoahFreeSetPartitionId::Collector:
+      case ShenandoahFreeSetPartitionId::OldCollector:
       {
         size_t capacity = _free_set->alloc_capacity(i);
         bool is_empty = (capacity == _region_size_bytes);
         assert(capacity > 0, "free regions must have allocation capacity");
-        if (i < leftmosts[set]) {
-          leftmosts[set] = i;
+        if (i < leftmosts[int(partition)]) {
+          leftmosts[int(partition)] = i;
         }
-        if (is_empty && (i < empty_leftmosts[set])) {
-          empty_leftmosts[set] = i;
+        if (is_empty && (i < empty_leftmosts[int(partition)])) {
+          empty_leftmosts[int(partition)] = i;
         }
-        if (i > rightmosts[set]) {
-          rightmosts[set] = i;
+        if (i > rightmosts[int(partition)]) {
+          rightmosts[int(partition)] = i;
         }
-        if (is_empty && (i > empty_rightmosts[set])) {
-          empty_rightmosts[set] = i;
+        if (is_empty && (i > empty_rightmosts[int(partition)])) {
+          empty_rightmosts[int(partition)] = i;
         }
         break;
       }
 
-      case NumFreeSets:
       default:
         ShouldNotReachHere();
     }
   }
 
-  // Performance invariants. Failing these would not break the free set, but performance would suffer.
-  assert (leftmost(Mutator) <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, leftmost(Mutator),  _max);
-  assert (rightmost(Mutator) < _max, "rightmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, rightmost(Mutator),  _max);
-
-  assert (leftmost(Mutator) == _max || in_free_set(leftmost(Mutator), Mutator),
-          "leftmost region should be free: " SIZE_FORMAT,  leftmost(Mutator));
-  assert (leftmost(Mutator) == _max || in_free_set(rightmost(Mutator), Mutator),
-          "rightmost region should be free: " SIZE_FORMAT, rightmost(Mutator));
-
-  // If Mutator set is empty, leftmosts will both equal max, rightmosts will both equal zero.  Likewise for empty region sets.
-  size_t beg_off = leftmosts[Mutator];
-  size_t end_off = rightmosts[Mutator];
-  assert (beg_off >= leftmost(Mutator),
-          "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost(Mutator));
-  assert (end_off <= rightmost(Mutator),
-          "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, rightmost(Mutator));
-
-  beg_off = empty_leftmosts[Mutator];
-  end_off = empty_rightmosts[Mutator];
-  assert (beg_off >= leftmost_empty(Mutator),
-          "free empty regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost_empty(Mutator));
-  assert (end_off <= rightmost_empty(Mutator),
-          "free empty regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, rightmost_empty(Mutator));
-
-  // Performance invariants. Failing these would not break the free set, but performance would suffer.
-  assert (leftmost(Collector) <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, leftmost(Collector),  _max);
-  assert (rightmost(Collector) < _max, "rightmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, rightmost(Collector),  _max);
-
-  assert (leftmost(Collector) == _max || in_free_set(leftmost(Collector), Collector),
-          "leftmost region should be free: " SIZE_FORMAT,  leftmost(Collector));
-  assert (leftmost(Collector) == _max || in_free_set(rightmost(Collector), Collector),
-          "rightmost region should be free: " SIZE_FORMAT, rightmost(Collector));
-
-  // If Collector set is empty, leftmosts will both equal max, rightmosts will both equal zero.  Likewise for empty region sets.
-  beg_off = leftmosts[Collector];
-  end_off = rightmosts[Collector];
-  assert (beg_off >= leftmost(Collector),
-          "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost(Collector));
-  assert (end_off <= rightmost(Collector),
-          "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, rightmost(Collector));
-
-  beg_off = empty_leftmosts[Collector];
-  end_off = empty_rightmosts[Collector];
-  assert (beg_off >= leftmost_empty(Collector),
-          "free empty regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost_empty(Collector));
-  assert (end_off <= rightmost_empty(Collector),
-          "free empty regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, rightmost_empty(Collector));
-
-  // Performance invariants. Failing these would not break the free set, but performance would suffer.
-  assert (leftmost(OldCollector) <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, leftmost(OldCollector),  _max);
-  assert (rightmost(OldCollector) < _max, "rightmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, rightmost(OldCollector),  _max);
-
-  assert (leftmost(OldCollector) == _max || in_free_set(leftmost(OldCollector), OldCollector),
-          "leftmost region should be free: " SIZE_FORMAT,  leftmost(OldCollector));
-  assert (leftmost(OldCollector) == _max || in_free_set(rightmost(OldCollector), OldCollector),
-          "rightmost region should be free: " SIZE_FORMAT, rightmost(OldCollector));
-
-  // If OldCollector set is empty, leftmosts will both equal max, rightmosts will both equal zero.  Likewise for empty region sets.
-  beg_off = leftmosts[OldCollector];
-  end_off = rightmosts[OldCollector];
-  assert (beg_off >= leftmost(OldCollector),
-          "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost(OldCollector));
-  assert (end_off <= rightmost(OldCollector),
-          "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, rightmost(OldCollector));
-
-  beg_off = empty_leftmosts[OldCollector];
-  end_off = empty_rightmosts[OldCollector];
-  assert (beg_off >= leftmost_empty(OldCollector),
-          "free empty regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, leftmost_empty(OldCollector));
-  assert (end_off <= rightmost_empty(OldCollector),
-          "free empty regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, rightmost_empty(OldCollector));
+  // Performance invariants. Failing these would not break the free partition, but performance would suffer.
+  assert (leftmost(ShenandoahFreeSetPartitionId::Mutator) <= _max,
+          "leftmost in bounds: "  SSIZE_FORMAT " < " SSIZE_FORMAT, leftmost(ShenandoahFreeSetPartitionId::Mutator),  _max);
+  assert (rightmost(ShenandoahFreeSetPartitionId::Mutator) < _max,
+          "rightmost in bounds: "  SSIZE_FORMAT " < " SSIZE_FORMAT, rightmost(ShenandoahFreeSetPartitionId::Mutator),  _max);
+
+  assert (leftmost(ShenandoahFreeSetPartitionId::Mutator) == _max
+          || partition_id_matches(leftmost(ShenandoahFreeSetPartitionId::Mutator), ShenandoahFreeSetPartitionId::Mutator),
+          "leftmost region should be free: " SSIZE_FORMAT,  leftmost(ShenandoahFreeSetPartitionId::Mutator));
+  assert (leftmost(ShenandoahFreeSetPartitionId::Mutator) == _max
+          || partition_id_matches(rightmost(ShenandoahFreeSetPartitionId::Mutator), ShenandoahFreeSetPartitionId::Mutator),
+          "rightmost region should be free: " SSIZE_FORMAT, rightmost(ShenandoahFreeSetPartitionId::Mutator));
+
+  // If the Mutator partition is empty, both leftmosts will equal _max and both rightmosts will equal -1.
+  // Likewise for the empty-region bounds.
+  idx_t beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
+  idx_t end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
+  assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Mutator),
+          "free regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
+          beg_off, leftmost(ShenandoahFreeSetPartitionId::Mutator));
+  assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Mutator),
+          "free regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
+          end_off, rightmost(ShenandoahFreeSetPartitionId::Mutator));
+
+  beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
+  end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
+  assert (beg_off >= leftmost_empty(ShenandoahFreeSetPartitionId::Mutator),
+          "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
+          beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Mutator));
+  assert (end_off <= rightmost_empty(ShenandoahFreeSetPartitionId::Mutator),
+          "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
+          end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Mutator));
+
+  // Performance invariants. Failing these would not break the free partition, but performance would suffer.
+  assert (leftmost(ShenandoahFreeSetPartitionId::Collector) <= _max, "leftmost in bounds: "  SSIZE_FORMAT " < " SSIZE_FORMAT,
+          leftmost(ShenandoahFreeSetPartitionId::Collector),  _max);
+  assert (rightmost(ShenandoahFreeSetPartitionId::Collector) < _max, "rightmost in bounds: "  SSIZE_FORMAT " < " SSIZE_FORMAT,
+          rightmost(ShenandoahFreeSetPartitionId::Collector),  _max);
+
+  assert (leftmost(ShenandoahFreeSetPartitionId::Collector) == _max
+          || partition_id_matches(leftmost(ShenandoahFreeSetPartitionId::Collector), ShenandoahFreeSetPartitionId::Collector),
+          "leftmost region should be free: " SSIZE_FORMAT,  leftmost(ShenandoahFreeSetPartitionId::Collector));
+  assert (leftmost(ShenandoahFreeSetPartitionId::Collector) == _max
+          || partition_id_matches(rightmost(ShenandoahFreeSetPartitionId::Collector), ShenandoahFreeSetPartitionId::Collector),
+          "rightmost region should be free: " SSIZE_FORMAT, rightmost(ShenandoahFreeSetPartitionId::Collector));
+
+  // If the Collector partition is empty, both leftmosts will equal _max and both rightmosts will equal -1.
+  // Likewise for the empty-region bounds.
+  beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
+  end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
+  assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Collector),
+          "free regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
+          beg_off, leftmost(ShenandoahFreeSetPartitionId::Collector));
+  assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Collector),
+          "free regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
+          end_off, rightmost(ShenandoahFreeSetPartitionId::Collector));
+
+  beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
+  end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
+  assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
+          "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
+          beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Collector));
+  assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
+          "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
+          end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Collector));
+
+  // Performance invariants. Failing these would not break the free partition, but performance would suffer.
+  assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) <= _max, "leftmost in bounds: "  SSIZE_FORMAT " < " SSIZE_FORMAT,
+          leftmost(ShenandoahFreeSetPartitionId::OldCollector),  _max);
+  assert (rightmost(ShenandoahFreeSetPartitionId::OldCollector) < _max, "rightmost in bounds: "  SSIZE_FORMAT " < " SSIZE_FORMAT,
+          rightmost(ShenandoahFreeSetPartitionId::OldCollector),  _max);
+
+  assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) == _max
+          || partition_id_matches(leftmost(ShenandoahFreeSetPartitionId::OldCollector),
+                                  ShenandoahFreeSetPartitionId::OldCollector),
+          "leftmost region should be free: " SSIZE_FORMAT,  leftmost(ShenandoahFreeSetPartitionId::OldCollector));
+  assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) == _max
+          || partition_id_matches(rightmost(ShenandoahFreeSetPartitionId::OldCollector),
+                                  ShenandoahFreeSetPartitionId::OldCollector),
+          "rightmost region should be free: " SSIZE_FORMAT, rightmost(ShenandoahFreeSetPartitionId::OldCollector));
+
+  // If the OldCollector partition is empty, both leftmosts will equal _max and both rightmosts will equal -1.
+  // Likewise for the empty-region bounds.
+  beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
+  end_off = rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
+  assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::OldCollector),
+          "free regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
+          beg_off, leftmost(ShenandoahFreeSetPartitionId::OldCollector));
+  assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::OldCollector),
+          "free regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
+          end_off, rightmost(ShenandoahFreeSetPartitionId::OldCollector));
+
+  beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
+  end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
+  assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
+          "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
+          beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::OldCollector));
+  assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
+          "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
+          end_off, rightmost_empty(ShenandoahFreeSetPartitionId::OldCollector));
 }
 #endif
 
 ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
   _heap(heap),
-  _free_sets(max_regions, this)
+  _partitions(max_regions, this),
+  _alloc_bias_weight(0)
 {
   clear_internal();
 }
 
-// This allocates from a region within the old_collector_set.  If affiliation equals OLD, the allocation must be taken
-// from a region that is_old().  Otherwise, affiliation should be FREE, in which case this will put a previously unaffiliated
-// region into service.
-HeapWord* ShenandoahFreeSet::allocate_old_with_affiliation(ShenandoahAffiliation affiliation,
-                                                           ShenandoahAllocRequest& req, bool& in_new_region) {
-  shenandoah_assert_heaplocked();
-
-  size_t rightmost =
-    (affiliation == ShenandoahAffiliation::FREE)? _free_sets.rightmost_empty(OldCollector): _free_sets.rightmost(OldCollector);
-  size_t leftmost =
-    (affiliation == ShenandoahAffiliation::FREE)? _free_sets.leftmost_empty(OldCollector): _free_sets.leftmost(OldCollector);
-  if (_free_sets.alloc_from_left_bias(OldCollector)) {
-    // This mode picks up stragglers left by a full GC
-    for (size_t idx = leftmost; idx <= rightmost; idx++) {
-      if (_free_sets.in_free_set(idx, OldCollector)) {
-        ShenandoahHeapRegion* r = _heap->get_region(idx);
-        assert(r->is_trash() || !r->is_affiliated() || r->is_old(), "old_collector_set region has bad affiliation");
-        if (r->affiliation() == affiliation) {
-          HeapWord* result = try_allocate_in(r, req, in_new_region);
-          if (result != nullptr) {
-            return result;
-          }
-        }
-      }
-    }
-  } else {
-    // This mode picks up stragglers left by a previous concurrent GC
-    for (size_t count = rightmost + 1; count > leftmost; count--) {
-      // size_t is unsigned, need to dodge underflow when _leftmost = 0
-      size_t idx = count - 1;
-      if (_free_sets.in_free_set(idx, OldCollector)) {
-        ShenandoahHeapRegion* r = _heap->get_region(idx);
-        assert(r->is_trash() || !r->is_affiliated() || r->is_old(), "old_collector_set region has bad affiliation");
-        if (r->affiliation() == affiliation) {
-          HeapWord* result = try_allocate_in(r, req, in_new_region);
-          if (result != nullptr) {
-            return result;
-          }
-        }
-      }
-    }
-  }
-  return nullptr;
-}
-
-void ShenandoahFreeSet::add_old_collector_free_region(ShenandoahHeapRegion* region) {
+void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(ShenandoahHeapRegion* region) {
   shenandoah_assert_heaplocked();
   size_t plab_min_size_in_bytes = ShenandoahGenerationalHeap::heap()->plab_min_size() * HeapWordSize;
   size_t idx = region->index();
   size_t capacity = alloc_capacity(region);
-  assert(_free_sets.membership(idx) == NotFree, "Regions promoted in place should not be in any free set");
+  assert(_partitions.membership(idx) == ShenandoahFreeSetPartitionId::NotFree,
+         "Regions promoted in place should have been excluded from Mutator partition");
   if (capacity >= plab_min_size_in_bytes) {
-    _free_sets.make_free(idx, OldCollector, capacity);
+    _partitions.make_free(idx, ShenandoahFreeSetPartitionId::OldCollector, capacity);
     _heap->old_generation()->augment_promoted_reserve(capacity);
   }
 }
 
-HeapWord* ShenandoahFreeSet::allocate_with_affiliation(ShenandoahAffiliation affiliation,
-                                                       ShenandoahAllocRequest& req, bool& in_new_region) {
+HeapWord* ShenandoahFreeSet::allocate_from_partition_with_affiliation(ShenandoahFreeSetPartitionId which_partition,
+                                                                      ShenandoahAffiliation affiliation,
+                                                                      ShenandoahAllocRequest& req, bool& in_new_region) {
   shenandoah_assert_heaplocked();
-  size_t rightmost =
-    (affiliation == ShenandoahAffiliation::FREE)? _free_sets.rightmost_empty(Collector): _free_sets.rightmost(Collector);
-  size_t leftmost =
-    (affiliation == ShenandoahAffiliation::FREE)? _free_sets.leftmost_empty(Collector): _free_sets.leftmost(Collector);
-  for (size_t c = rightmost + 1; c > leftmost; c--) {
-    // size_t is unsigned, need to dodge underflow when _leftmost = 0
-    size_t idx = c - 1;
-    if (_free_sets.in_free_set(idx, Collector)) {
+  idx_t rightmost_collector = ((affiliation == ShenandoahAffiliation::FREE)?
+                               _partitions.rightmost_empty(which_partition): _partitions.rightmost(which_partition));
+  idx_t leftmost_collector = ((affiliation == ShenandoahAffiliation::FREE)?
+                              _partitions.leftmost_empty(which_partition): _partitions.leftmost(which_partition));
+  if (_partitions.alloc_from_left_bias(which_partition)) {
+    for (idx_t idx = leftmost_collector; idx <= rightmost_collector; ) {
+      assert(_partitions.in_free_set(which_partition, idx), "Boundaries or find_prev_last_bit failed: " SSIZE_FORMAT, idx);
       ShenandoahHeapRegion* r = _heap->get_region(idx);
       if (r->affiliation() == affiliation) {
         HeapWord* result = try_allocate_in(r, req, in_new_region);
@@ -487,6 +705,20 @@ HeapWord* ShenandoahFreeSet::allocate_with_affiliation(ShenandoahAffiliation aff
           return result;
         }
       }
+      idx = _partitions.find_index_of_next_available_region(which_partition, idx + 1);
+    }
+  } else {
+    for (idx_t idx = rightmost_collector; idx >= leftmost_collector; ) {
+      assert(_partitions.in_free_set(which_partition, idx),
+             "Boundaries or find_prev_last_bit failed: " SSIZE_FORMAT, idx);
+      ShenandoahHeapRegion* r = _heap->get_region(idx);
+      if (r->affiliation() == affiliation) {
+        HeapWord* result = try_allocate_in(r, req, in_new_region);
+        if (result != nullptr) {
+          return result;
+        }
+      }
+      idx = _partitions.find_index_of_previous_available_region(which_partition, idx - 1);
     }
   }
   log_debug(gc, free)("Could not allocate collector region with affiliation: %s for request " PTR_FORMAT,
@@ -502,12 +734,12 @@ HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool&
   // Leftmost and rightmost bounds provide enough caching to walk bitmap efficiently. Normally,
   // we would find the region to allocate at right away.
   //
-  // Allocations are biased: new application allocs go to beginning of the heap, and GC allocs
-  // go to the end. This makes application allocation faster, because we would clear lots
-  // of regions from the beginning most of the time.
+  // Allocations are biased: GC allocations are taken from the high end of the heap.  Regular (and TLAB)
+  // mutator allocations are taken from the middle of heap, below the memory reserved for Collector.
+  // Humongous mutator allocations are taken from the bottom of the heap.
   //
-  // Free set maintains mutator and collector views, and normally they allocate in their views only,
-  // unless we special cases for stealing and mixed allocations.
+  // Free set maintains mutator and collector partitions.  Normally, each allocates only from its partition,
+  // except in special cases when the collector steals regions from the mutator partition.
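+  //
+  // A rough sketch of the intended layout (illustrative only; partition boundaries shift each time the free set
+  // is rebuilt):
+  //
+  //   low addresses                                                   high addresses
+  //   | humongous (mutator) | regular/TLAB (mutator) | Collector | OldCollector |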
 
   // Overwrite with non-zero (non-NULL) values only if necessary for allocation bookkeeping.
   bool allow_new_region = true;
@@ -539,19 +771,65 @@ HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool&
     case ShenandoahAllocRequest::_alloc_tlab:
     case ShenandoahAllocRequest::_alloc_shared: {
       // Try to allocate in the mutator view
-      // Allocate within mutator free from high memory to low so as to preserve low memory for humongous allocations
-      if (!_free_sets.is_empty(Mutator)) {
-        // Use signed idx.  Otherwise, loop will never terminate.
-        int leftmost = (int) _free_sets.leftmost(Mutator);
-        for (int idx = (int) _free_sets.rightmost(Mutator); idx >= leftmost; idx--) {
-          ShenandoahHeapRegion* r = _heap->get_region(idx);
-          if (_free_sets.in_free_set(idx, Mutator) && (allow_new_region || r->is_affiliated())) {
+      if (_alloc_bias_weight-- <= 0) {
+        // We have observed that regions not collected in previous GC cycle tend to congregate at one end or the other
+        // of the heap.  Typically, these are the more recently engaged regions and the objects in these regions have not
+        // yet had a chance to die (and/or are treated as floating garbage).  If we use the same allocation bias on each
+        // GC pass, these "most recently" engaged regions for GC pass N will also be the "most recently" engaged regions
+        // for GC pass N+1, and the relatively large amount of live data and/or floating garbage introduced
+        // during the most recent GC pass may once again prevent the region from being collected.  We have found that
+        // alternating the allocation behavior between GC passes improves evacuation performance by 3-7% on certain
+        // benchmarks.  In the best case, this has the effect of consuming these partially consumed regions before
+        // the start of the next mark cycle so all of their garbage can be efficiently reclaimed.
+        //
+        // First, finish consuming regions that are already partially consumed so as to more tightly limit ranges of
+        // available regions.  Other potential benefits:
+        //  1. Eventual collection set has fewer regions because we have packed newly allocated objects into fewer regions
+        //  2. We preserve the "empty" regions longer into the GC cycle, reducing likelihood of allocation failures
+        //     late in the GC cycle.
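+        //
+        // Illustrative example (numbers are hypothetical): if the Mutator interval is [10, 90] and its fully-empty
+        // subinterval is [30, 80], then non_empty_on_left == 20 and non_empty_on_right == 10.  Since fewer partially
+        // used regions sit on the right, we bias allocation from left to right so that the larger cluster of
+        // partially used regions at low addresses is consumed first.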
+        idx_t non_empty_on_left = (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator)
+                                     - _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator));
+        idx_t non_empty_on_right = (_partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator)
+                                      - _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator));
+        _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::Mutator, (non_empty_on_right < non_empty_on_left));
+        _alloc_bias_weight = _InitialAllocBiasWeight;
+      }
+      if (!_partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::Mutator)) {
+        // Allocate within mutator free from high memory to low so as to preserve low memory for humongous allocations
+        if (!_partitions.is_empty(ShenandoahFreeSetPartitionId::Mutator)) {
+          // Use signed idx.  Otherwise, loop will never terminate.
+          idx_t leftmost = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator);
+          for (idx_t idx = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator); idx >= leftmost; ) {
+            assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
+                   "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
+            ShenandoahHeapRegion* r = _heap->get_region(idx);
             // try_allocate_in() increases used if the allocation is successful.
             HeapWord* result;
             size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab)? req.min_size(): req.size();
             if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
               return result;
             }
+            idx = _partitions.find_index_of_previous_available_region(ShenandoahFreeSetPartitionId::Mutator, idx - 1);
+          }
+        }
+      } else {
+        // Allocate from low to high memory.  This keeps the range of fully empty regions more tightly packed.
+        // Note that the most recently allocated regions tend not to be evacuated in a given GC cycle.  So this
+        // tends to accumulate "fragmented" uncollected regions in high memory.
+        if (!_partitions.is_empty(ShenandoahFreeSetPartitionId::Mutator)) {
+          // Use signed idx.  Otherwise, loop will never terminate.
+          idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
+          for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); idx <= rightmost; ) {
+            assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
+                   "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
+            ShenandoahHeapRegion* r = _heap->get_region(idx);
+            // try_allocate_in() increases used if the allocation is successful.
+            HeapWord* result;
+            size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab)? req.min_size(): req.size();
+            if ((alloc_capacity(r) >= min_size) && ((result = try_allocate_in(r, req, in_new_region)) != nullptr)) {
+              return result;
+            }
+            idx = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, idx + 1);
           }
         }
       }
@@ -559,53 +837,34 @@ HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool&
       break;
     }
     case ShenandoahAllocRequest::_alloc_gclab:
-      // GCLABs are for evacuation so we must be in evacuation phase.  If this allocation is successful, increment
-      // the relevant evac_expended rather than used value.
+      // GCLABs are for evacuation so we must be in evacuation phase.
 
-    case ShenandoahAllocRequest::_alloc_plab:
-      // PLABs always reside in old-gen and are only allocated during evacuation phase.
+    case ShenandoahAllocRequest::_alloc_plab: {
+      // PLABs always reside in old-gen and are only allocated during
+      // evacuation phase.
 
     case ShenandoahAllocRequest::_alloc_shared_gc: {
-      if (!_heap->mode()->is_generational()) {
-        // size_t is unsigned, need to dodge underflow when _leftmost = 0
-        // Fast-path: try to allocate in the collector view first
-        for (size_t c = _free_sets.rightmost(Collector) + 1; c > _free_sets.leftmost(Collector); c--) {
-          size_t idx = c - 1;
-          if (_free_sets.in_free_set(idx, Collector)) {
-            HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
-            if (result != nullptr) {
-              return result;
-            }
-          }
-        }
-      } else {
-        // First try to fit into a region that is already in use in the same generation.
-        HeapWord* result;
-        if (req.is_old()) {
-          result = allocate_old_with_affiliation(req.affiliation(), req, in_new_region);
-        } else {
-          result = allocate_with_affiliation(req.affiliation(), req, in_new_region);
-        }
+      // Fast-path: try to allocate in the collector view first
+      HeapWord* result;
+      result = allocate_from_partition_with_affiliation(req.is_old()? ShenandoahFreeSetPartitionId::OldCollector:
+                                                        ShenandoahFreeSetPartitionId::Collector,
+                                                        req.affiliation(), req, in_new_region);
+      if (result != nullptr) {
+        return result;
+      } else if (allow_new_region) {
+        // Try a free region that is dedicated to GC allocations.
+        result = allocate_from_partition_with_affiliation(req.is_old()? ShenandoahFreeSetPartitionId::OldCollector:
+                                                          ShenandoahFreeSetPartitionId::Collector,
+                                                          ShenandoahAffiliation::FREE, req, in_new_region);
         if (result != nullptr) {
           return result;
         }
-        if (allow_new_region) {
-          // Then try a free region that is dedicated to GC allocations.
-          if (req.is_old()) {
-            result = allocate_old_with_affiliation(FREE, req, in_new_region);
-          } else {
-            result = allocate_with_affiliation(FREE, req, in_new_region);
-          }
-          if (result != nullptr) {
-            return result;
-          }
-        }
       }
+
       // No dice. Can we borrow space from mutator view?
       if (!ShenandoahEvacReserveOverflow) {
         return nullptr;
       }
-
       if (!allow_new_region && req.is_old() && (_heap->young_generation()->free_unaffiliated_regions() > 0)) {
         // This allows us to flip a mutator region to old_collector
         allow_new_region = true;
@@ -617,7 +876,7 @@ HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool&
       // promotions, so we already have an assurance that any additional memory set aside for old-gen will be used
       // only for old-gen evacuations.
 
-      // Also TODO:
+      // TODO:
       // if (GC is idle (out of cycle) and mutator allocation fails and there is memory reserved in Collector
       // or OldCollector sets, transfer a region of memory so that we can satisfy the allocation request, and
       // immediately trigger the start of GC.  Is better to satisfy the allocation than to trigger out-of-cycle
@@ -626,31 +885,31 @@ HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool&
 
       if (allow_new_region) {
         // Try to steal an empty region from the mutator view.
-        for (size_t c = _free_sets.rightmost_empty(Mutator) + 1; c > _free_sets.leftmost_empty(Mutator); c--) {
-          size_t idx = c - 1;
-          if (_free_sets.in_free_set(idx, Mutator)) {
-            ShenandoahHeapRegion* r = _heap->get_region(idx);
-            if (can_allocate_from(r)) {
-              if (req.is_old()) {
-                flip_to_old_gc(r);
-              } else {
-                flip_to_gc(r);
-              }
-              HeapWord *result = try_allocate_in(r, req, in_new_region);
-              if (result != nullptr) {
-                log_debug(gc, free)("Flipped region " SIZE_FORMAT " to gc for request: " PTR_FORMAT, idx, p2i(&req));
-                return result;
-              }
+        idx_t rightmost_mutator = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator);
+        idx_t leftmost_mutator =  _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator);
+        for (idx_t idx = rightmost_mutator; idx >= leftmost_mutator; ) {
+          assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx),
+                 "Boundaries or find_prev_last_bit failed: " SSIZE_FORMAT, idx);
+          ShenandoahHeapRegion* r = _heap->get_region(idx);
+          if (can_allocate_from(r)) {
+            if (req.is_old()) {
+              flip_to_old_gc(r);
+            } else {
+              flip_to_gc(r);
             }
+            // Region r is entirely empty.  If try_allocate_in() fails on region r, something else is really wrong.
+            // Don't bother to retry with other regions.
+            log_debug(gc, free)("Flipped region " SIZE_FORMAT " to gc for request: " PTR_FORMAT, idx, p2i(&req));
+            return try_allocate_in(r, req, in_new_region);
           }
+          idx = _partitions.find_index_of_previous_available_region(ShenandoahFreeSetPartitionId::Mutator, idx - 1);
         }
       }
-
-      // No dice. Do not try to mix mutator and GC allocations, because
-      // URWM moves due to GC allocations would expose unparsable mutator
-      // allocations.
+      // No dice. Do not try to mix mutator and GC allocations, because adjusting region UWM
+      // due to GC allocations would expose unparsable mutator allocations.
       break;
     }
+    }
     default:
       ShouldNotReachHere();
   }
@@ -669,7 +928,7 @@ HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool&
 // at the start of the free space.
 //
 // This is merely a helper method to use for the purpose of such a calculation.
-size_t get_usable_free_words(size_t free_bytes) {
+size_t ShenandoahFreeSet::get_usable_free_words(size_t free_bytes) const {
   // e.g. card_size is 512, card_shift is 9, min_fill_size() is 8
   //      free is 514
   //      usable_free is 512, which is decreased to 0
@@ -703,9 +962,7 @@ HeapWord* ShenandoahFreeSet::allocate_aligned_plab(size_t size, ShenandoahAllocR
   HeapWord* result = r->allocate_aligned(size, req, CardTable::card_size());
   assert(result != nullptr, "Allocation cannot fail");
   assert(r->top() <= r->end(), "Allocation cannot span end of region");
-  assert(req.actual_size() == size, "Should not have needed to adjust size for PLAB.");
   assert(is_aligned(result, CardTable::card_size_in_words()), "Align by design");
-
   return result;
 }
 
@@ -714,10 +971,14 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
   if (_heap->is_concurrent_weak_root_in_progress() && r->is_trash()) {
     return nullptr;
   }
-
+  HeapWord* result = nullptr;
   try_recycle_trashed(r);
-  if (!r->is_affiliated()) {
-    ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
+  in_new_region = r->is_empty();
+
+  if (in_new_region) {
+    log_debug(gc)("Using new region (" SIZE_FORMAT ") for %s (" PTR_FORMAT ").",
+                       r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
+    assert(!r->is_affiliated(), "New region " SIZE_FORMAT " should be unaffiliated", r->index());
     r->set_affiliation(req.affiliation());
     if (r->is_old()) {
       // Any OLD region allocated during concurrent coalesce-and-fill does not need to be coalesced and filled because
@@ -731,46 +992,52 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
     }
     _heap->generation_for(r->affiliation())->increment_affiliated_region_count();
 
+#ifdef ASSERT
+    ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
     assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
     assert(ctx->is_bitmap_clear_range(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");
-  } else if (r->affiliation() != req.affiliation()) {
-    assert(_heap->mode()->is_generational(), "Request for %s from %s region should only happen in generational mode.",
-           req.affiliation_name(), r->affiliation_name());
-    return nullptr;
-  }
-
-  in_new_region = r->is_empty();
-  HeapWord* result = nullptr;
-
-  if (in_new_region) {
-    log_debug(gc, free)("Using new region (" SIZE_FORMAT ") for %s (" PTR_FORMAT ").",
+#endif
+    log_debug(gc)("Using new region (" SIZE_FORMAT ") for %s (" PTR_FORMAT ").",
                        r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
+  } else {
+    assert(r->is_affiliated(), "Region " SIZE_FORMAT " that is not new should be affiliated", r->index());
+    if (r->affiliation() != req.affiliation()) {
+      assert(_heap->mode()->is_generational(), "Request for %s from %s region should only happen in generational mode.",
+             req.affiliation_name(), r->affiliation_name());
+      return nullptr;
+    }
   }
 
   // req.size() is in words, r->free() is in bytes.
   if (req.is_lab_alloc()) {
+    size_t adjusted_size = req.size();
+    size_t free = r->free();    // free represents bytes available within region r
     if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
+      // This is a PLAB allocation
       assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
-      assert(_free_sets.in_free_set(r->index(), OldCollector), "PLABS must be allocated in old_collector_free regions");
-      // Need to assure that plabs are aligned on multiple of card region.
-      // Since we have Elastic TLABs, align sizes up. They may be decreased to fit in the usable
-      // memory remaining in the region (which will also be aligned to cards).
-      size_t adjusted_size = align_up(req.size(), CardTable::card_size_in_words());
-      size_t adjusted_min_size = align_up(req.min_size(), CardTable::card_size_in_words());
-      size_t usable_free = get_usable_free_words(r->free());
+      assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, r->index()),
+             "PLABS must be allocated in old_collector_free regions");
 
+      // Need to assure that PLABs are aligned on a multiple of the card size.
+      // Convert the unaligned free bytes into an aligned number of usable words.
+      size_t usable_free = get_usable_free_words(free);
       if (adjusted_size > usable_free) {
         adjusted_size = usable_free;
       }
-
-      if (adjusted_size >= adjusted_min_size) {
+      adjusted_size = align_down(adjusted_size, CardTable::card_size_in_words());
+      if (adjusted_size >= req.min_size()) {
         result = allocate_aligned_plab(adjusted_size, req, r);
+        assert(result != nullptr, "allocate must succeed");
+        req.set_actual_size(adjusted_size);
+      } else {
+        // Otherwise, leave result == nullptr because the adjusted size is smaller than min size.
+        log_trace(gc, free)("Failed to shrink PLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
+                            " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size());
       }
-      // Otherwise, leave result == nullptr because the adjusted size is smaller than min size.
     } else {
       // This is a GCLAB or a TLAB allocation
-      size_t adjusted_size = req.size();
-      size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
+      // Convert free from unaligned bytes to aligned number of words
+      free = align_down(free >> LogHeapWordSize, MinObjAlignment);
       if (adjusted_size > free) {
         adjusted_size = free;
       }
@@ -780,7 +1047,7 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
         req.set_actual_size(adjusted_size);
       } else {
         log_trace(gc, free)("Failed to shrink TLAB or GCLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
-                           " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size());
+                            " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size());
       }
     }
   } else {
@@ -792,12 +1059,11 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
     }
   }
 
-  ShenandoahGeneration* generation = _heap->generation_for(req.affiliation());
   if (result != nullptr) {
     // Allocation successful, bump stats:
     if (req.is_mutator_alloc()) {
       assert(req.is_young(), "Mutator allocations always come from young generation.");
-      _free_sets.increase_used(Mutator, req.actual_size() * HeapWordSize);
+      _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, req.actual_size() * HeapWordSize);
     } else {
       assert(req.is_gc_alloc(), "Should be gc_alloc since req wasn't mutator alloc");
 
@@ -810,79 +1076,103 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
       // next evacuation pass.
       r->set_update_watermark(r->top());
       if (r->is_old()) {
+        _partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, req.actual_size() * HeapWordSize);
         assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "old-gen allocations use PLAB or shared allocation");
         // for plabs, we'll sort the difference between evac and promotion usage when we retire the plab
+      } else {
+        _partitions.increase_used(ShenandoahFreeSetPartitionId::Collector, req.actual_size() * HeapWordSize);
       }
     }
   }
 
-  if (result == nullptr || alloc_capacity(r) < PLAB::min_size() * HeapWordSize) {
-    // Region cannot afford this and is likely to not afford future allocations. Retire it.
-    //
-    // While this seems a bit harsh, especially in the case when this large allocation does not
-    // fit but the next small one would, we are risking to inflate scan times when lots of
-    // almost-full regions precede the fully-empty region where we want to allocate the entire TLAB.
+  static const size_t min_capacity = (size_t) (ShenandoahHeapRegion::region_size_bytes() * (1.0 - 1.0 / ShenandoahEvacWaste));
+  size_t ac = alloc_capacity(r);
+
+  if (((result == nullptr) && (ac < min_capacity)) || (ac < PLAB::min_size() * HeapWordSize)) {
+    // Regardless of whether this allocation succeeded, if the remaining memory is less than PLAB::min_size(), retire this region.
+    // Note that retire_from_partition() increases used to account for waste.
+
+    // Also, if this allocation request failed and the memory consumed within this region multiplied by ShenandoahEvacWaste
+    // exceeds the region size, then retire the region so that subsequent searches can find available memory more quickly.
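+    //
+    // For illustration only (assuming the default ShenandoahEvacWaste of 1.2): min_capacity is roughly
+    // region_size_bytes * (1 - 1/1.2), i.e. about one sixth of a region, so a failed allocation retires the region
+    // once less than about one sixth of it remains available.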
 
-    // Record the remainder as allocation waste
     size_t idx = r->index();
+    ShenandoahFreeSetPartitionId orig_partition;
     if (req.is_mutator_alloc()) {
-      size_t waste = r->free();
-      if (waste > 0) {
-        _free_sets.increase_used(Mutator, waste);
-        // This one request could cause several regions to be "retired", so we must accumulate the waste
-        req.set_waste((waste >> LogHeapWordSize) + req.waste());
-      }
-      assert(_free_sets.membership(idx) == Mutator, "Must be mutator free: " SIZE_FORMAT, idx);
+      orig_partition = ShenandoahFreeSetPartitionId::Mutator;
+    } else if (req.type() == ShenandoahAllocRequest::_alloc_gclab) {
+      orig_partition = ShenandoahFreeSetPartitionId::Collector;
+    } else if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
+      orig_partition = ShenandoahFreeSetPartitionId::OldCollector;
     } else {
-      assert(_free_sets.membership(idx) == Collector || _free_sets.membership(idx) == OldCollector,
-             "Must be collector or old-collector free: " SIZE_FORMAT, idx);
+      assert(req.type() == ShenandoahAllocRequest::_alloc_shared_gc, "Unexpected allocation type");
+      if (req.is_old()) {
+        orig_partition = ShenandoahFreeSetPartitionId::OldCollector;
+      } else {
+        orig_partition = ShenandoahFreeSetPartitionId::Collector;
+      }
     }
-    // This region is no longer considered free (in any set)
-    _free_sets.remove_from_free_sets(idx);
-    _free_sets.assert_bounds();
+    _partitions.retire_from_partition(orig_partition, idx, r->used());
+    _partitions.assert_bounds();
   }
   return result;
 }
 
 HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
+  assert(req.is_mutator_alloc(), "All humongous allocations are performed by mutator");
   shenandoah_assert_heaplocked();
 
   size_t words_size = req.size();
-  size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
+  idx_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 
   assert(req.is_young(), "Humongous regions always allocated in YOUNG");
   ShenandoahGeneration* generation = _heap->generation_for(req.affiliation());
 
   // Check if there are enough regions left to satisfy allocation.
-  if (_heap->mode()->is_generational()) {
-    size_t avail_young_regions = generation->free_unaffiliated_regions();
-    if (num > _free_sets.count(Mutator) || (num > avail_young_regions)) {
-      return nullptr;
-    }
-  } else {
-    if (num > _free_sets.count(Mutator)) {
-      return nullptr;
-    }
+  if (num > (idx_t) _partitions.count(ShenandoahFreeSetPartitionId::Mutator)) {
+    return nullptr;
   }
 
+  idx_t start_range = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator);
+  idx_t end_range = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator) + 1;
+  idx_t last_possible_start = end_range - num;
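+  // Illustrative example: if the empty Mutator interval is [4, 19] and num == 5, then end_range == 20 and
+  // last_possible_start == 15; a window starting beyond index 15 cannot hold num contiguous regions.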
+
   // Find the continuous interval of $num regions, starting from $beg and ending in $end,
   // inclusive. Contiguous allocations are biased to the beginning.
-
-  size_t beg = _free_sets.leftmost(Mutator);
-  size_t end = beg;
+  idx_t beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator,
+                                                                          start_range, num);
+  if (beg > last_possible_start) {
+    // Hit the end, goodbye
+    return nullptr;
+  }
+  idx_t end = beg;
 
   while (true) {
-    if (end >= _free_sets.max()) {
-      // Hit the end, goodbye
-      return nullptr;
-    }
-
-    // If regions are not adjacent, then current [beg; end] is useless, and we may fast-forward.
-    // If region is not completely free, the current [beg; end] is useless, and we may fast-forward.
-    if (!_free_sets.in_free_set(end, Mutator) || !can_allocate_from(_heap->get_region(end))) {
-      end++;
-      beg = end;
-      continue;
+    // beg was found by find_index_of_next_available_cluster_of_regions(), so the num regions starting at beg are already
+    // known to belong to the Mutator partition and membership need not be re-confirmed.  If a region is not completely
+    // free, the current [beg; end] window is useless and we fast-forward.  When extending the existing range, we exploit
+    // the fact that certain regions are already known to be in the Mutator free set.
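+    //
+    // Illustrative example: with beg == 10, num == 5 and region 12 found to be non-empty, the window must slide
+    // from [10, 14] to [13, 17] (slide_delta == 3).  Regions 15, 16 and 17 newly enter the window; each must be in
+    // the Mutator partition, otherwise we jump ahead to the next cluster of num free regions beyond the offender.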
+    while (!can_allocate_from(_heap->get_region(end))) {
+      // region[end] is not empty, so we restart our search after region[end]
+      idx_t slide_delta = end + 1 - beg;
+      if (beg + slide_delta > last_possible_start) {
+        // no room to slide
+        return nullptr;
+      }
+      for (idx_t span_end = beg + num; slide_delta > 0; slide_delta--) {
+        if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, span_end)) {
+          beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator,
+                                                                            span_end + 1, num);
+          break;
+        } else {
+          beg++;
+          span_end++;
+        }
+      }
+      // Here, either beg identifies a range of num regions all of which are in the Mutator free set, or beg > last_possible_start
+      if (beg > last_possible_start) {
+        // Hit the end, goodbye
+        return nullptr;
+      }
+      end = beg;
     }
 
     if ((end - beg + 1) == num) {
@@ -894,10 +1184,9 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
   }
 
   size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
-  ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
-
+  bool is_generational = _heap->mode()->is_generational();
   // Initialize regions:
-  for (size_t i = beg; i <= end; i++) {
+  for (idx_t i = beg; i <= end; i++) {
     ShenandoahHeapRegion* r = _heap->get_region(i);
     try_recycle_trashed(r);
 
@@ -921,15 +1210,19 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
     r->set_affiliation(req.affiliation());
     r->set_update_watermark(r->bottom());
     r->set_top(r->bottom() + used_words);
-
-    // While individual regions report their true use, all humongous regions are marked used in the free set.
-    _free_sets.remove_from_free_sets(r->index());
   }
   generation->increase_affiliated_region_count(num);
+  if (remainder != 0) {
+    // Record this remainder as allocation waste
+    _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
+  }
+
+  // retire_range_from_partition() will adjust bounds on Mutator free set if appropriate
+  _partitions.retire_range_from_partition(ShenandoahFreeSetPartitionId::Mutator, beg, end);
 
   size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num;
-  _free_sets.increase_used(Mutator, total_humongous_size);
-  _free_sets.assert_bounds();
+  _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_humongous_size);
+  _partitions.assert_bounds();
   req.set_actual_size(words_size);
   if (remainder != 0) {
     req.set_waste(ShenandoahHeapRegion::region_size_words() - remainder);
@@ -937,36 +1230,6 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
   return _heap->get_region(beg)->bottom();
 }
 
-// Returns true iff this region is entirely available, either because it is empty() or because it has been found to represent
-// immediate trash and we'll be able to immediately recycle it.  Note that we cannot recycle immediate trash if
-// concurrent weak root processing is in progress.
-bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) const {
-  return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
-}
-
-bool ShenandoahFreeSet::can_allocate_from(size_t idx) const {
-  ShenandoahHeapRegion* r = _heap->get_region(idx);
-  return can_allocate_from(r);
-}
-
-size_t ShenandoahFreeSet::alloc_capacity(size_t idx) const {
-  ShenandoahHeapRegion* r = _heap->get_region(idx);
-  return alloc_capacity(r);
-}
-
-size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) const {
-  if (r->is_trash()) {
-    // This would be recycled on allocation path
-    return ShenandoahHeapRegion::region_size_bytes();
-  } else {
-    return r->free();
-  }
-}
-
-bool ShenandoahFreeSet::has_alloc_capacity(ShenandoahHeapRegion *r) const {
-  return alloc_capacity(r) > 0;
-}
-
 void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
   if (r->is_trash()) {
     r->recycle();
@@ -976,7 +1239,6 @@ void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
 void ShenandoahFreeSet::recycle_trash() {
   // lock is not reentrable, check we don't have it
   shenandoah_assert_not_heaplocked();
-
   for (size_t i = 0; i < _heap->num_regions(); i++) {
     ShenandoahHeapRegion* r = _heap->get_region(i);
     if (r->is_trash()) {
@@ -990,14 +1252,14 @@ void ShenandoahFreeSet::recycle_trash() {
 void ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) {
   size_t idx = r->index();
 
-  assert(_free_sets.in_free_set(idx, Mutator), "Should be in mutator view");
-  // Note: can_allocate_from(r) means r is entirely empty
+  assert(_partitions.partition_id_matches(idx, ShenandoahFreeSetPartitionId::Mutator), "Should be in mutator view");
   assert(can_allocate_from(r), "Should not be allocated");
 
-  ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::cast(_heap);
+  ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
   size_t region_capacity = alloc_capacity(r);
-  _free_sets.move_to_set(idx, OldCollector, region_capacity);
-  _free_sets.assert_bounds();
+  _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
+                                               ShenandoahFreeSetPartitionId::OldCollector, region_capacity);
+  _partitions.assert_bounds();
   _heap->old_generation()->augment_evacuation_reserve(region_capacity);
   bool transferred = gen_heap->generation_sizer()->transfer_to_old(1);
   if (!transferred) {
@@ -1011,12 +1273,13 @@ void ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) {
 void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
   size_t idx = r->index();
 
-  assert(_free_sets.in_free_set(idx, Mutator), "Should be in mutator view");
+  assert(_partitions.partition_id_matches(idx, ShenandoahFreeSetPartitionId::Mutator), "Should be in mutator view");
   assert(can_allocate_from(r), "Should not be allocated");
 
-  size_t region_capacity = alloc_capacity(r);
-  _free_sets.move_to_set(idx, Collector, region_capacity);
-  _free_sets.assert_bounds();
+  size_t ac = alloc_capacity(r);
+  _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
+                                               ShenandoahFreeSetPartitionId::Collector, ac);
+  _partitions.assert_bounds();
 
   // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
   // to recycle trash before attempting to allocate anything in the region.
@@ -1028,33 +1291,55 @@ void ShenandoahFreeSet::clear() {
 }
 
 void ShenandoahFreeSet::clear_internal() {
-  _free_sets.clear_all();
+  _partitions.make_all_regions_unavailable();
+
+  _alloc_bias_weight = 0;
+  _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::Mutator, true);
+  _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::Collector, false);
+  _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::OldCollector, false);
 }
 
-// This function places all is_old() regions that have allocation capacity into the old_collector set.  It places
-// all other regions (not is_old()) that have allocation capacity into the mutator_set.  Subsequently, we will
-// move some of the mutator regions into the collector set or old_collector set with the intent of packing
-// old_collector memory into the highest (rightmost) addresses of the heap and the collector memory into the
-// next highest addresses of the heap, with mutator memory consuming the lowest addresses of the heap.
 void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regions, size_t &old_cset_regions,
                                                          size_t &first_old_region, size_t &last_old_region,
                                                          size_t &old_region_count) {
+  clear_internal();
+
   first_old_region = _heap->num_regions();
   last_old_region = 0;
   old_region_count = 0;
   old_cset_regions = 0;
   young_cset_regions = 0;
+
+  size_t region_size_bytes = _partitions.region_size_bytes();
+  size_t max_regions = _partitions.max_regions();
+
+  size_t mutator_leftmost = max_regions;
+  size_t mutator_rightmost = 0;
+  size_t mutator_leftmost_empty = max_regions;
+  size_t mutator_rightmost_empty = 0;
+  size_t mutator_regions = 0;
+  size_t mutator_used = 0;
+
+  size_t old_collector_leftmost = max_regions;
+  size_t old_collector_rightmost = 0;
+  size_t old_collector_leftmost_empty = max_regions;
+  size_t old_collector_rightmost_empty = 0;
+  size_t old_collector_regions = 0;
+  size_t old_collector_used = 0;
+
   for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
     ShenandoahHeapRegion* region = _heap->get_region(idx);
     if (region->is_trash()) {
-      // Trashed regions represent regions that had been in the collection set but have not yet been "cleaned up".
+      // Trashed regions represent regions that had been in the collection set but have not yet been "cleaned up".
+      // The cset regions are not "trashed" until we have finished update refs.
       if (region->is_old()) {
         old_cset_regions++;
       } else {
         assert(region->is_young(), "Trashed region should be old or young");
         young_cset_regions++;
       }
-    } else if (region->is_old() && region->is_regular()) {
+    } else if (region->is_old()) {
+      // Count both humongous and regular regions, but don't count trash (cset) regions.
       old_region_count++;
       if (first_old_region > idx) {
         first_old_region = idx;
@@ -1063,87 +1348,166 @@ void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regi
     }
     if (region->is_alloc_allowed() || region->is_trash()) {
       assert(!region->is_cset(), "Shouldn't be adding cset regions to the free set");
-      assert(_free_sets.in_free_set(idx, NotFree), "We are about to make region free; it should not be free already");
 
-      // Do not add regions that would almost surely fail allocation.  Note that PLAB::min_size() is typically less than ShenandoahGenerationalHeap::plab_min_size()
-      if (alloc_capacity(region) < PLAB::min_size() * HeapWordSize) continue;
-
-      if (region->is_old()) {
-        _free_sets.make_free(idx, OldCollector, alloc_capacity(region));
-        log_debug(gc, free)(
-          "  Adding Region " SIZE_FORMAT  " (Free: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s) to old collector set",
-          idx, byte_size_in_proper_unit(region->free()), proper_unit_for_byte_size(region->free()),
-          byte_size_in_proper_unit(region->used()), proper_unit_for_byte_size(region->used()));
-      } else {
-        _free_sets.make_free(idx, Mutator, alloc_capacity(region));
-        log_debug(gc, free)(
-          "  Adding Region " SIZE_FORMAT " (Free: " SIZE_FORMAT "%s, Used: " SIZE_FORMAT "%s) to mutator set",
-          idx, byte_size_in_proper_unit(region->free()), proper_unit_for_byte_size(region->free()),
-          byte_size_in_proper_unit(region->used()), proper_unit_for_byte_size(region->used()));
+      // Do not add regions that would almost surely fail allocation
+      size_t ac = alloc_capacity(region);
+      if (ac > PLAB::min_size() * HeapWordSize) {
+        if (region->is_trash() || !region->is_old()) {
+          // Both young and old collected regions (trashed) are placed into the Mutator set
+          _partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::Mutator);
+          if (idx < mutator_leftmost) {
+            mutator_leftmost = idx;
+          }
+          if (idx > mutator_rightmost) {
+            mutator_rightmost = idx;
+          }
+          if (ac == region_size_bytes) {
+            if (idx < mutator_leftmost_empty) {
+              mutator_leftmost_empty = idx;
+            }
+            if (idx > mutator_rightmost_empty) {
+              mutator_rightmost_empty = idx;
+            }
+          }
+          mutator_regions++;
+          mutator_used += (region_size_bytes - ac);
+        } else {
+          // !region->is_trash() && region->is_old()
+          _partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::OldCollector);
+          if (idx < old_collector_leftmost) {
+            old_collector_leftmost = idx;
+          }
+          if (idx > old_collector_rightmost) {
+            old_collector_rightmost = idx;
+          }
+          if (ac == region_size_bytes) {
+            if (idx < old_collector_leftmost_empty) {
+              old_collector_leftmost_empty = idx;
+            }
+            if (idx > old_collector_rightmost_empty) {
+              old_collector_rightmost_empty = idx;
+            }
+          }
+          old_collector_regions++;
+          old_collector_used += (region_size_bytes - ac);
+        }
       }
     }
   }
+  log_debug(gc)("  At end of prep_to_rebuild, mutator_leftmost: " SIZE_FORMAT
+                ", mutator_rightmost: " SIZE_FORMAT
+                ", mutator_leftmost_empty: " SIZE_FORMAT
+                ", mutator_rightmost_empty: " SIZE_FORMAT
+                ", mutator_regions: " SIZE_FORMAT
+                ", mutator_used: " SIZE_FORMAT,
+                mutator_leftmost, mutator_rightmost, mutator_leftmost_empty, mutator_rightmost_empty,
+                mutator_regions, mutator_used);
+
+  log_debug(gc)("  old_collector_leftmost: " SIZE_FORMAT
+                ", old_collector_rightmost: " SIZE_FORMAT
+                ", old_collector_leftmost_empty: " SIZE_FORMAT
+                ", old_collector_rightmost_empty: " SIZE_FORMAT
+                ", old_collector_regions: " SIZE_FORMAT
+                ", old_collector_used: " SIZE_FORMAT,
+                old_collector_leftmost, old_collector_rightmost, old_collector_leftmost_empty, old_collector_rightmost_empty,
+                old_collector_regions, old_collector_used);
+
+  _partitions.establish_mutator_intervals(mutator_leftmost, mutator_rightmost, mutator_leftmost_empty, mutator_rightmost_empty,
+                                          mutator_regions, mutator_used);
+  _partitions.establish_old_collector_intervals(old_collector_leftmost, old_collector_rightmost, old_collector_leftmost_empty,
+                                                old_collector_rightmost_empty, old_collector_regions, old_collector_used);
+  log_debug(gc)("  After find_regions_with_alloc_capacity(), Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "],"
+                "  Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
+                _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
+                _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
+                _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector),
+                _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector));
 }
 
-// Move no more than cset_regions from the existing Collector and OldCollector free sets to the Mutator free set.
-// This is called from outside the heap lock.
-void ShenandoahFreeSet::move_collector_sets_to_mutator(size_t max_xfer_regions) {
+// Returns number of regions transferred, adds transferred bytes to var argument bytes_transferred
+size_t ShenandoahFreeSet::transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId which_collector,
+                                                                                   size_t max_xfer_regions,
+                                                                                   size_t& bytes_transferred) {
+  shenandoah_assert_heaplocked();
   size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
-  size_t collector_empty_xfer = 0;
-  size_t collector_not_empty_xfer = 0;
-  size_t old_collector_empty_xfer = 0;
+  size_t transferred_regions = 0;
+  idx_t rightmost = _partitions.rightmost_empty(which_collector);
+  for (idx_t idx = _partitions.leftmost_empty(which_collector); (transferred_regions < max_xfer_regions) && (idx <= rightmost); ) {
+    assert(_partitions.in_free_set(which_collector, idx), "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, idx);
+    // Note: can_allocate_from() denotes that region is entirely empty
+    if (can_allocate_from(idx)) {
+      _partitions.move_from_partition_to_partition(idx, which_collector, ShenandoahFreeSetPartitionId::Mutator, region_size_bytes);
+      transferred_regions++;
+      bytes_transferred += region_size_bytes;
+    }
+    idx = _partitions.find_index_of_next_available_region(which_collector, idx + 1);
+  }
+  return transferred_regions;
+}
 
-  // Process empty regions within the Collector free set
-  if ((max_xfer_regions > 0) && (_free_sets.leftmost_empty(Collector) <= _free_sets.rightmost_empty(Collector))) {
-    ShenandoahHeapLocker locker(_heap->lock());
-    for (size_t idx = _free_sets.leftmost_empty(Collector);
-         (max_xfer_regions > 0) && (idx <= _free_sets.rightmost_empty(Collector)); idx++) {
-      if (_free_sets.in_free_set(idx, Collector) && can_allocate_from(idx)) {
-        _free_sets.move_to_set(idx, Mutator, region_size_bytes);
-        max_xfer_regions--;
-        collector_empty_xfer += region_size_bytes;
-      }
+// Returns number of regions transferred, adds transferred bytes to var argument bytes_transferred
+size_t ShenandoahFreeSet::transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId collector_id,
+                                                                                       size_t max_xfer_regions,
+                                                                                       size_t& bytes_transferred) {
+  shenandoah_assert_heaplocked();
+  size_t transferred_regions = 0;
+  idx_t rightmost = _partitions.rightmost(collector_id);
+  for (idx_t idx = _partitions.leftmost(collector_id); (transferred_regions < max_xfer_regions) && (idx <= rightmost); ) {
+    assert(_partitions.in_free_set(collector_id, idx), "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, idx);
+    size_t ac = alloc_capacity(idx);
+    if (ac > 0) {
+      _partitions.move_from_partition_to_partition(idx, collector_id, ShenandoahFreeSetPartitionId::Mutator, ac);
+      transferred_regions++;
+      bytes_transferred += ac;
     }
+    idx = _partitions.find_index_of_next_available_region(collector_id, idx + 1);
   }
+  return transferred_regions;
+}
 
-  // Process empty regions within the OldCollector free set
-  size_t old_collector_regions = 0;
-  if ((max_xfer_regions > 0) && (_free_sets.leftmost_empty(OldCollector) <= _free_sets.rightmost_empty(OldCollector))) {
+void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_regions) {
+  size_t collector_xfer = 0;
+  size_t old_collector_xfer = 0;
+
+  // Process empty regions within the Collector free partition
+  if ((max_xfer_regions > 0) &&
+      (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Collector)
+       <= _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Collector))) {
     ShenandoahHeapLocker locker(_heap->lock());
-    for (size_t idx = _free_sets.leftmost_empty(OldCollector);
-         (max_xfer_regions > 0) && (idx <= _free_sets.rightmost_empty(OldCollector)); idx++) {
-      if (_free_sets.in_free_set(idx, OldCollector) && can_allocate_from(idx)) {
-        _free_sets.move_to_set(idx, Mutator, region_size_bytes);
-        max_xfer_regions--;
-        old_collector_empty_xfer += region_size_bytes;
-        old_collector_regions++;
-      }
-    }
+    max_xfer_regions -=
+      transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId::Collector, max_xfer_regions,
+                                                               collector_xfer);
+  }
+
+  // Process empty regions within the OldCollector free partition
+  if ((max_xfer_regions > 0) &&
+      (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::OldCollector)
+       <= _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::OldCollector))) {
+    ShenandoahHeapLocker locker(_heap->lock());
+    size_t old_collector_regions =
+      transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId::OldCollector, max_xfer_regions,
+                                                               old_collector_xfer);
+    max_xfer_regions -= old_collector_regions;
     if (old_collector_regions > 0) {
       ShenandoahGenerationalHeap::cast(_heap)->generation_sizer()->transfer_to_young(old_collector_regions);
     }
   }
 
-  // If there are any non-empty regions within Collector set, we can also move them to the Mutator free set
-  if ((max_xfer_regions > 0) && (_free_sets.leftmost(Collector) <= _free_sets.rightmost(Collector))) {
+  // If there are any non-empty regions within Collector partition, we can also move them to the Mutator free partition
+  if ((max_xfer_regions > 0) && (_partitions.leftmost(ShenandoahFreeSetPartitionId::Collector)
+                                 <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector))) {
     ShenandoahHeapLocker locker(_heap->lock());
-    for (size_t idx = _free_sets.leftmost(Collector); (max_xfer_regions > 0) && (idx <= _free_sets.rightmost(Collector)); idx++) {
-      size_t alloc_capacity = this->alloc_capacity(idx);
-      if (_free_sets.in_free_set(idx, Collector) && (alloc_capacity > 0)) {
-        _free_sets.move_to_set(idx, Mutator, alloc_capacity);
-        max_xfer_regions--;
-        collector_not_empty_xfer += alloc_capacity;
-      }
-    }
+    max_xfer_regions -=
+      transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId::Collector, max_xfer_regions,
+                                                                   collector_xfer);
   }
 
-  size_t collector_xfer = collector_empty_xfer + collector_not_empty_xfer;
-  size_t total_xfer = collector_xfer + old_collector_empty_xfer;
+  size_t total_xfer = collector_xfer + old_collector_xfer;
   log_info(gc, free)("At start of update refs, moving " SIZE_FORMAT "%s to Mutator free set from Collector Reserve ("
                      SIZE_FORMAT "%s) and from Old Collector Reserve (" SIZE_FORMAT "%s)",
                      byte_size_in_proper_unit(total_xfer), proper_unit_for_byte_size(total_xfer),
                      byte_size_in_proper_unit(collector_xfer), proper_unit_for_byte_size(collector_xfer),
-                     byte_size_in_proper_unit(old_collector_empty_xfer), proper_unit_for_byte_size(old_collector_empty_xfer));
+                     byte_size_in_proper_unit(old_collector_xfer), proper_unit_for_byte_size(old_collector_xfer));
 }
 
 
@@ -1156,30 +1520,48 @@ void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_cset_regions, size_t &o
   log_debug(gc, free)("Rebuilding FreeSet");
 
   // This places regions that have alloc_capacity into the old_collector set if they identify as is_old() or the
-  // mutator set otherwise.
+  // mutator set otherwise.  All trashed (cset) regions are affiliated young and placed in mutator set.
   find_regions_with_alloc_capacity(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
 }
 
-void ShenandoahFreeSet::rebuild(size_t young_cset_regions, size_t old_cset_regions, bool have_evacuation_reserves) {
+void ShenandoahFreeSet::establish_generation_sizes(size_t young_region_count, size_t old_region_count) {
+  assert(young_region_count + old_region_count == ShenandoahHeap::heap()->num_regions(), "Sanity");
+  if (ShenandoahHeap::heap()->mode()->is_generational()) {
+    ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
+    ShenandoahOldGeneration* old_gen = heap->old_generation();
+    ShenandoahYoungGeneration* young_gen = heap->young_generation();
+    size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
+
+    old_gen->set_capacity(old_region_count * region_size_bytes);
+    young_gen->set_capacity(young_region_count * region_size_bytes);
+  }
+}
+
+void ShenandoahFreeSet::finish_rebuild(size_t young_cset_regions, size_t old_cset_regions, size_t old_region_count,
+                                       bool have_evacuation_reserves) {
   shenandoah_assert_heaplocked();
   size_t young_reserve(0), old_reserve(0);
 
-  if (!_heap->mode()->is_generational()) {
-    young_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve;
-    old_reserve = 0;
-  } else {
+  if (_heap->mode()->is_generational()) {
     compute_young_and_old_reserves(young_cset_regions, old_cset_regions, have_evacuation_reserves,
                                    young_reserve, old_reserve);
-
+  } else {
+    young_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve;
+    old_reserve = 0;
   }
 
-  reserve_regions(young_reserve, old_reserve);
-  _free_sets.establish_alloc_bias(OldCollector);
-  _free_sets.assert_bounds();
+  // Move some of the mutator regions in the Collector and OldCollector partitions in order to satisfy
+  // young_reserve and old_reserve.
+  reserve_regions(young_reserve, old_reserve, old_region_count);
+  size_t young_region_count = _heap->num_regions() - old_region_count;
+  establish_generation_sizes(young_region_count, old_region_count);
+  establish_old_collector_alloc_bias();
+  _partitions.assert_bounds();
   log_status();
 }
 
-void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions, bool have_evacuation_reserves,
+void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions,
+                                                       bool have_evacuation_reserves,
                                                        size_t& young_reserve_result, size_t& old_reserve_result) const {
   shenandoah_assert_generational();
   const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
@@ -1193,18 +1575,19 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions
 
   // Add in the regions we anticipate to be freed by evacuation of the collection set
   old_unaffiliated_regions += old_cset_regions;
-  old_available += old_cset_regions * region_size_bytes;
   young_unaffiliated_regions += young_cset_regions;
 
   // Consult old-region balance to make adjustments to current generation capacities and availability.
   // The generation region transfers take place after we rebuild.
   const ssize_t old_region_balance = old_generation->get_region_balance();
   if (old_region_balance != 0) {
+#ifdef ASSERT
     if (old_region_balance > 0) {
       assert(old_region_balance <= checked_cast<ssize_t>(old_unaffiliated_regions), "Cannot transfer regions that are affiliated");
     } else {
       assert(0 - old_region_balance <= checked_cast<ssize_t>(young_unaffiliated_regions), "Cannot transfer regions that are affiliated");
     }
+#endif
 
     ssize_t xfer_bytes = old_region_balance * checked_cast<ssize_t>(region_size_bytes);
     old_available -= xfer_bytes;
@@ -1239,11 +1622,13 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions
   // free set.  Because of this, old_available may not have enough memory to represent the intended reserve.  Adjust
   // the reserve downward to account for this possibility. This loss is part of the reason why the original budget
   // was adjusted with ShenandoahOldEvacWaste and ShenandoahOldPromoWaste multipliers.
-  if (old_reserve_result > _free_sets.capacity_of(OldCollector) + old_unaffiliated_regions * region_size_bytes) {
-    old_reserve_result = _free_sets.capacity_of(OldCollector) + old_unaffiliated_regions * region_size_bytes;
+  if (old_reserve_result >
+      _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes) {
+    old_reserve_result =
+      _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes;
   }
 
-  if (old_reserve_result > young_unaffiliated_regions * region_size_bytes) {
+  if (young_reserve_result > young_unaffiliated_regions * region_size_bytes) {
     young_reserve_result = young_unaffiliated_regions * region_size_bytes;
   }
 }
@@ -1251,61 +1636,118 @@ void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions
 // Having placed all regions that have allocation capacity into the mutator set if they identify as is_young()
 // or into the old collector set if they identify as is_old(), move some of these regions from the mutator set
 // into the collector set or old collector set in order to assure that the memory available for allocations within
-// the collector set is at least to_reserve, and the memory available for allocations within the old collector set
+// the collector set is at least to_reserve and the memory available for allocations within the old collector set
 // is at least to_reserve_old.
-void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old) {
+void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old, size_t &old_region_count) {
   for (size_t i = _heap->num_regions(); i > 0; i--) {
     size_t idx = i - 1;
     ShenandoahHeapRegion* r = _heap->get_region(idx);
-    if (!_free_sets.in_free_set(idx, Mutator)) {
+    if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx)) {
       continue;
     }
 
     size_t ac = alloc_capacity(r);
     assert (ac > 0, "Membership in free set implies has capacity");
-    assert (!r->is_old(), "mutator_is_free regions should not be affiliated OLD");
+    assert (!r->is_old() || r->is_trash(), "Except for trash, mutator_is_free regions should not be affiliated OLD");
 
-    bool move_to_old = _free_sets.capacity_of(OldCollector) < to_reserve_old;
-    bool move_to_young = _free_sets.capacity_of(Collector) < to_reserve;
+    bool move_to_old_collector = _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) < to_reserve_old;
+    bool move_to_collector = _partitions.capacity_of(ShenandoahFreeSetPartitionId::Collector) < to_reserve;
 
-    if (!move_to_old && !move_to_young) {
+    if (!move_to_collector && !move_to_old_collector) {
       // We've satisfied both to_reserve and to_reserved_old
       break;
     }
 
-    if (move_to_old) {
+    if (move_to_old_collector) {
+      // We give priority to OldCollector partition because we desire to pack OldCollector regions into higher
+      // addresses than Collector regions.  Presumably, OldCollector regions are more "stable" and less likely to
+      // be collected in the near future.
       if (r->is_trash() || !r->is_affiliated()) {
-        // OLD regions that have available memory are already in the old_collector free set
-        _free_sets.move_to_set(idx, OldCollector, ac);
-        log_debug(gc, free)("  Shifting region " SIZE_FORMAT " from mutator_free to old_collector_free", idx);
+        // OLD regions that have available memory are already in the old_collector free set.
+        _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
+                                                     ShenandoahFreeSetPartitionId::OldCollector, ac);
+        log_debug(gc)("  Shifting region " SIZE_FORMAT " from mutator_free to old_collector_free", idx);
+        log_debug(gc)("  Shifted Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "],"
+                      "  Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
+                      _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
+                      _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
+                      _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector),
+                      _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector));
+        old_region_count++;
         continue;
       }
     }
 
-    if (move_to_young) {
+    if (move_to_collector) {
       // Note: In a previous implementation, regions were only placed into the survivor space (collector_is_free) if
-      // they were entirely empty.  I'm not sure I understand the rationale for that.  That alternative behavior would
-      // tend to mix survivor objects with ephemeral objects, making it more difficult to reclaim the memory for the
-      // ephemeral objects.  It also delays aging of regions, causing promotion in place to be delayed.
-      _free_sets.move_to_set(idx, Collector, ac);
+      // they were entirely empty.  This has the effect of causing new Mutator allocation to reside next to objects
+      // that have already survived at least one GC, mixing ephemeral with longer-lived objects in the same region.
+      // Any objects that have survived a GC are less likely to immediately become garbage, so a region that contains
+      // survivor objects is less likely to be selected for the collection set.  This alternative implementation allows
+      // survivor regions to continue accumulating other survivor objects, and makes it more likely that ephemeral objects
+      // occupy regions composed entirely of ephemeral objects.  These regions are highly likely to be included in the next
+      // collection set, and they are easily evacuated because they have low density of live objects.
+      _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
+                                                   ShenandoahFreeSetPartitionId::Collector, ac);
       log_debug(gc)("  Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx);
+      log_debug(gc)("  Shifted Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "],"
+                    "  Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
+                    _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
+                    _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
+                    _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector),
+                    _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector));
     }
   }
 
   if (LogTarget(Info, gc, free)::is_enabled()) {
-    size_t old_reserve = _free_sets.capacity_of(OldCollector);
+    size_t old_reserve = _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector);
     if (old_reserve < to_reserve_old) {
       log_info(gc, free)("Wanted " PROPERFMT " for old reserve, but only reserved: " PROPERFMT,
                          PROPERFMTARGS(to_reserve_old), PROPERFMTARGS(old_reserve));
     }
-    size_t young_reserve = _free_sets.capacity_of(Collector);
-    if (young_reserve < to_reserve) {
-      log_info(gc, free)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT,
-                         PROPERFMTARGS(to_reserve), PROPERFMTARGS(young_reserve));
+    size_t reserve = _partitions.capacity_of(ShenandoahFreeSetPartitionId::Collector);
+    if (reserve < to_reserve) {
+      log_debug(gc)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT,
+                    PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve));
     }
   }
 }
 
+void ShenandoahFreeSet::establish_old_collector_alloc_bias() {
+  ShenandoahHeap* heap = ShenandoahHeap::heap();
+  shenandoah_assert_heaplocked();
+
+  idx_t left_idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector);
+  idx_t right_idx = _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector);
+  idx_t middle = (left_idx + right_idx) / 2;
+  size_t available_in_first_half = 0;
+  size_t available_in_second_half = 0;
+
+  for (idx_t index = left_idx; index < middle; index++) {
+    if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, index)) {
+      ShenandoahHeapRegion* r = heap->get_region((size_t) index);
+      available_in_first_half += r->free();
+    }
+  }
+  for (idx_t index = middle; index <= right_idx; index++) {
+    if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, index)) {
+      ShenandoahHeapRegion* r = heap->get_region(index);
+      available_in_second_half += r->free();
+    }
+  }
+
+  // We want to consume the sparsely distributed regions first so that the remaining free regions are densely packed.
+  // Densely packing regions reduces the effort to search for a region that has sufficient memory to satisfy a new allocation
+  // request.  Regions become sparsely distributed following a Full GC, which tends to slide all regions to the front of the
+  // heap rather than allowing survivor regions to remain at the high end of the heap where we intend for them to congregate.
+
+  // TODO: In the future, we may modify Full GC so that it slides old objects to the end of the heap and young objects to the
+  // front of the heap. If this is done, we can always search survivor Collector and OldCollector regions right to left.
+  _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::OldCollector,
+                                          (available_in_second_half > available_in_first_half));
+}
+
+
 void ShenandoahFreeSet::log_status() {
   shenandoah_assert_heaplocked();
 
@@ -1333,36 +1775,41 @@ void ShenandoahFreeSet::log_status() {
     for (uint i = 0; i < BUFFER_SIZE; i++) {
       buffer[i] = '\0';
     }
-    log_debug(gc, free)("FreeSet map legend:"
+
+    log_debug(gc)("FreeSet map legend:"
                        " M:mutator_free C:collector_free O:old_collector_free"
                        " H:humongous ~:retired old _:retired young");
-    log_debug(gc, free)(" mutator free range [" SIZE_FORMAT ".." SIZE_FORMAT "], "
-                       " collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "], "
-                       "old collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "] allocates from %s",
-                       _free_sets.leftmost(Mutator), _free_sets.rightmost(Mutator),
-                       _free_sets.leftmost(Collector), _free_sets.rightmost(Collector),
-                       _free_sets.leftmost(OldCollector), _free_sets.rightmost(OldCollector),
-                       _free_sets.alloc_from_left_bias(OldCollector)? "left to right": "right to left");
+    log_debug(gc)(" mutator free range [" SIZE_FORMAT ".." SIZE_FORMAT "] allocating from %s, "
+                  " collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "], "
+                  "old collector free range [" SIZE_FORMAT ".." SIZE_FORMAT "] allocates from %s",
+                  _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
+                  _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
+                  _partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::Mutator)? "left to right": "right to left",
+                  _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector),
+                  _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector),
+                  _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector),
+                  _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector),
+                  _partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::OldCollector)? "left to right": "right to left");
 
     for (uint i = 0; i < _heap->num_regions(); i++) {
       ShenandoahHeapRegion *r = _heap->get_region(i);
       uint idx = i % 64;
       if ((i != 0) && (idx == 0)) {
-        log_debug(gc, free)(" %6u: %s", i-64, buffer);
+        log_debug(gc)(" %6u: %s", i-64, buffer);
       }
-      if (_free_sets.in_free_set(i, Mutator)) {
-        assert(!r->is_old(), "Old regions should not be in mutator_free set");
+      if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, i)) {
         size_t capacity = alloc_capacity(r);
+        assert(!r->is_old() || r->is_trash(), "Old regions except trash regions should not be in mutator_free set");
         available_mutator += capacity;
         consumed_mutator += region_size_bytes - capacity;
         buffer[idx] = (capacity == region_size_bytes)? 'M': 'm';
-      } else if (_free_sets.in_free_set(i, Collector)) {
-        assert(!r->is_old(), "Old regions should not be in collector_free set");
+      } else if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, i)) {
         size_t capacity = alloc_capacity(r);
+        assert(!r->is_old() || r->is_trash(), "Old regions except trash regions should not be in collector_free set");
         available_collector += capacity;
         consumed_collector += region_size_bytes - capacity;
         buffer[idx] = (capacity == region_size_bytes)? 'C': 'c';
-      } else if (_free_sets.in_free_set(i, OldCollector)) {
+      } else if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, i)) {
         size_t capacity = alloc_capacity(r);
         available_old_collector += capacity;
         consumed_old_collector += region_size_bytes - capacity;
@@ -1393,9 +1840,7 @@ void ShenandoahFreeSet::log_status() {
     } else {
       remnant = 64;
     }
-    log_debug(gc, free)(" %6u: %s", (uint) (_heap->num_regions() - remnant), buffer);
-    size_t total_young = retired_young + retired_young_humongous;
-    size_t total_old = retired_old + retired_old_humongous;
+    log_debug(gc)(" %6u: %s", (uint) (_heap->num_regions() - remnant), buffer);
   }
 #endif
 
@@ -1405,7 +1850,7 @@ void ShenandoahFreeSet::log_status() {
     LogStream ls(lt);
 
     {
-      size_t last_idx = 0;
+      idx_t last_idx = 0;
       size_t max = 0;
       size_t max_contig = 0;
       size_t empty_contig = 0;
@@ -1414,8 +1859,9 @@ void ShenandoahFreeSet::log_status() {
       size_t total_free = 0;
       size_t total_free_ext = 0;
 
-      for (size_t idx = _free_sets.leftmost(Mutator); idx <= _free_sets.rightmost(Mutator); idx++) {
-        if (_free_sets.in_free_set(idx, Mutator)) {
+      for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator);
+           idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator); idx++) {
+        if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx)) {
           ShenandoahHeapRegion *r = _heap->get_region(idx);
           size_t free = alloc_capacity(r);
           max = MAX2(max, free);
@@ -1439,10 +1885,10 @@ void ShenandoahFreeSet::log_status() {
       size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
       size_t free = capacity() - used();
 
-      assert(free == total_free, "Sum of free within mutator regions (" SIZE_FORMAT
-             ") should match mutator capacity (" SIZE_FORMAT ") minus mutator used (" SIZE_FORMAT ")",
-             total_free, capacity(), used());
-
+      // Since certain regions that belonged to the Mutator free partition at the time of the most recent rebuild may have
+      // been retired, the sum of used and capacities within regions that are still in the Mutator free partition may not
+      // match the internally tracked values of used() and free().
+      assert(free == total_free, "Free memory should match");
       ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
                byte_size_in_proper_unit(total_free),    proper_unit_for_byte_size(total_free),
                byte_size_in_proper_unit(max),           proper_unit_for_byte_size(max),
@@ -1459,14 +1905,16 @@ void ShenandoahFreeSet::log_status() {
       ls.print(SIZE_FORMAT "%% external, ", frag_ext);
 
       size_t frag_int;
-      if (_free_sets.count(Mutator) > 0) {
-        frag_int = (100 * (total_used / _free_sets.count(Mutator)) / ShenandoahHeapRegion::region_size_bytes());
+      if (_partitions.count(ShenandoahFreeSetPartitionId::Mutator) > 0) {
+        frag_int = (100 * (total_used / _partitions.count(ShenandoahFreeSetPartitionId::Mutator))
+                    / ShenandoahHeapRegion::region_size_bytes());
       } else {
         frag_int = 0;
       }
       ls.print(SIZE_FORMAT "%% internal; ", frag_int);
       ls.print("Used: " SIZE_FORMAT "%s, Mutator Free: " SIZE_FORMAT,
-               byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used), _free_sets.count(Mutator));
+               byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used),
+               _partitions.count(ShenandoahFreeSetPartitionId::Mutator));
     }
 
     {
@@ -1474,8 +1922,9 @@ void ShenandoahFreeSet::log_status() {
       size_t total_free = 0;
       size_t total_used = 0;
 
-      for (size_t idx = _free_sets.leftmost(Collector); idx <= _free_sets.rightmost(Collector); idx++) {
-        if (_free_sets.in_free_set(idx, Collector)) {
+      for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector);
+           idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector); idx++) {
+        if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx)) {
           ShenandoahHeapRegion *r = _heap->get_region(idx);
           size_t free = alloc_capacity(r);
           max = MAX2(max, free);
@@ -1494,8 +1943,9 @@ void ShenandoahFreeSet::log_status() {
       size_t total_free = 0;
       size_t total_used = 0;
 
-      for (size_t idx = _free_sets.leftmost(OldCollector); idx <= _free_sets.rightmost(OldCollector); idx++) {
-        if (_free_sets.in_free_set(idx, OldCollector)) {
+      for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector);
+           idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector); idx++) {
+        if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, idx)) {
           ShenandoahHeapRegion *r = _heap->get_region(idx);
           size_t free = alloc_capacity(r);
           max = MAX2(max, free);
@@ -1513,8 +1963,6 @@ void ShenandoahFreeSet::log_status() {
 
 HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
   shenandoah_assert_heaplocked();
-
-  // Allocation request is known to satisfy all memory budgeting constraints.
   if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
     switch (req.type()) {
       case ShenandoahAllocRequest::_alloc_shared:
@@ -1537,79 +1985,49 @@ HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_
   }
 }
 
-size_t ShenandoahFreeSet::unsafe_peek_free() const {
-  // Deliberately not locked, this method is unsafe when free set is modified.
-
-  for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) {
-    if (index < _free_sets.max() && _free_sets.in_free_set(index, Mutator)) {
-      ShenandoahHeapRegion* r = _heap->get_region(index);
-      if (r->free() >= MinTLABSize) {
-        return r->free();
-      }
-    }
-  }
-
-  // It appears that no regions left
-  return 0;
-}
-
 void ShenandoahFreeSet::print_on(outputStream* out) const {
-  out->print_cr("Mutator Free Set: " SIZE_FORMAT "", _free_sets.count(Mutator));
-  for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) {
-    if (_free_sets.in_free_set(index, Mutator)) {
-      _heap->get_region(index)->print_on(out);
-    }
+  out->print_cr("Mutator Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Mutator));
+  idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
+  for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
+    assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
+           "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
+    _heap->get_region(index)->print_on(out);
+    index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);
   }
-  out->print_cr("Collector Free Set: " SIZE_FORMAT "", _free_sets.count(Collector));
-  for (size_t index = _free_sets.leftmost(Collector); index <= _free_sets.rightmost(Collector); index++) {
-    if (_free_sets.in_free_set(index, Collector)) {
-      _heap->get_region(index)->print_on(out);
-    }
+  out->print_cr("Collector Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Collector));
+  rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector);
+  for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector); index <= rightmost; ) {
+    assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, index),
+           "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
+    _heap->get_region(index)->print_on(out);
+    index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Collector, index + 1);
   }
   if (_heap->mode()->is_generational()) {
-    out->print_cr("Old Collector Free Set: " SIZE_FORMAT "", _free_sets.count(OldCollector));
-    for (size_t index = _free_sets.leftmost(OldCollector); index <= _free_sets.rightmost(OldCollector); index++) {
-      if (_free_sets.in_free_set(index, OldCollector)) {
+    out->print_cr("Old Collector Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::OldCollector));
+    for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector);
+         index <= _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector); index++) {
+      if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, index)) {
         _heap->get_region(index)->print_on(out);
       }
     }
   }
 }
 
-/*
- * Internal fragmentation metric: describes how fragmented the heap regions are.
- *
- * It is derived as:
- *
- *               sum(used[i]^2, i=0..k)
- *   IF = 1 - ------------------------------
- *              C * sum(used[i], i=0..k)
- *
- * ...where k is the number of regions in computation, C is the region capacity, and
- * used[i] is the used space in the region.
- *
- * The non-linearity causes IF to be lower for the cases where the same total heap
- * used is densely packed. For example:
- *   a) Heap is completely full  => IF = 0
- *   b) Heap is half full, first 50% regions are completely full => IF = 0
- *   c) Heap is half full, each region is 50% full => IF = 1/2
- *   d) Heap is quarter full, first 50% regions are completely full => IF = 0
- *   e) Heap is quarter full, each region is 25% full => IF = 3/4
- *   f) Heap has one small object per each region => IF =~ 1
- */
 double ShenandoahFreeSet::internal_fragmentation() {
   double squared = 0;
   double linear = 0;
   int count = 0;
 
-  for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) {
-    if (_free_sets.in_free_set(index, Mutator)) {
-      ShenandoahHeapRegion* r = _heap->get_region(index);
-      size_t used = r->used();
-      squared += used * used;
-      linear += used;
-      count++;
-    }
+  idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
+  for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
+    assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
+           "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
+    ShenandoahHeapRegion* r = _heap->get_region(index);
+    size_t used = r->used();
+    squared += used * used;
+    linear += used;
+    count++;
+    index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);
   }
 
   if (count > 0) {
@@ -1620,43 +2038,31 @@ double ShenandoahFreeSet::internal_fragmentation() {
   }
 }
 
-/*
- * External fragmentation metric: describes how fragmented the heap is.
- *
- * It is derived as:
- *
- *   EF = 1 - largest_contiguous_free / total_free
- *
- * For example:
- *   a) Heap is completely empty => EF = 0
- *   b) Heap is completely full => EF = 0
- *   c) Heap is first-half full => EF = 1/2
- *   d) Heap is half full, full and empty regions interleave => EF =~ 1
- */
 double ShenandoahFreeSet::external_fragmentation() {
-  size_t last_idx = 0;
+  idx_t last_idx = 0;
   size_t max_contig = 0;
   size_t empty_contig = 0;
 
   size_t free = 0;
 
-  for (size_t index = _free_sets.leftmost(Mutator); index <= _free_sets.rightmost(Mutator); index++) {
-    if (_free_sets.in_free_set(index, Mutator)) {
-      ShenandoahHeapRegion* r = _heap->get_region(index);
-      if (r->is_empty()) {
-        free += ShenandoahHeapRegion::region_size_bytes();
-        if (last_idx + 1 == index) {
-          empty_contig++;
-        } else {
-          empty_contig = 1;
-        }
+  idx_t rightmost = _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator);
+  for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator); index <= rightmost; ) {
+    assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, index),
+           "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, index);
+    ShenandoahHeapRegion* r = _heap->get_region(index);
+    if (r->is_empty()) {
+      free += ShenandoahHeapRegion::region_size_bytes();
+      if (last_idx + 1 == index) {
+        empty_contig++;
       } else {
-        empty_contig = 0;
+        empty_contig = 1;
       }
-
-      max_contig = MAX2(max_contig, empty_contig);
-      last_idx = index;
+    } else {
+      empty_contig = 0;
     }
+    max_contig = MAX2(max_contig, empty_contig);
+    last_idx = index;
+    index = _partitions.find_index_of_next_available_region(ShenandoahFreeSetPartitionId::Mutator, index + 1);
   }
 
   if (free > 0) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp
index 15a3469651e..677fd40e50b 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFreeSet.hpp
@@ -29,99 +29,212 @@
 
 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
 #include "gc/shenandoah/shenandoahHeap.hpp"
-
-enum ShenandoahFreeMemoryType : uint8_t {
-  NotFree,
-  Mutator,
-  Collector,
-  OldCollector,
-  NumFreeSets
+#include "gc/shenandoah/shenandoahSimpleBitMap.hpp"
+
+// Each ShenandoahHeapRegion is associated with a ShenandoahFreeSetPartitionId.
+enum class ShenandoahFreeSetPartitionId : uint8_t {
+  Mutator,                      // Region is in the Mutator free set: available memory is available to mutators.
+  Collector,                    // Region is in the Collector free set: available memory is reserved for evacuations.
+  OldCollector,                 // Region is in the Old Collector free set:
+                                //    available memory is reserved for old evacuations and for promotions.
+  NotFree                       // Region is in no free set: it has no available memory
 };
 
-class ShenandoahSetsOfFree {
+// We do not maintain counts, capacity, or used for regions that are not free.  Informally, if a region is NotFree, it is
+// in no partition.  NumPartitions represents the size of an array that may be indexed by Mutator, Collector, or OldCollector.
+#define NumPartitions           (ShenandoahFreeSetPartitionId::NotFree)
+#define IntNumPartitions     int(ShenandoahFreeSetPartitionId::NotFree)
+#define UIntNumPartitions   uint(ShenandoahFreeSetPartitionId::NotFree)
 
-private:
-  size_t _max;                  // The maximum number of heap regions
-  ShenandoahFreeSet* _free_set;
-  size_t _region_size_bytes;
-  ShenandoahFreeMemoryType* _membership;
-  size_t _leftmosts[NumFreeSets];
-  size_t _rightmosts[NumFreeSets];
-  size_t _leftmosts_empty[NumFreeSets];
-  size_t _rightmosts_empty[NumFreeSets];
-  size_t _capacity_of[NumFreeSets];
-  size_t _used_by[NumFreeSets];
-  bool _left_to_right_bias[NumFreeSets];
-  size_t _region_counts[NumFreeSets];
-
-  inline void shrink_bounds_if_touched(ShenandoahFreeMemoryType set, size_t idx);
-  inline void expand_bounds_maybe(ShenandoahFreeMemoryType set, size_t idx, size_t capacity);
-
-  // Restore all state variables to initial default state.
-  void clear_internal();
+// ShenandoahRegionPartitions provides an abstraction to help organize the implementation of ShenandoahFreeSet.  This
+// class implements partitioning of regions into distinct sets.  Each ShenandoahHeapRegion is in the Mutator free set,
+// the Collector free set, the OldCollector free set, or in no free set (NotFree).  When we speak of a "free partition",
+// we mean a partition for which the ShenandoahFreeSetPartitionId is not equal to NotFree.
+class ShenandoahRegionPartitions {
 
+private:
+  const ssize_t _max;           // The maximum number of heap regions
+  const size_t _region_size_bytes;
+  const ShenandoahFreeSet* _free_set;
+  // For each partition, we maintain a bitmap of which regions are affiliated with this partition.
+  ShenandoahSimpleBitMap _membership[UIntNumPartitions];
+
+  // For each partition, we track an interval outside of which a region affiliated with that partition is guaranteed
+  // not to be found. This makes searches for free space more efficient.  For each partition p, _leftmosts[p]
+  // represents its least index and _rightmosts[p] its greatest index.  Empty intervals are indicated by the
+  // canonical [_max, -1].
+  ssize_t _leftmosts[UIntNumPartitions];
+  ssize_t _rightmosts[UIntNumPartitions];
+
+  // Allocation for humongous objects needs to find regions that are entirely empty.  For each partition p, _leftmosts_empty[p]
+  // represents the first region belonging to this partition that is completely empty and _rightmosts_empty[p] represents the
+  // last region that is completely empty.  If there is no completely empty region in this partition, this is represented
+  // by the canonical [_max, -1].
+  ssize_t _leftmosts_empty[UIntNumPartitions];
+  ssize_t _rightmosts_empty[UIntNumPartitions];
+
+  // For each partition p, _capacity[p] represents the total amount of memory within the partition at the time
+  // of the most recent rebuild, _used[p] represents the total amount of memory that has been allocated within this
+  // partition (either already allocated as of the rebuild, or allocated since the rebuild).  _capacity[p] and _used[p]
+  // are denoted in bytes.  Note that some regions that had been assigned to a particular partition at rebuild time
+  // may have been retired following the rebuild.  The tallies for these regions are still reflected in _capacity[p]
+  // and _used[p], even though the region may have been removed from the free set.
+  size_t _capacity[UIntNumPartitions];
+  size_t _used[UIntNumPartitions];
+  size_t _region_counts[UIntNumPartitions];
+
+  // For each partition p, _left_to_right_bias is true iff allocations are normally made from lower indexed regions
+  // before higher indexed regions.
+  bool _left_to_right_bias[UIntNumPartitions];
+
+  // Shrink the intervals associated with partition when region idx is removed from this free set
+  inline void shrink_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, ssize_t idx);
+
+  // Shrink the intervals associated with partition when regions low_idx through high_idx inclusive are removed from this free set
+  inline void shrink_interval_if_range_modifies_either_boundary(ShenandoahFreeSetPartitionId partition,
+                                                                ssize_t low_idx, ssize_t high_idx);
+  inline void expand_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, ssize_t idx, size_t capacity);
+
+  inline bool is_mutator_partition(ShenandoahFreeSetPartitionId p);
+  inline bool is_young_collector_partition(ShenandoahFreeSetPartitionId p);
+  inline bool is_old_collector_partition(ShenandoahFreeSetPartitionId p);
+  inline bool available_implies_empty(size_t available);
+
+#ifndef PRODUCT
+  void dump_bitmap_row(ssize_t region_idx) const;
+  void dump_bitmap_range(ssize_t start_region_idx, ssize_t end_region_idx) const;
+  void dump_bitmap() const;
+#endif
 public:
-  ShenandoahSetsOfFree(size_t max_regions, ShenandoahFreeSet* free_set);
-  ~ShenandoahSetsOfFree();
+  ShenandoahRegionPartitions(size_t max_regions, ShenandoahFreeSet* free_set);
+  ~ShenandoahRegionPartitions() {}
+
+  // Remove all regions from all partitions and reset all bounds
+  void make_all_regions_unavailable();
+
+  // Set the partition id for a particular region without adjusting interval bounds or usage/capacity tallies
+  inline void raw_assign_membership(size_t idx, ShenandoahFreeSetPartitionId p) {
+    _membership[int(p)].set_bit(idx);
+  }
+
+  // Set the Mutator intervals, usage, and capacity according to arguments.  Reset the Collector intervals, used, and capacity
+  // to represent an empty Collector free set.  We use this at the end of rebuild_free_set() to avoid the overhead of making
+  // many redundant incremental adjustments to the mutator intervals as the free set is being rebuilt.
+  void establish_mutator_intervals(ssize_t mutator_leftmost, ssize_t mutator_rightmost,
+                                   ssize_t mutator_leftmost_empty, ssize_t mutator_rightmost_empty,
+                                   size_t mutator_region_count, size_t mutator_used);
+
+  // Set the OldCollector intervals, usage, and capacity according to arguments.  We use this at the end of rebuild_free_set()
+  // to avoid the overhead of making many redundant incremental adjustments to the mutator intervals as the free set is being
+  // rebuilt.
+  void establish_old_collector_intervals(ssize_t old_collector_leftmost, ssize_t old_collector_rightmost,
+                                         ssize_t old_collector_leftmost_empty, ssize_t old_collector_rightmost_empty,
+                                         size_t old_collector_region_count, size_t old_collector_used);
+
+  // Retire region idx from within partition, leaving its capacity and used as part of the original free partition's totals.
+  // Requires that region idx is in the Mutator or Collector partitions.  Hereafter, identifies this region as NotFree.
+  // Any remnant of available memory at the time of retirement is added to the original partition's total of used bytes.
+  void retire_from_partition(ShenandoahFreeSetPartitionId p, ssize_t idx, size_t used_bytes);
+
+  // Retire all regions between low_idx and high_idx inclusive from within partition.  Requires that each region idx is
+  // in the same Mutator or Collector partition.  Hereafter, identifies each region as NotFree.   Assumes that each region
+  // is now considered fully used, since the region is presumably used to represent a humongous object.
+  void retire_range_from_partition(ShenandoahFreeSetPartitionId partition, ssize_t low_idx, ssize_t high_idx);
 
-  // Make all regions NotFree and reset all bounds
-  void clear_all();
+  // Place region idx into free partition which_partition.  Requires that idx is currently NotFree.
+  void make_free(ssize_t idx, ShenandoahFreeSetPartitionId which_partition, size_t region_capacity);
 
-  // Remove or retire region idx from all free sets.  Requires that idx is in a free set.  This does not affect capacity.
-  void remove_from_free_sets(size_t idx);
+  // Place region idx into free partition new_partition, adjusting used and capacity totals for the original and new partition
+  // given that available bytes can still be allocated within this region.  Requires that idx is currently not NotFree.
+  void move_from_partition_to_partition(ssize_t idx, ShenandoahFreeSetPartitionId orig_partition,
+                                        ShenandoahFreeSetPartitionId new_partition, size_t available);
 
-  // Place region idx into free set which_set.  Requires that idx is currently NotFree.
-  void make_free(size_t idx, ShenandoahFreeMemoryType which_set, size_t region_capacity);
+  const char* partition_membership_name(ssize_t idx) const;
 
-  // Place region idx into free set new_set.  Requires that idx is currently not NotFree.
-  void move_to_set(size_t idx, ShenandoahFreeMemoryType new_set, size_t region_capacity);
+  // Return the index of the next available region >= start_index, or maximum_regions if not found.
+  inline ssize_t find_index_of_next_available_region(ShenandoahFreeSetPartitionId which_partition, ssize_t start_index) const;
 
-  // Returns the ShenandoahFreeMemoryType affiliation of region idx, or NotFree if this region is not currently free.  This does
-  // not enforce that free_set membership implies allocation capacity.
-  inline ShenandoahFreeMemoryType membership(size_t idx) const;
+  // Return the index of the previous available region <= last_index, or -1 if not found.
+  inline ssize_t find_index_of_previous_available_region(ShenandoahFreeSetPartitionId which_partition, ssize_t last_index) const;
 
-  // Returns true iff region idx is in the test_set free_set.  Before returning true, asserts that the free
-  // set is not empty.  Requires that test_set != NotFree or NumFreeSets.
-  inline bool in_free_set(size_t idx, ShenandoahFreeMemoryType which_set) const;
+  // Return the index of the next available cluster of cluster_size regions >= start_index, or maximum_regions if not found.
+  inline ssize_t find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId which_partition,
+                                                                 ssize_t start_index, size_t cluster_size) const;
+
+  // Return the index of the previous available cluster of cluster_size regions <= last_index, or -1 if not found.
+  inline ssize_t find_index_of_previous_available_cluster_of_regions(ShenandoahFreeSetPartitionId which_partition,
+                                                                     ssize_t last_index, size_t cluster_size) const;
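
The finder methods above support the iteration idiom used later in shenandoahFreeSet.cpp (for example in print_on() and
internal_fragmentation()): rather than probing in_free_set() at every index, a walk jumps directly from one member region
to the next.  A minimal sketch, assuming the declarations above and a hypothetical visit() callback:

    // Sketch only: visit every region currently in partition p, left to right.
    for (ssize_t idx = partitions.leftmost(p); idx <= partitions.rightmost(p); ) {
      visit(idx);                                                        // hypothetical per-region work
      idx = partitions.find_index_of_next_available_region(p, idx + 1);  // returns max_regions() when exhausted
    }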
+
+  inline bool in_free_set(ShenandoahFreeSetPartitionId which_partition, ssize_t idx) const {
+    return _membership[int(which_partition)].is_set(idx);
+  }
+
+  // Returns the ShenandoahFreeSetPartitionId affiliation of region idx, or NotFree if this region is not currently in any partition.
+  // This does not enforce that free_set membership implies allocation capacity.
+  inline ShenandoahFreeSetPartitionId membership(ssize_t idx) const;
+
+#ifdef ASSERT
+  // Returns true iff region idx's membership is which_partition.  If which_partition represents a free set, asserts
+  // that the region has allocation capacity.
+  inline bool partition_id_matches(ssize_t idx, ShenandoahFreeSetPartitionId which_partition) const;
+#endif
+
+  inline size_t max_regions() const { return _max; }
+
+  inline size_t region_size_bytes() const { return _region_size_bytes; };
 
   // The following four methods return the left-most and right-most bounds on ranges of regions representing
   // the requested set.  The _empty variants represent bounds on the range that holds completely empty
-  // regions, which are required for humongous allocations and desired for "very large" allocations.  A
-  // return value of -1 from leftmost() or leftmost_empty() denotes that the corresponding set is empty.
-  // In other words:
-  //   if the requested which_set is empty:
+  // regions, which are required for humongous allocations and desired for "very large" allocations.
+  //   if the requested which_partition is empty:
   //     leftmost() and leftmost_empty() return _max, rightmost() and rightmost_empty() return 0
   //   otherwise, expect the following:
   //     0 <= leftmost <= leftmost_empty <= rightmost_empty <= rightmost < _max
-  inline size_t leftmost(ShenandoahFreeMemoryType which_set) const;
-  inline size_t rightmost(ShenandoahFreeMemoryType which_set) const;
-  size_t leftmost_empty(ShenandoahFreeMemoryType which_set);
-  size_t rightmost_empty(ShenandoahFreeMemoryType which_set);
+  inline ssize_t leftmost(ShenandoahFreeSetPartitionId which_partition) const;
+  inline ssize_t rightmost(ShenandoahFreeSetPartitionId which_partition) const;
+  ssize_t leftmost_empty(ShenandoahFreeSetPartitionId which_partition);
+  ssize_t rightmost_empty(ShenandoahFreeSetPartitionId which_partition);
 
-  inline bool is_empty(ShenandoahFreeMemoryType which_set) const;
+  inline bool is_empty(ShenandoahFreeSetPartitionId which_partition) const;
 
-  inline void increase_used(ShenandoahFreeMemoryType which_set, size_t bytes);
+  inline void increase_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes);
 
-  inline size_t capacity_of(ShenandoahFreeMemoryType which_set) const {
-    assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
-    return _capacity_of[which_set];
+  inline void set_bias_from_left_to_right(ShenandoahFreeSetPartitionId which_partition, bool value) {
+    assert (which_partition < NumPartitions, "selected free set must be valid");
+    _left_to_right_bias[int(which_partition)] = value;
   }
 
-  inline size_t used_by(ShenandoahFreeMemoryType which_set) const {
-    assert (which_set > NotFree && which_set < NumFreeSets, "selected free set must be valid");
-    return _used_by[which_set];
+  inline bool alloc_from_left_bias(ShenandoahFreeSetPartitionId which_partition) const {
+    assert (which_partition < NumPartitions, "selected free set must be valid");
+    return _left_to_right_bias[int(which_partition)];
   }
 
-  inline size_t max() const { return _max; }
+  inline size_t capacity_of(ShenandoahFreeSetPartitionId which_partition) const {
+    assert (which_partition < NumPartitions, "selected free set must be valid");
+    return _capacity[int(which_partition)];
+  }
 
-  inline size_t count(ShenandoahFreeMemoryType which_set) const { return _region_counts[which_set]; }
+  inline size_t used_by(ShenandoahFreeSetPartitionId which_partition) const {
+    assert (which_partition < NumPartitions, "selected free set must be valid");
+    return _used[int(which_partition)];
+  }
 
-  // Return true iff regions for allocation from this set should be peformed left to right.  Otherwise, allocate
-  // from right to left.
-  inline bool alloc_from_left_bias(ShenandoahFreeMemoryType which_set);
+  inline size_t available_in(ShenandoahFreeSetPartitionId which_partition) const {
+    assert (which_partition < NumPartitions, "selected free set must be valid");
+    return _capacity[int(which_partition)] - _used[int(which_partition)];
+  }
 
-  // Determine whether we prefer to allocate from left to right or from right to left for this free-set.
-  void establish_alloc_bias(ShenandoahFreeMemoryType which_set);
+  inline void set_capacity_of(ShenandoahFreeSetPartitionId which_partition, size_t value) {
+    assert (which_partition < NumPartitions, "selected free set must be valid");
+    _capacity[int(which_partition)] = value;
+  }
+
+  inline void set_used_by(ShenandoahFreeSetPartitionId which_partition, size_t value) {
+    assert (which_partition < NumPartitions, "selected free set must be valid");
+    _used[int(which_partition)] = value;
+  }
+
+  inline size_t count(ShenandoahFreeSetPartitionId which_partition) const { return _region_counts[int(which_partition)]; }
 
   // Assure leftmost, rightmost, leftmost_empty, and rightmost_empty bounds are valid for all free sets.
   // Valid bounds honor all of the following (where max is the number of heap regions):
@@ -137,7 +250,7 @@ class ShenandoahSetsOfFree {
   //     }
   //   if the set has no empty regions, leftmost_empty equals max and rightmost_empty equals 0
   //   Otherwise (the region has empty regions):
-  //     0 <= lefmost_empty < max and 0 <= rightmost_empty < max
+  //     0 <= leftmost_empty < max and 0 <= rightmost_empty < max
   //     rightmost_empty >= leftmost_empty
   //     for every idx that is in the set and is empty {
   //       idx >= leftmost &&
@@ -146,77 +259,159 @@ class ShenandoahSetsOfFree {
   void assert_bounds() NOT_DEBUG_RETURN;
 };
 
+// Publicly, ShenandoahFreeSet represents memory that is available to mutator threads.  The public capacity(), used(),
+// and available() methods represent this public notion of memory that is under control of the mutator.  Separately,
+// ShenandoahFreeSet also represents memory available to garbage collection activities for compaction purposes.
+//
+// The Shenandoah garbage collector evacuates live objects out of specific regions that are identified as members of the
+// collection set (cset).
+//
+// The ShenandoahFreeSet tries to colocate survivor objects (objects that have been evacuated at least once) at the
+// high end of memory.  New mutator allocations are taken from the low end of memory.  Within the mutator's range of regions,
+// humongous allocations are taken from the lowest addresses, and LAB (local allocation buffers) and regular shared allocations
+// are taken from the higher addresses of the mutator's range of regions.  This approach allows longer-lasting survivor regions
+// to congregate at the top of the heap and longer-lasting humongous regions to congregate at the bottom of the heap, with
+// short-lived frequently evacuated regions occupying the middle of the heap.
+//
+// Mutator and garbage collection activities tend to scramble the content of regions.  Twice during each GC pass, we rebuild
+// the free set in an effort to restore the efficient segregation of Collector and Mutator regions:
+//
+//  1. At the start of evacuation, we know exactly how much memory is going to be evacuated, and this guides our
+//     sizing of the Collector free set.
+//
+//  2. At the end of GC, we have reclaimed all of the memory that was spanned by the cset.  We rebuild here to make
+//     sure there is enough memory reserved at the high end of memory to hold the objects that might need to be evacuated
+//     during the next GC pass.
+
 class ShenandoahFreeSet : public CHeapObj<mtGC> {
 private:
   ShenandoahHeap* const _heap;
-  ShenandoahSetsOfFree _free_sets;
-
-  HeapWord* try_allocate_in(ShenandoahHeapRegion* region, ShenandoahAllocRequest& req, bool& in_new_region);
+  ShenandoahRegionPartitions _partitions;
+  size_t _retired_old_regions;
 
   HeapWord* allocate_aligned_plab(size_t size, ShenandoahAllocRequest& req, ShenandoahHeapRegion* r);
 
-  // Satisfy young-generation or single-generation collector allocation request req by finding memory that matches
-  // affiliation, which either equals req.affiliation or FREE.  We know req.is_young().
-  HeapWord* allocate_with_affiliation(ShenandoahAffiliation affiliation, ShenandoahAllocRequest& req, bool& in_new_region);
+  // Return the address of memory allocated, setting in_new_region to true iff the allocation is taken
+  // from a region that was previously empty.  Return nullptr if memory could not be allocated.
+  inline HeapWord* allocate_from_partition_with_affiliation(ShenandoahFreeSetPartitionId which_partition,
+                                                            ShenandoahAffiliation affiliation,
+                                                            ShenandoahAllocRequest& req, bool& in_new_region);
 
-  // Satisfy allocation request req by finding memory that matches affiliation, which either equals req.affiliation
-  // or FREE. We know req.is_old().
-  HeapWord* allocate_old_with_affiliation(ShenandoahAffiliation affiliation, ShenandoahAllocRequest& req, bool& in_new_region);
+  // We re-evaluate the left-to-right allocation bias whenever _alloc_bias_weight is less than zero.  Each time
+  // we allocate an object, we decrement this counter.  Each time we re-evaluate whether to allocate
+  // from right-to-left or left-to-right, we reset the value of this counter to _InitialAllocBiasWeight.
+  ssize_t _alloc_bias_weight;
 
-  // While holding the heap lock, allocate memory for a single object which is to be entirely contained
-  // within a single HeapRegion as characterized by req.  The req.size() value is known to be less than or
-  // equal to ShenandoahHeapRegion::humongous_threshold_words().  The caller of allocate_single is responsible
-  // for registering the resulting object and setting the remembered set card values as appropriate.  The
-  // most common case is that we are allocating a PLAB in which case object registering and card dirtying
-  // is managed after the PLAB is divided into individual objects.
+  const ssize_t _InitialAllocBiasWeight = 256;
+
+  HeapWord* try_allocate_in(ShenandoahHeapRegion* region, ShenandoahAllocRequest& req, bool& in_new_region);
+
+  // While holding the heap lock, allocate memory for a single object or LAB, which is to be entirely contained
+  // within a single HeapRegion as characterized by req.
+  //
+  // Precondition: req.size() <= ShenandoahHeapRegion::humongous_threshold_words().
   HeapWord* allocate_single(ShenandoahAllocRequest& req, bool& in_new_region);
+
+  // While holding the heap lock, allocate memory for a humongous object which spans one or more regions that
+  // were previously empty.  Regions that represent humongous objects are entirely dedicated to the humongous
+  // object.  No other objects are packed into these regions.
+  //
+  // Precondition: req.size() > ShenandoahHeapRegion::humongous_threshold_words().
   HeapWord* allocate_contiguous(ShenandoahAllocRequest& req);
 
+  // Change region r from the Mutator partition to the GC's Collector or OldCollector partition.  This requires that the
+  // region is entirely empty.
+  //
+  // Typical usage: During evacuation, the GC may find it needs more memory than had been reserved at the start of evacuation to
+  // hold evacuated objects.  If this occurs and memory is still available in the Mutator's free set, we will flip a region from
+  // the Mutator free set into the Collector or OldCollector free set.
   void flip_to_gc(ShenandoahHeapRegion* r);
   void flip_to_old_gc(ShenandoahHeapRegion* r);
 
   void clear_internal();
-
   void try_recycle_trashed(ShenandoahHeapRegion *r);
 
-  bool can_allocate_from(ShenandoahHeapRegion *r) const;
-  bool can_allocate_from(size_t idx) const;
-  bool has_alloc_capacity(ShenandoahHeapRegion *r) const;
+  // Returns true iff this region is entirely available, either because it is empty() or because it has been found to represent
+  // immediate trash and we'll be able to immediately recycle it.  Note that we cannot recycle immediate trash if
+  // concurrent weak root processing is in progress.
+  inline bool can_allocate_from(ShenandoahHeapRegion *r) const;
+  inline bool can_allocate_from(size_t idx) const;
+
+  inline bool has_alloc_capacity(ShenandoahHeapRegion *r) const;
+
+  size_t transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId which_collector,
+                                                                  size_t max_xfer_regions,
+                                                                  size_t& bytes_transferred);
+  size_t transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId collector_id,
+                                                                      size_t max_xfer_regions,
+                                                                      size_t& bytes_transferred);
+
+
+  // Determine whether we prefer to allocate from left to right or from right to left within the OldCollector free-set.
+  void establish_old_collector_alloc_bias();
+
+  // Set max_capacity for young and old generations
+  void establish_generation_sizes(size_t young_region_count, size_t old_region_count);
+  size_t get_usable_free_words(size_t free_bytes) const;
 
 public:
   ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions);
 
-  size_t alloc_capacity(ShenandoahHeapRegion *r) const;
-  size_t alloc_capacity(size_t idx) const;
+  // Public because ShenandoahRegionPartitions assertions require access.
+  inline size_t alloc_capacity(ShenandoahHeapRegion *r) const;
+  inline size_t alloc_capacity(size_t idx) const;
 
   void clear();
+
+  // Examine the existing free set representation, capturing the current state into var arguments:
+  //
+  // young_cset_regions is the number of regions currently in the young cset if we are starting to evacuate, or zero
+  //   old_cset_regions is the number of regions currently in the old cset if we are starting a mixed evacuation, or zero
+  //   first_old_region is the index of the first region that is part of the OldCollector set
+  //    last_old_region is the index of the last region that is part of the OldCollector set
+  //   old_region_count is the number of regions in the OldCollector set that have memory available to be allocated
   void prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions,
                           size_t &first_old_region, size_t &last_old_region, size_t &old_region_count);
 
   // At the end of final mark, but before we begin evacuating, heuristics calculate how much memory is required to
-  // hold the results of evacuating to young-gen and to old-gen.  These quantities, stored in reserves for their,
-  // respective generations, are consulted prior to rebuilding the free set (ShenandoahFreeSet) in preparation for
-  // evacuation.  When the free set is rebuilt, we make sure to reserve sufficient memory in the collector and
-  // old_collector sets to hold evacuations, if have_evacuation_reserves is true.  The other time we rebuild the free
-  // set is at the end of GC, as we prepare to idle GC until the next trigger.  In this case, have_evacuation_reserves
-  // is false because we don't yet know how much memory will need to be evacuated in the next GC cycle.  When
-  // have_evacuation_reserves is false, the free set rebuild operation reserves for the collector and old_collector sets
-  // based on alternative mechanisms, such as ShenandoahEvacReserve, ShenandoahOldEvacReserve, and
+  // hold the results of evacuating to young-gen and to old-gen, and have_evacuation_reserves should be true.
+  // These quantities, stored as reserves for their respective generations, are consulted prior to rebuilding
+  // the free set (ShenandoahFreeSet) in preparation for evacuation.  When the free set is rebuilt, we make sure
+  // to reserve sufficient memory in the collector and old_collector sets to hold evacuations.
+  //
+  // We also rebuild the free set at the end of GC, as we prepare to idle GC until the next trigger.  In this case,
+  // have_evacuation_reserves is false because we don't yet know how much memory will need to be evacuated in the
+  // next GC cycle.  When have_evacuation_reserves is false, the free set rebuild operation reserves for the collector
+  // and old_collector sets based on alternative mechanisms, such as ShenandoahEvacReserve, ShenandoahOldEvacReserve, and
   // ShenandoahOldCompactionReserve.  In a future planned enhancement, the reserve for old_collector set when the
   // evacuation reserves are unknown, is based in part on anticipated promotion as determined by analysis of live data
   // found during the previous GC pass which is one less than the current tenure age.
-  void rebuild(size_t young_cset_regions, size_t old_cset_regions, bool have_evacuation_reserves = false);
-
-  void move_collector_sets_to_mutator(size_t cset_regions);
-
-  void add_old_collector_free_region(ShenandoahHeapRegion* region);
+  //
+  // young_cset_regions is the number of regions currently in the young cset if we are starting to evacuate, or zero
+  //   old_cset_regions is the number of regions currently in the old cset if we are starting a mixed evacuation, or zero
+  //    num_old_regions is the number of old-gen regions that have available memory for further allocations (excluding old cset)
+  // have_evacuation_reserves is true iff the desired values of young-gen and old-gen evacuation reserves and old-gen
+  //                    promotion reserve have been precomputed (and can be obtained by invoking
+  //                    <generation>->get_evacuation_reserve() or old_gen->get_promoted_reserve()).
+  void finish_rebuild(size_t young_cset_regions, size_t old_cset_regions, size_t num_old_regions,
+                      bool have_evacuation_reserves = false);
+
+  // When a region is promoted in place, we invoke this method to add the region to the old collector partition,
+  // provided its available memory is greater than plab_min_size().
+  void add_promoted_in_place_region_to_old_collector(ShenandoahHeapRegion* region);
+
+  // Move up to cset_regions number of regions from being available to the collector to being available to the mutator.
+  //
+  // Typical usage: At the end of evacuation, when the collector no longer needs the regions that had been reserved
+  // for evacuation, invoke this to make regions available for mutator allocations.
+  void move_regions_from_collector_to_mutator(size_t cset_regions);
 
   void recycle_trash();
 
   void log_status();
 
-  inline size_t capacity()  const { return _free_sets.capacity_of(Mutator); }
-  inline size_t used()      const { return _free_sets.used_by(Mutator);     }
+  inline size_t capacity()  const { return _partitions.capacity_of(ShenandoahFreeSetPartitionId::Mutator); }
+  inline size_t used()      const { return _partitions.used_by(ShenandoahFreeSetPartitionId::Mutator);     }
   inline size_t available() const {
     assert(used() <= capacity(), "must use less than capacity");
     return capacity() - used();
@@ -225,14 +420,62 @@ class ShenandoahFreeSet : public CHeapObj<mtGC> {
   HeapWord* allocate(ShenandoahAllocRequest& req, bool& in_new_region);
   size_t unsafe_peek_free() const;
 
+  /*
+   * Internal fragmentation metric: describes how fragmented the heap regions are.
+   *
+   * It is derived as:
+   *
+   *               sum(used[i]^2, i=0..k)
+   *   IF = 1 - ------------------------------
+   *              C * sum(used[i], i=0..k)
+   *
+   * ...where k is the number of regions in computation, C is the region capacity, and
+   * used[i] is the used space in the region.
+   *
+   * The non-linearity causes IF to be lower for the cases where the same total heap
+   * used is densely packed. For example:
+   *   a) Heap is completely full  => IF = 0
+   *   b) Heap is half full, first 50% regions are completely full => IF = 0
+   *   c) Heap is half full, each region is 50% full => IF = 1/2
+   *   d) Heap is quarter full, first 50% regions are completely full => IF = 0
+   *   e) Heap is quarter full, each region is 25% full => IF = 3/4
+   *   f) Heap has one small object per each region => IF =~ 1
+   */
   double internal_fragmentation();
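
As a quick check of the formula above, a standalone sketch (assumption: uniform region capacity C) reproduces examples
(c) and (e): k regions each 50% full give IF = 1 - (k*(C/2)^2)/(C*k*(C/2)) = 1/2, and 25% full regions give IF = 3/4.

    #include <cstddef>

    // Sketch only: evaluate IF for a hypothetical array of per-region used bytes.
    static double internal_fragmentation_example(const size_t* used, int count, size_t region_capacity) {
      double squared = 0.0, linear = 0.0;
      for (int i = 0; i < count; i++) {
        squared += (double) used[i] * (double) used[i];
        linear  += (double) used[i];
      }
      return (linear > 0.0) ? 1.0 - squared / ((double) region_capacity * linear) : 0.0;
    }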
+
+  /*
+   * External fragmentation metric: describes how fragmented the heap is.
+   *
+   * It is derived as:
+   *
+   *   EF = 1 - largest_contiguous_free / total_free
+   *
+   * For example:
+   *   a) Heap is completely empty => EF = 0
+   *   b) Heap is completely full => EF = 0
+   *   c) Heap is first-half full => EF = 1/2
+   *   d) Heap is half full, full and empty regions interleave => EF =~ 1
+   */
   double external_fragmentation();
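
Similarly, a small sketch (assumption: free space counted in whole empty regions of uniform size) illustrates EF: when
full and empty regions interleave, the largest contiguous run of empty regions is 1, so EF approaches 1 (example (d));
when all free regions are contiguous, EF = 0.

    #include <cstddef>

    // Sketch only: EF = 1 - largest_contiguous_free / total_free, with both expressed in bytes.
    static double external_fragmentation_example(size_t largest_contig_empty_regions,
                                                 size_t total_empty_regions, size_t region_size_bytes) {
      size_t largest_contig_free = largest_contig_empty_regions * region_size_bytes;
      size_t total_free = total_empty_regions * region_size_bytes;
      return (total_free > 0) ? 1.0 - (double) largest_contig_free / (double) total_free : 0.0;
    }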
 
   void print_on(outputStream* out) const;
 
+  // This function places all regions that have allocation capacity into the mutator partition, or if the region
+  // is already affiliated with old, into the old collector partition, identifying regions that have no allocation
+  // capacity as NotFree.  Capture the modified state of the freeset into var arguments:
+  //
+  // young_cset_regions is the number of regions currently in the young cset if we are starting to evacuate, or zero
+  //   old_cset_regions is the number of regions currently in the old cset if we are starting a mixed evacuation, or zero
+  //   first_old_region is the index of the first region that is part of the OldCollector set
+  //    last_old_region is the index of the last region that is part of the OldCollector set
+  //   old_region_count is the number of regions in the OldCollector set that have memory available to be allocated
   void find_regions_with_alloc_capacity(size_t &young_cset_regions, size_t &old_cset_regions,
                                         size_t &first_old_region, size_t &last_old_region, size_t &old_region_count);
-  void reserve_regions(size_t young_reserve, size_t old_reserve);
+
+  // Ensure that Collector has at least to_reserve bytes of available memory, and OldCollector has at least old_reserve
+  // bytes of available memory.  On input, old_region_count holds the number of regions already present in the
+  // OldCollector partition.  Upon return, old_region_count holds the updated number of regions in the OldCollector partition.
+  void reserve_regions(size_t to_reserve, size_t old_reserve, size_t &old_region_count);
 
   // Reserve space for evacuations, with regions reserved for old evacuations placed to the right
   // of regions reserved of young evacuations.
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
index 836fb6dcc06..63cd1cc7873 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahFullGC.cpp
@@ -1194,7 +1194,7 @@ void ShenandoahFullGC::phase5_epilog() {
       ShenandoahGenerationalFullGC::compute_balances();
     }
 
-    heap->free_set()->rebuild(young_cset_regions, old_cset_regions);
+    heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
 
     heap->clear_cancelled_gc(true /* clear oom handler */);
   }
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
index dc60b1cd3ba..825df6e4c3e 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.cpp
@@ -754,7 +754,7 @@ void ShenandoahGeneration::prepare_regions_and_collection_set(bool concurrent) {
     size_t first_old, last_old, num_old;
     heap->free_set()->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
     // Free set construction uses reserve quantities, because they are known to be valid here
-    heap->free_set()->rebuild(young_cset_regions, old_cset_regions, true);
+    heap->free_set()->finish_rebuild(young_cset_regions, old_cset_regions, num_old, true);
   }
 }
 
@@ -964,7 +964,7 @@ size_t ShenandoahGeneration::available(size_t capacity) const {
   return in_use > capacity ? 0 : capacity - in_use;
 }
 
-void ShenandoahGeneration::increase_capacity(size_t increment) {
+size_t ShenandoahGeneration::increase_capacity(size_t increment) {
   shenandoah_assert_heaplocked_or_safepoint();
 
   // We do not enforce that new capacity >= heap->max_size_for(this).  The maximum generation size is treated as a rule of thumb
@@ -981,9 +981,16 @@ void ShenandoahGeneration::increase_capacity(size_t increment) {
   assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
          (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() >= _used),
          "Affiliated regions must hold more than what is currently used");
+  return _max_capacity;
 }
 
-void ShenandoahGeneration::decrease_capacity(size_t decrement) {
+size_t ShenandoahGeneration::set_capacity(size_t byte_size) {
+  shenandoah_assert_heaplocked_or_safepoint();
+  _max_capacity = byte_size;
+  return _max_capacity;
+}
+
+size_t ShenandoahGeneration::decrease_capacity(size_t decrement) {
   shenandoah_assert_heaplocked_or_safepoint();
 
   // We do not enforce that new capacity >= heap->min_size_for(this).  The minimum generation size is treated as a rule of thumb
@@ -1006,6 +1013,7 @@ void ShenandoahGeneration::decrease_capacity(size_t decrement) {
   assert(is_global() || ShenandoahHeap::heap()->is_full_gc_in_progress() ||
          (_affiliated_region_count * ShenandoahHeapRegion::region_size_bytes() <= _max_capacity),
          "Cannot use more than capacity");
+  return _max_capacity;
 }
 
 void ShenandoahGeneration::record_success_concurrent(bool abbreviated) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
index ec853235288..081fdad5e3b 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGeneration.hpp
@@ -147,10 +147,13 @@ class ShenandoahGeneration : public CHeapObj<mtGC>, public ShenandoahSpaceInfo {
   void reset_bytes_allocated_since_gc_start();
   void increase_allocated(size_t bytes);
 
-  // These methods change the capacity of the region by adding or subtracting the given number of bytes from the current
-  // capacity.
-  void increase_capacity(size_t increment);
-  void decrease_capacity(size_t decrement);
+  // These methods change the capacity of the generation by adding or subtracting the given number of bytes from the current
+  // capacity, returning the capacity of the generation following the change.
+  size_t increase_capacity(size_t increment);
+  size_t decrease_capacity(size_t decrement);
+
+  // Set the capacity of the generation, returning the value set
+  size_t set_capacity(size_t byte_size);
 
   void log_status(const char* msg) const;
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp
index 822351c9110..1eba7e19782 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalControlThread.cpp
@@ -524,6 +524,7 @@ bool ShenandoahGenerationalControlThread::resume_concurrent_old_cycle(Shenandoah
   // is allowed to cancel a GC.
   ShenandoahOldGC gc(generation, _allow_old_preemption);
   if (gc.collect(cause)) {
+    heap->notify_gc_progress();
     generation->record_success_concurrent(false);
   }
 
@@ -600,6 +601,7 @@ void ShenandoahGenerationalControlThread::service_concurrent_cycle(ShenandoahHea
   ShenandoahConcurrentGC gc(generation, do_old_gc_bootstrap);
   if (gc.collect(cause)) {
     // Cycle is complete
+    heap->notify_gc_progress();
     generation->record_success_concurrent(gc.abbreviated());
   } else {
     assert(heap->cancelled_gc(), "Must have been cancelled");
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp
index 70f79d6acda..3f446ad891f 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp
@@ -209,7 +209,7 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion
     old_gen->increase_used(region_used);
 
     // add_old_collector_free_region() increases promoted_reserve() if available space exceeds plab_min_size()
-    _heap->free_set()->add_old_collector_free_region(region);
+    _heap->free_set()->add_promoted_in_place_region_to_old_collector(region);
   }
 }
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
index 05d0b27b19b..3d46c2ea4b5 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalHeap.cpp
@@ -785,9 +785,12 @@ class ShenandoahGenerationalUpdateHeapRefsTask : public WorkerTask {
       // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
       // results of evacuation.  These reserves are no longer necessary because evacuation has completed.
       size_t cset_regions = _heap->collection_set()->count();
-      // We cannot transfer any more regions than will be reclaimed when the existing collection set is recycled, because
-      // we need the reclaimed collection set regions to replenish the collector reserves
-      _heap->free_set()->move_collector_sets_to_mutator(cset_regions);
+
+      // Now that evacuation is done, we can reassign any regions that had been reserved to hold the results of evacuation
+      // to the mutator free set.  At the end of GC, we will have cset_regions newly evacuated fully empty regions from
+      // which we will be able to replenish the Collector free set and the OldCollector free set in preparation for the
+      // next GC cycle.
+      _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
     }
     // If !CONCURRENT, there's no value in expanding Mutator free set
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
index ac5d7f00cfe..386e01189e8 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -411,7 +411,7 @@ jint ShenandoahHeap::initialize() {
     // We are initializing free set.  We ignore cset region tallies.
     size_t first_old, last_old, num_old;
     _free_set->prepare_to_rebuild(young_cset_regions, old_cset_regions, first_old, last_old, num_old);
-    _free_set->rebuild(young_cset_regions, old_cset_regions);
+    _free_set->finish_rebuild(young_cset_regions, old_cset_regions, num_old);
   }
 
   if (AlwaysPreTouch) {
@@ -988,8 +988,10 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
     // is testing that the GC overhead limit has not been exceeded.
     // This will notify the collector to start a cycle, but will raise
     // an OOME to the mutator if the last Full GCs have not made progress.
-    if (result == nullptr && !req.is_lab_alloc() && get_gc_no_progress_count() > ShenandoahNoProgressThreshold) {
+    // gc_no_progress_count is incremented following each degen or full GC that fails to achieve is_good_progress().
+    if ((result == nullptr) && !req.is_lab_alloc() && (get_gc_no_progress_count() > ShenandoahNoProgressThreshold)) {
       control_thread()->handle_alloc_failure(req, false);
+      req.set_actual_size(0);
       return nullptr;
     }
 
@@ -2360,9 +2362,12 @@ class ShenandoahUpdateHeapRefsTask : public WorkerTask {
       // We ask the first worker to replenish the Mutator free set by moving regions previously reserved to hold the
       // results of evacuation.  These reserves are no longer necessary because evacuation has completed.
       size_t cset_regions = _heap->collection_set()->count();
-      // We cannot transfer any more regions than will be reclaimed when the existing collection set is recycled, because
-      // we need the reclaimed collection set regions to replenish the collector reserves
-      _heap->free_set()->move_collector_sets_to_mutator(cset_regions);
+
+      // Now that evacuation is done, we can reassign any regions that had been reserved to hold the results of evacuation
+      // to the mutator free set.  At the end of GC, we will have cset_regions newly evacuated fully empty regions from
+      // which we will be able to replenish the Collector free set and the OldCollector free set in preparation for the
+      // next GC cycle.
+      _heap->free_set()->move_regions_from_collector_to_mutator(cset_regions);
     }
     // If !CONCURRENT, there's no value in expanding Mutator free set
     T cl;
@@ -2480,7 +2485,7 @@ void ShenandoahHeap::rebuild_free_set(bool concurrent) {
     // within partially consumed regions of memory.
   }
   // Rebuild free set based on adjusted generation sizes.
-  _free_set->rebuild(young_cset_regions, old_cset_regions);
+  _free_set->finish_rebuild(young_cset_regions, old_cset_regions, old_region_count);
 
   if (mode()->is_generational()) {
     ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
index 99eb5ceeab9..5359470c6f0 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.cpp
@@ -118,9 +118,10 @@ void ShenandoahHeapRegion::make_regular_allocation(ShenandoahAffiliation affilia
 }
 
 // Change affiliation to YOUNG_GENERATION if _state is not _pinned_cset, _regular, or _pinned.  This implements
-// behavior previously performed as a side effect of make_regular_bypass().
+// behavior previously performed as a side effect of make_regular_bypass().  This is used by Full GC.
 void ShenandoahHeapRegion::make_young_maybe() {
   shenandoah_assert_heaplocked();
+  assert(!ShenandoahHeap::heap()->mode()->is_generational(), "Only call if non-generational");
   switch (_state) {
    case _empty_uncommitted:
    case _empty_committed:
@@ -128,13 +129,6 @@ void ShenandoahHeapRegion::make_young_maybe() {
    case _humongous_start:
    case _humongous_cont:
      if (affiliation() != YOUNG_GENERATION) {
-       ShenandoahHeap* heap = ShenandoahHeap::heap();
-       if (heap->mode()->is_generational()) {
-         if (is_old()) {
-           heap->old_generation()->decrement_affiliated_region_count();
-         }
-         heap->young_generation()->increment_affiliated_region_count();
-       }
        set_affiliation(YOUNG_GENERATION);
      }
      return;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
index 5d6c27df1e2..17464a76d28 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp
@@ -75,7 +75,7 @@ HeapWord* ShenandoahHeapRegion::allocate_aligned(size_t size, ShenandoahAllocReq
     HeapWord* new_top = aligned_obj + size;
     assert(new_top <= end(), "PLAB cannot span end of heap region");
     set_top(new_top);
-    req.set_actual_size(size);
+    // We do not call req.set_actual_size() here; the caller is responsible for setting it.
     req.set_waste(pad_words);
     assert(is_object_aligned(new_top), "new top breaks alignment: " PTR_FORMAT, p2i(new_top));
     assert(is_aligned(aligned_obj, alignment_in_bytes), "obj is not aligned: " PTR_FORMAT, p2i(aligned_obj));
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp
index 07926488c9c..fe2954cdb8e 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp
@@ -476,7 +476,7 @@ void ShenandoahOldGeneration::prepare_regions_and_collection_set(bool concurrent
     heap->free_set()->prepare_to_rebuild(cset_young_regions, cset_old_regions, first_old, last_old, num_old);
     // This is just old-gen completion.  No future budgeting required here.  The only reason to rebuild the freeset here
     // is in case there was any immediate old garbage identified.
-    heap->free_set()->rebuild(cset_young_regions, cset_old_regions);
+    heap->free_set()->finish_rebuild(cset_young_regions, cset_old_regions, num_old);
   }
 }
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.cpp
new file mode 100644
index 00000000000..c3e8108752f
--- /dev/null
+++ b/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.cpp
@@ -0,0 +1,291 @@
+/*
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shenandoah/shenandoahSimpleBitMap.hpp"
+
+ShenandoahSimpleBitMap::ShenandoahSimpleBitMap(size_t num_bits) :
+    _num_bits(num_bits),
+    _num_words(align_up(num_bits, BitsPerWord) / BitsPerWord),
+    _bitmap(NEW_C_HEAP_ARRAY(uintx, _num_words, mtGC))
+{
+  clear_all();
+}
+
+ShenandoahSimpleBitMap::~ShenandoahSimpleBitMap() {
+  if (_bitmap != nullptr) {
+    FREE_C_HEAP_ARRAY(uintx, _bitmap);
+  }
+}
+
+size_t ShenandoahSimpleBitMap::count_leading_ones(idx_t start_idx) const {
+  assert((start_idx >= 0) && (start_idx < _num_bits), "precondition");
+  size_t array_idx = start_idx >> LogBitsPerWord;
+  uintx element_bits = _bitmap[array_idx];
+  uintx bit_number = start_idx & right_n_bits(LogBitsPerWord);
+  uintx mask = ~right_n_bits(bit_number);
+  size_t counted_ones = 0;
+  while ((element_bits & mask) == mask) {
+    // All bits numbered >= bit_number are set
+    size_t found_ones = BitsPerWord - bit_number;
+    counted_ones += found_ones;
+    // Dead code: do not need to compute: start_idx += found_ones;
+    // Strength reduction:                array_idx = (start_idx >> LogBitsPerWord)
+    array_idx++;
+    element_bits = _bitmap[array_idx];
+    // Constant folding:                  bit_number = start_idx & right_n_bits(LogBitsPerWord);
+    bit_number = 0;
+    // Constant folding:                  mask = ~right_n_bits(bit_number);
+    mask = ~0;
+  }
+
+  // Add in number of consecutive ones starting with the_bit and including more significant bits and return result
+  uintx aligned = element_bits >> bit_number;
+  uintx complement = ~aligned;
+  return counted_ones + count_trailing_zeros<uintx>(complement);
+}
+
+size_t ShenandoahSimpleBitMap::count_trailing_ones(idx_t last_idx) const {
+  assert((last_idx >= 0) && (last_idx < _num_bits), "precondition");
+  size_t array_idx = last_idx >> LogBitsPerWord;
+  uintx element_bits = _bitmap[array_idx];
+  uintx bit_number = last_idx & right_n_bits(LogBitsPerWord);
+  // All ones from bit 0 to the_bit
+  uintx mask = right_n_bits(bit_number + 1);
+  size_t counted_ones = 0;
+  while ((element_bits & mask) == mask) {
+    // All bits numbered <= bit_number are set
+    size_t found_ones = bit_number + 1;
+    counted_ones += found_ones;
+    // Dead code: do not need to compute: last_idx -= found_ones;
+    array_idx--;
+    element_bits = _bitmap[array_idx];
+    // Constant folding:                  bit_number = last_idx & right_n_bits(LogBitsPerWord);
+    bit_number = BitsPerWord - 1;
+    // Constant folding:                  mask = right_n_bits(bit_number + 1);
+    mask = ~0;
+  }
+
+  // Add in number of consecutive ones starting with the_bit and including less significant bits and return result
+  uintx aligned = element_bits << (BitsPerWord - (bit_number + 1));
+  uintx complement = ~aligned;
+  return counted_ones + count_leading_zeros<uintx>(complement);
+}
+
+bool ShenandoahSimpleBitMap::is_forward_consecutive_ones(idx_t start_idx, idx_t count) const {
+  while (count > 0) {
+    assert((start_idx >= 0) && (start_idx < _num_bits), "precondition: start_idx: " SSIZE_FORMAT ", count: " SSIZE_FORMAT,
+           start_idx, count);
+    assert(start_idx + count <= (idx_t) _num_bits, "precondition");
+    size_t array_idx = start_idx >> LogBitsPerWord;
+    uintx bit_number = start_idx & right_n_bits(LogBitsPerWord);
+    uintx element_bits = _bitmap[array_idx];
+    uintx bits_to_examine  = BitsPerWord - bit_number;
+    element_bits >>= bit_number;
+    uintx complement = ~element_bits;
+    uintx trailing_ones;
+    if (complement != 0) {
+      trailing_ones = count_trailing_zeros<uintx>(complement);
+    } else {
+      trailing_ones = bits_to_examine;
+    }
+    if (trailing_ones >= (uintx) count) {
+      return true;
+    } else if (trailing_ones == bits_to_examine) {
+      start_idx += bits_to_examine;
+      count -= bits_to_examine;
+      // Repeat search with smaller goal
+    } else {
+      return false;
+    }
+  }
+  return true;
+}
+
+bool ShenandoahSimpleBitMap::is_backward_consecutive_ones(idx_t last_idx, idx_t count) const {
+  while (count > 0) {
+    assert((last_idx >= 0) && (last_idx < _num_bits), "precondition");
+    assert(last_idx - count >= -1, "precondition");
+    size_t array_idx = last_idx >> LogBitsPerWord;
+    uintx bit_number = last_idx & right_n_bits(LogBitsPerWord);
+    uintx element_bits = _bitmap[array_idx];
+    uintx bits_to_examine = bit_number + 1;
+    element_bits <<= (BitsPerWord - bits_to_examine);
+    uintx complement = ~element_bits;
+    uintx leading_ones;
+    if (complement != 0) {
+      leading_ones = count_leading_zeros<uintx>(complement);
+    } else {
+      leading_ones = bits_to_examine;
+    }
+    if (leading_ones >= (uintx) count) {
+      return true;
+    } else if (leading_ones == bits_to_examine) {
+      last_idx -= leading_ones;
+      count -= leading_ones;
+      // Repeat search with smaller goal
+    } else {
+      return false;
+    }
+  }
+  return true;
+}
+
+idx_t ShenandoahSimpleBitMap::find_first_consecutive_set_bits(idx_t beg, idx_t end, size_t num_bits) const {
+  assert((beg >= 0) && (beg < _num_bits), "precondition");
+
+  // Stop looking if there are not num_bits remaining in probe space.
+  idx_t start_boundary = end - num_bits;
+  if (beg > start_boundary) {
+    return end;
+  }
+  uintx array_idx = beg >> LogBitsPerWord;
+  uintx bit_number = beg & right_n_bits(LogBitsPerWord);
+  uintx element_bits = _bitmap[array_idx];
+  if (bit_number > 0) {
+    uintx mask_out = right_n_bits(bit_number);
+    element_bits &= ~mask_out;
+  }
+
+  // The following loop minimizes the number of spans probed in order to find num_bits consecutive bits.
+  // For example, if bit_number = beg = 0, num_bits = 8, and element_bits equals 00111111_11000000_00000000_10011000B,
+  // we need only 3 probes to find the match at bit offset 22.
+  //
+  // Let beg = 0
+  // element_bits = 00111111_11000000_00000000_10011000B;
+  //                                           ________   (the searched span)
+  //                                           ^   ^  ^- bit_number = beg = 0
+  //                                           |   +-- next_start_candidate_1 (where next 1 is found)
+  //                                           +------ next_start_candidate_2 (start of the trailing 1s within span)
+  // Let beg = 7
+  // element_bits = 00111111_11000000_00000000_10011000B;
+  //                          ^       ^_________   (the searched span)
+  //                          |       |        ^- bit_number = beg = 7
+  //                          |       +---------- next_start_candidate_2 (there are no trailing 1s within span)
+  //                          +------------------ next_start_candidate_1 (where next 1 is found)
+  // Let beg = 22
+  // element_bits = 00111111_11000001_11111100_10011000B;
+  //                  _________   (the searched span)
+  //                          ^- bit_number = beg = 22
+  // Here, is_forward_consecutive_ones(22, 8) succeeds and we report the match
+
+  while (true) {
+    if (element_bits == 0) {
+      // move to the next element
+      beg += BitsPerWord - bit_number;
+      if (beg > start_boundary) {
+        // No match found.
+        return end;
+      }
+      array_idx++;
+      bit_number = 0;
+      element_bits = _bitmap[array_idx];
+    } else if (is_forward_consecutive_ones(beg, num_bits)) {
+      return beg;
+    } else {
+      // There is at least one non-zero bit within the masked element_bits. Arrange to skip over bits that
+      // cannot be part of a consecutive-ones match.
+      uintx next_set_bit = count_trailing_zeros<uintx>(element_bits);
+      uintx next_start_candidate_1 = (array_idx << LogBitsPerWord) + next_set_bit;
+
+      // There is at least one zero bit in this span. Align the next probe at the start of trailing ones for probed span,
+      // or align at end of span if this span has no trailing ones.
+      size_t trailing_ones = count_trailing_ones(beg + num_bits - 1);
+      uintx next_start_candidate_2 = beg + num_bits - trailing_ones;
+
+      beg = MAX2(next_start_candidate_1, next_start_candidate_2);
+      if (beg > start_boundary) {
+        // No match found.
+        return end;
+      }
+      array_idx = beg >> LogBitsPerWord;
+      element_bits = _bitmap[array_idx];
+      bit_number = beg & right_n_bits(LogBitsPerWord);
+      if (bit_number > 0) {
+        size_t mask_out = right_n_bits(bit_number);
+        element_bits &= ~mask_out;
+      }
+    }
+  }
+}
+
+idx_t ShenandoahSimpleBitMap::find_last_consecutive_set_bits(const idx_t beg, idx_t end, const size_t num_bits) const {
+
+  assert((end >= 0) && (end < _num_bits), "precondition");
+
+  // Stop looking if there are not num_bits remaining in probe space.
+  idx_t last_boundary = beg + num_bits;
+  if (end < last_boundary) {
+    return beg;
+  }
+
+  size_t array_idx = end >> LogBitsPerWord;
+  uintx bit_number = end & right_n_bits(LogBitsPerWord);
+  uintx element_bits = _bitmap[array_idx];
+  if (bit_number < BitsPerWord - 1) {
+    uintx mask_in = right_n_bits(bit_number + 1);
+    element_bits &= mask_in;
+  }
+
+  // See comment in find_first_consecutive_set_bits to understand how this loop works.
+  while (true) {
+    if (element_bits == 0) {
+      // move to the previous element
+      end -= bit_number + 1;
+      if (end < last_boundary) {
+        // No match found.
+        return beg;
+      }
+      array_idx--;
+      bit_number = BitsPerWord - 1;
+      element_bits = _bitmap[array_idx];
+    } else if (is_backward_consecutive_ones(end, num_bits)) {
+      return end + 1 - num_bits;
+    } else {
+      // There is at least one non-zero bit within the masked element_bits. Arrange to skip over bits that
+      // cannot be part of a consecutive-ones match.
+      uintx next_set_bit = BitsPerWord - (1 + count_leading_zeros<uintx>(element_bits));
+      uintx next_last_candidate_1 = (array_idx << LogBitsPerWord) + next_set_bit;
+
+      // There is at least one zero bit in this span.  Align the next probe at the end of leading ones for probed span,
+      // or align before start of span if this span has no leading ones.
+      size_t leading_ones = count_leading_ones(end - (num_bits - 1));
+      uintx next_last_candidate_2 = end - (num_bits - leading_ones);
+
+      end = MIN2(next_last_candidate_1, next_last_candidate_2);
+      if (end < last_boundary) {
+        // No match found.
+        return beg;
+      }
+      array_idx = end >> LogBitsPerWord;
+      bit_number = end & right_n_bits(LogBitsPerWord);
+      element_bits = _bitmap[array_idx];
+      if (bit_number < BitsPerWord - 1) {
+        size_t mask_in = right_n_bits(bit_number + 1);
+        element_bits &= mask_in;
+      }
+    }
+  }
+}
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.hpp
new file mode 100644
index 00000000000..c22e9527002
--- /dev/null
+++ b/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.hpp
@@ -0,0 +1,170 @@
+/*
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHSIMPLEBITMAP_HPP
+#define SHARE_GC_SHENANDOAH_SHENANDOAHSIMPLEBITMAP_HPP
+
+#include <cstddef>
+
+#include "gc/shenandoah/shenandoahAsserts.hpp"
+
+// TODO: Merge the enhanced capabilities of ShenandoahSimpleBitMap into src/hotspot/share/utilities/bitMap.hpp
+//       and deprecate ShenandoahSimpleBitMap.  The key enhanced capabilities to be integrated include:
+//
+//   1. Allow searches from high to low memory (when biasing allocations towards the top of the heap)
+//   2. Allow searches for clusters of contiguous set bits (to expedite allocation for humongous objects)
+//
+// idx_t is defined here as ssize_t.  In src/hotspot/share/utilities/bitMap.hpp, idx_t is defined as size_t.
+// This is a significant incompatibility.
+//
+// The API and internal implementation of ShenandoahSimpleBitMap and ShenandoahRegionPartitions use idx_t to
+// represent index, even though index is "inherently" unsigned.  There are two reasons for this choice:
+//  1. We use -1 as a sentinel value to represent empty partitions.  This same value may be used to represent
+//     failure to find a previous set bit or previous range of set bits.
+//  2. Certain loops are written most naturally if the iterator, which may hold the sentinel -1 value, can be
+//     declared as signed and the terminating condition can be < 0.
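+//
+// As an illustration of point 2 (hypothetical usage, not part of this change), a backward scan over all set bits
+// can use the -1 sentinel as the terminating value, assuming the map is non-empty:
+//
+//   for (idx_t idx = map.find_last_set_bit(map.size() - 1); idx >= 0;
+//        idx = (idx > 0) ? map.find_last_set_bit(idx - 1) : -1) {
+//     // idx is the index of a set bit, visited from highest to lowest
+//   }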
+
+typedef ssize_t idx_t;
+
+// ShenandoahSimpleBitMap resembles CHeapBitMap but adds missing support for find_first_consecutive_set_bits() and
+// find_last_consecutive_set_bits().  An alternative refactoring would be to subclass CHeapBitMap, but this might
+// break abstraction rules, because an efficient implementation requires assumptions about superclass internals
+// that might be violated by future software maintenance.
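+//
+// Illustrative usage sketch (hypothetical names, not part of this change): track which regions are allocatable and
+// search for a run of contiguous allocatable regions large enough for a humongous allocation:
+//
+//   ShenandoahSimpleBitMap free_regions(num_regions);
+//   free_regions.set_bit(region_index);                // mark a region as allocatable
+//   idx_t start = free_regions.find_first_consecutive_set_bits(0, required_regions);
+//   if (start < free_regions.size()) {
+//     // regions [start, start + required_regions) are all allocatable
+//   }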
+class ShenandoahSimpleBitMap {
+  const idx_t _num_bits;
+  const size_t _num_words;
+  uintx* const _bitmap;
+
+public:
+  ShenandoahSimpleBitMap(size_t num_bits);
+
+  ~ShenandoahSimpleBitMap();
+
+  void clear_all() {
+    for (size_t i = 0; i < _num_words; i++) {
+      _bitmap[i] = 0;
+    }
+  }
+
+private:
+
+  // Count consecutive ones in forward order, starting from start_idx.  Requires that there is at least one zero
+  // between start_idx and index value (_num_bits - 1), inclusive.
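+  // For example (illustrative): if bits 5, 6, and 7 are set and bit 8 is clear, count_leading_ones(5) returns 3.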
+  size_t count_leading_ones(idx_t start_idx) const;
+
+  // Count consecutive ones in reverse order, starting from last_idx.  Requires that there is at least one zero
+  // between last_idx and index value zero, inclusive.
+  size_t count_trailing_ones(idx_t last_idx) const;
+
+  bool is_forward_consecutive_ones(idx_t start_idx, idx_t count) const;
+  bool is_backward_consecutive_ones(idx_t last_idx, idx_t count) const;
+
+public:
+
+  inline idx_t aligned_index(idx_t idx) const {
+    assert((idx >= 0) && (idx < _num_bits), "precondition");
+    idx_t array_idx = idx & ~right_n_bits(LogBitsPerWord);
+    return array_idx;
+  }
+
+  inline constexpr idx_t alignment() const {
+    return BitsPerWord;
+  }
+
+  // For testing
+  inline idx_t size() const {
+    return _num_bits;
+  }
+
+  // Return the word that holds idx bit and its neighboring bits.
+  inline uintx bits_at(idx_t idx) const {
+    assert((idx >= 0) && (idx < _num_bits), "precondition");
+    idx_t array_idx = idx >> LogBitsPerWord;
+    return _bitmap[array_idx];
+  }
+
+  inline void set_bit(idx_t idx) {
+    assert((idx >= 0) && (idx < _num_bits), "precondition");
+    size_t array_idx = idx >> LogBitsPerWord;
+    uintx bit_number = idx & right_n_bits(LogBitsPerWord);
+    uintx the_bit = nth_bit(bit_number);
+    _bitmap[array_idx] |= the_bit;
+  }
+
+  inline void clear_bit(idx_t idx) {
+    assert((idx >= 0) && (idx < _num_bits), "precondition");
+    size_t array_idx = idx >> LogBitsPerWord;
+    uintx bit_number = idx & right_n_bits(LogBitsPerWord);
+    uintx the_bit = nth_bit(bit_number);
+    _bitmap[array_idx] &= ~the_bit;
+  }
+
+  inline bool is_set(idx_t idx) const {
+    assert((idx >= 0) && (idx < _num_bits), "precondition");
+    size_t array_idx = idx >> LogBitsPerWord;
+    uintx bit_number = idx & right_n_bits(LogBitsPerWord);
+    uintx the_bit = nth_bit(bit_number);
+    return (_bitmap[array_idx] & the_bit)? true: false;
+  }
+
+  // Return the index of the first set bit in the range [beg, size()), or size() if none found.
+  // precondition: beg is within the valid range for the bitmap.
+  inline idx_t find_first_set_bit(idx_t beg) const;
+
+  // Return the index of the first set bit in the range [beg, end), or end if none found.
+  // precondition: beg and end form a valid range for the bitmap.
+  inline idx_t find_first_set_bit(idx_t beg, idx_t end) const;
+
+  // Return the index of the last set bit in the range (-1, end], or -1 if none found.
+  // precondition: end is within the valid range for the bitmap.
+  inline idx_t find_last_set_bit(idx_t end) const;
+
+  // Return the index of the last set bit in the range (beg, end], or beg if none found.
+  // precondition: beg and end form a valid range for the bitmap.
+  inline idx_t find_last_set_bit(idx_t beg, idx_t end) const;
+
+  // Return the start index of the first run of <num_bits> consecutive set bits for which the first set bit is within
+  //   the range [beg, size()), or size() if the run of <num_bits> is not found within this range.
+  // precondition: beg is within the valid range for the bitmap.
+  inline idx_t find_first_consecutive_set_bits(idx_t beg, size_t num_bits) const;
+
+  // Return the start index of the first run of <num_bits> consecutive set bits for which the first set bit is within
+  //   the range [beg, end), or end if the run of <num_bits> is not found within this range.
+  // precondition: beg and end form a valid range for the bitmap.
+  idx_t find_first_consecutive_set_bits(idx_t beg, idx_t end, size_t num_bits) const;
+
+  // Return the start index of the last run of <num_bits> consecutive set bits for which the entire run of set bits is within
+  // the range (-1, end], or -1 if the run of <num_bits> is not found within this range.
+  // precondition: end is within the valid range for the bitmap.
+  inline idx_t find_last_consecutive_set_bits(idx_t end, size_t num_bits) const;
+
+  // Return the start index of the last run of <num_bits> consecutive set bits for which the entire run of set bits is within
+  // the range (beg, end], or beg if the run of <num_bits> is not found within this range.
+  // precondition: beg and end form a valid range for the bitmap.
+  idx_t find_last_consecutive_set_bits(idx_t beg, idx_t end, size_t num_bits) const;
+};
+
+#endif // SHARE_GC_SHENANDOAH_SHENANDOAHSIMPLEBITMAP_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.inline.hpp
new file mode 100644
index 00000000000..3e602ed11e0
--- /dev/null
+++ b/src/hotspot/share/gc/shenandoah/shenandoahSimpleBitMap.inline.hpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHSIMPLEBITMAP_INLINE_HPP
+#define SHARE_GC_SHENANDOAH_SHENANDOAHSIMPLEBITMAP_INLINE_HPP
+
+#include "gc/shenandoah/shenandoahSimpleBitMap.hpp"
+
+inline idx_t ShenandoahSimpleBitMap::find_first_set_bit(idx_t beg, idx_t end) const {
+  assert((beg >= 0) && (beg < _num_bits), "precondition");
+  assert((end > beg) && (end <= _num_bits), "precondition");
+  do {
+    size_t array_idx = beg >> LogBitsPerWord;
+    uintx bit_number = beg & right_n_bits(LogBitsPerWord);
+    uintx element_bits = _bitmap[array_idx];
+    if (bit_number > 0) {
+      uintx mask_out = right_n_bits(bit_number);
+      element_bits &= ~mask_out;
+    }
+    if (element_bits) {
+      // The next set bit is here.  Find first set bit >= bit_number;
+      uintx aligned = element_bits >> bit_number;
+      uintx first_set_bit = count_trailing_zeros<uintx>(aligned);
+      idx_t candidate_result = (array_idx * BitsPerWord) + bit_number + first_set_bit;
+      return (candidate_result < end)? candidate_result: end;
+    } else {
+      // Next bit is not here.  Try the next array element
+      beg += BitsPerWord - bit_number;
+    }
+  } while (beg < end);
+  return end;
+}
+
+inline idx_t ShenandoahSimpleBitMap::find_first_set_bit(idx_t beg) const {
+  assert((beg >= 0) && (beg < size()), "precondition");
+  return find_first_set_bit(beg, size());
+}
+
+inline idx_t ShenandoahSimpleBitMap::find_last_set_bit(idx_t beg, idx_t end) const {
+  assert((end >= 0) && (end < _num_bits), "precondition");
+  assert((beg >= -1) && (beg < end), "precondition");
+  do {
+    idx_t array_idx = end >> LogBitsPerWord;
+    uintx bit_number = end & right_n_bits(LogBitsPerWord);
+    uintx element_bits = _bitmap[array_idx];
+    if (bit_number < BitsPerWord - 1) {
+      uintx mask_in = right_n_bits(bit_number + 1);
+      element_bits &= mask_in;
+    }
+    if (element_bits) {
+      // The prev set bit is here.  Find the first set bit <= bit_number
+      uintx aligned = element_bits << (BitsPerWord - (bit_number + 1));
+      uintx first_set_bit = count_leading_zeros<uintx>(aligned);
+      idx_t candidate_result = array_idx * BitsPerWord + (bit_number - first_set_bit);
+      return (candidate_result > beg)? candidate_result: beg;
+    } else {
+      // Next bit is not here.  Try the previous array element
+      end -= (bit_number + 1);
+    }
+  } while (end > beg);
+  return beg;
+}
+
+inline idx_t ShenandoahSimpleBitMap::find_last_set_bit(idx_t end) const {
+  assert((end >= 0) && (end < _num_bits), "precondition");
+  return find_last_set_bit(-1, end);
+}
+
+inline idx_t ShenandoahSimpleBitMap::find_first_consecutive_set_bits(idx_t beg, size_t num_bits) const {
+  assert((beg >= 0) && (beg < _num_bits), "precondition");
+  return find_first_consecutive_set_bits(beg, size(), num_bits);
+}
+
+inline idx_t ShenandoahSimpleBitMap::find_last_consecutive_set_bits(idx_t end, size_t num_bits) const {
+  assert((end >= 0) && (end < _num_bits), "precondition");
+  return find_last_consecutive_set_bits((idx_t) -1, end, num_bits);
+}
+
+#endif // SHARE_GC_SHENANDOAH_SHENANDOAHSIMPLEBITMAP_INLINE_HPP
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
index c8c8d46de81..fb1cda75727 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahVerifier.cpp
@@ -352,9 +352,10 @@ class ShenandoahVerifyOopClosure : public BasicOopIterateClosure {
 // a subset (e.g. the young generation or old generation) of the total heap.
 class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure {
 private:
-  size_t _used, _committed, _garbage, _regions, _humongous_waste;
+  size_t _used, _committed, _garbage, _regions, _humongous_waste, _trashed_regions;
 public:
-  ShenandoahCalculateRegionStatsClosure() : _used(0), _committed(0), _garbage(0), _regions(0), _humongous_waste(0) {};
+  ShenandoahCalculateRegionStatsClosure() :
+      _used(0), _committed(0), _garbage(0), _regions(0), _humongous_waste(0), _trashed_regions(0) {};
 
   void heap_region_do(ShenandoahHeapRegion* r) override {
     _used += r->used();
@@ -363,6 +364,9 @@ class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure
     if (r->is_humongous()) {
       _humongous_waste += r->free();
     }
+    if (r->is_trash()) {
+      _trashed_regions++;
+    }
     _regions++;
     log_debug(gc)("ShenandoahCalculateRegionStatsClosure: adding " SIZE_FORMAT " for %s Region " SIZE_FORMAT ", yielding: " SIZE_FORMAT,
             r->used(), (r->is_humongous() ? "humongous" : "regular"), r->index(), _used);
@@ -376,6 +380,7 @@ class ShenandoahCalculateRegionStatsClosure : public ShenandoahHeapRegionClosure
 
   // span is the total memory affiliated with these stats (some of which is in use and other is available)
   size_t span() const { return _regions * ShenandoahHeapRegion::region_size_bytes(); }
+  size_t non_trashed_span() const { return (_regions - _trashed_regions) * ShenandoahHeapRegion::region_size_bytes(); }
 };
 
 class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure {
@@ -427,9 +432,11 @@ class ShenandoahGenerationStatsClosure : public ShenandoahHeapRegionClosure {
               label, generation->name(), generation->used_regions(), stats.regions());
 
     size_t generation_capacity = generation->max_capacity();
-    guarantee(stats.span() <= generation_capacity,
-              "%s: generation (%s) size spanned by regions (" SIZE_FORMAT ") must not exceed current capacity (" PROPERFMT ")",
-              label, generation->name(), stats.regions(), PROPERFMTARGS(generation_capacity));
+    guarantee(stats.non_trashed_span() <= generation_capacity,
+              "%s: generation (%s) size spanned by regions (" SIZE_FORMAT ") * region size (" PROPERFMT
+              ") must not exceed current capacity (" PROPERFMT ")",
+              label, generation->name(), stats.regions(), PROPERFMTARGS(ShenandoahHeapRegion::region_size_bytes()),
+              PROPERFMTARGS(generation_capacity));
 
     size_t humongous_waste = generation->get_humongous_waste();
     guarantee(stats.waste() == humongous_waste,
diff --git a/test/hotspot/jtreg/ProblemList.txt b/test/hotspot/jtreg/ProblemList.txt
index d65bb123523..4ce6d7d8b7c 100644
--- a/test/hotspot/jtreg/ProblemList.txt
+++ b/test/hotspot/jtreg/ProblemList.txt
@@ -91,10 +91,8 @@ gc/stress/gclocker/TestGCLockerWithParallel.java 8180622 generic-all
 gc/stress/gclocker/TestGCLockerWithG1.java 8180622 generic-all
 gc/stress/TestJNIBlockFullGC/TestJNIBlockFullGC.java 8192647 generic-all
 gc/stress/TestStressG1Humongous.java 8286554 windows-x64
-gc/shenandoah/TestHumongousThreshold.java#default 8327000 generic-all
-gc/shenandoah/TestHumongousThreshold.java#16b 8327000 generic-all
-gc/shenandoah/TestHumongousThreshold.java#generational 8327000 generic-all
-gc/shenandoah/TestHumongousThreshold.java#generational-16b 8327000 generic-all
+gc/shenandoah/TestAllocIntArrays.java#iu-aggressive 8289220 generic-all
+gc/shenandoah/TestAllocIntArrays.java#aggressive 8289220 generic-all
 
 #############################################################################
 
diff --git a/test/hotspot/jtreg/gc/shenandoah/TestAllocIntArrays.java b/test/hotspot/jtreg/gc/shenandoah/TestAllocIntArrays.java
index 250b2c847d5..d08fede0d98 100644
--- a/test/hotspot/jtreg/gc/shenandoah/TestAllocIntArrays.java
+++ b/test/hotspot/jtreg/gc/shenandoah/TestAllocIntArrays.java
@@ -207,9 +207,14 @@ public class TestAllocIntArrays {
     public static void main(String[] args) throws Exception {
         final int min = 0;
         final int max = 384 * 1024;
+        // Each allocated int array is assumed to consume 16 bytes for header and alignment, plus
+        // 4 bytes for each of its elements, averaged over the range of array sizes.
         long count = TARGET_MB * 1024 * 1024 / (16 + 4 * (min + (max - min) / 2));
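+        // With min = 0 and max = 384K elements, the average array has about 192K elements, so each allocation
+        // averages roughly 16 + 4 * 192K bytes (about 768 KB), making count approximately (TARGET_MB * 1MB) / 768KB.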
 
         Random r = Utils.getRandomInstance();
+        // Repeatedly allocate int arrays having between 0 and 384K elements, until we have allocated
+        // approximately TARGET_MB in total.  The largest allocated array consumes 384K * 4 + 16 bytes,
+        // roughly 1.5 MB, which is well below the heap size of 1g.
         for (long c = 0; c < count; c++) {
             sink = new int[min + r.nextInt(max - min)];
         }
diff --git a/test/hotspot/jtreg/gc/shenandoah/generational/TestOldGrowthTriggers.java b/test/hotspot/jtreg/gc/shenandoah/generational/TestOldGrowthTriggers.java
index 708f02fb15d..d5cb88d93f8 100644
--- a/test/hotspot/jtreg/gc/shenandoah/generational/TestOldGrowthTriggers.java
+++ b/test/hotspot/jtreg/gc/shenandoah/generational/TestOldGrowthTriggers.java
@@ -42,19 +42,21 @@ public class TestOldGrowthTriggers {
   public static void makeOldAllocations() {
     // Expect most of the BigInteger entries placed into array to be promoted, and most will eventually become garbage within old
 
-    final int array_size = 512 * 1024;   // 512K entries
-    BigInteger array[] = new BigInteger[array_size];
+    final int ArraySize = 512 * 1024;   // 512K entries
+    final int BitsInBigInteger = 128;
+    final int RefillIterations = 64;
+    BigInteger array[] = new BigInteger[ArraySize];
     Random r = new Random(46);
 
-    for (int i = 0; i < array_size; i++) {
-      array[i] = new BigInteger(128, r);
+    for (int i = 0; i < ArraySize; i++) {
+      array[i] = new BigInteger(BitsInBigInteger, r);
     }
 
-    for (int refill_count = 0; refill_count < 192; refill_count++) {
-      // Each refill repopulates array_size randomly selected elements within array
-      for (int i = 0; i < array_size; i++) {
-        int replace_index = r.nextInt(array_size);
-        int derive_index = r.nextInt(array_size);
+    for (int refill_count = 0; refill_count < RefillIterations; refill_count++) {
+      // Each refill repopulates ArraySize randomly selected elements within array
+      for (int i = 0; i < ArraySize; i++) {
+        int replace_index = r.nextInt(ArraySize);
+        int derive_index = r.nextInt(ArraySize);
         switch (i & 0x3) {
           case 0:
             // 50% chance of creating garbage
@@ -100,8 +102,8 @@ public static void main(String[] args) throws Exception {
     }
 
     testOld("-Xlog:gc",
-            "-Xms256m",
-            "-Xmx256m",
+            "-Xms96m",
+            "-Xmx96m",
             "-XX:+UnlockDiagnosticVMOptions",
             "-XX:+UnlockExperimentalVMOptions",
             "-XX:+UseShenandoahGC",
diff --git a/test/hotspot/jtreg/gc/shenandoah/oom/TestThreadFailure.java b/test/hotspot/jtreg/gc/shenandoah/oom/TestThreadFailure.java
index 10f150c4aa1..f5f40945a42 100644
--- a/test/hotspot/jtreg/gc/shenandoah/oom/TestThreadFailure.java
+++ b/test/hotspot/jtreg/gc/shenandoah/oom/TestThreadFailure.java
@@ -54,9 +54,17 @@ public void run() {
     public static void main(String[] args) throws Exception {
         if (args.length > 0) {
             for (int t = 0; t < COUNT; t++) {
+                // If we experience an OutOfMemoryError while instantiating NastyThread, we abort main without
+                // printing "All good" and report a non-zero termination code.  If the previously instantiated
+                // NastyThread accumulated more than ShenandoahNoProgressThreshold unproductive GC cycles before
+                // failing, the main thread may not attempt a Full GC before it experiences the OutOfMemoryError.
                 Thread thread = new NastyThread();
                 thread.start();
                 thread.join();
+                // Having joined thread, we know the memory consumed by thread is now garbage, and will eventually be
+                // collected.  Some or all of that memory may have been promoted, so we may need to perform a Full GC
+                // in order to reclaim it quickly.
             }
             System.out.println("All good");
             return;