diff --git a/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp
index ca175fe1c47b1..89a97a4984fc8 100644
--- a/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -57,7 +57,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   __ mov_metadata(rscratch1, m);
   ce->store_parameter(rscratch1, 1);
   ce->store_parameter(_bci, 0);
-  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
+  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::counter_overflow_id)));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   __ b(_continuation);
@@ -66,7 +66,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
 void RangeCheckStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   if (_info->deoptimize_on_exception()) {
-    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+    address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
     __ far_call(RuntimeAddress(a));
     ce->add_call_info_here(_info);
     ce->verify_oop_map(_info);
@@ -79,13 +79,13 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
   } else {
     __ mov(rscratch1, _index->as_jint());
   }
-  Runtime1::StubID stub_id;
+  C1StubId stub_id;
   if (_throw_index_out_of_bounds_exception) {
-    stub_id = Runtime1::throw_index_exception_id;
+    stub_id = C1StubId::throw_index_exception_id;
   } else {
     assert(_array != LIR_Opr::nullOpr(), "sanity");
     __ mov(rscratch2, _array->as_pointer_register());
-    stub_id = Runtime1::throw_range_check_failed_id;
+    stub_id = C1StubId::throw_range_check_failed_id;
   }
   __ lea(lr, RuntimeAddress(Runtime1::entry_for(stub_id)));
   __ blr(lr);
@@ -100,7 +100,7 @@ PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
 
 void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
-  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+  address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
   __ far_call(RuntimeAddress(a));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
@@ -112,7 +112,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
     ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
   }
   __ bind(_entry);
-  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
+  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::throw_div0_exception_id)));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
 #ifdef ASSERT
@@ -124,14 +124,14 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
 
 // Implementation of NewInstanceStub
 
-NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
+NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) {
   _result = result;
   _klass = klass;
   _klass_reg = klass_reg;
   _info = new CodeEmitInfo(info);
-  assert(stub_id == Runtime1::new_instance_id                 ||
-         stub_id == Runtime1::fast_new_instance_id            ||
-         stub_id == Runtime1::fast_new_instance_init_check_id,
+  assert(stub_id == C1StubId::new_instance_id                 ||
+         stub_id == C1StubId::fast_new_instance_id            ||
+         stub_id == C1StubId::fast_new_instance_init_check_id,
          "need new_instance id");
   _stub_id   = stub_id;
 }
@@ -167,7 +167,7 @@ void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   assert(_length->as_register() == r19, "length must in r19,");
   assert(_klass_reg->as_register() == r3, "klass_reg must in r3");
-  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
+  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_type_array_id)));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   assert(_result->as_register() == r0, "result must in r0");
@@ -190,7 +190,7 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   assert(_length->as_register() == r19, "length must in r19,");
   assert(_klass_reg->as_register() == r3, "klass_reg must in r3");
-  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
+  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_object_array_id)));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   assert(_result->as_register() == r0, "result must in r0");
@@ -202,11 +202,11 @@ void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   ce->store_parameter(_obj_reg->as_register(),  1);
   ce->store_parameter(_lock_reg->as_register(), 0);
-  Runtime1::StubID enter_id;
+  C1StubId enter_id;
   if (ce->compilation()->has_fpu_code()) {
-    enter_id = Runtime1::monitorenter_id;
+    enter_id = C1StubId::monitorenter_id;
   } else {
-    enter_id = Runtime1::monitorenter_nofpu_id;
+    enter_id = C1StubId::monitorenter_nofpu_id;
   }
   __ far_call(RuntimeAddress(Runtime1::entry_for(enter_id)));
   ce->add_call_info_here(_info);
@@ -223,11 +223,11 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) {
   }
   ce->store_parameter(_lock_reg->as_register(), 0);
   // note: non-blocking leaf routine => no call info needed
-  Runtime1::StubID exit_id;
+  C1StubId exit_id;
   if (ce->compilation()->has_fpu_code()) {
-    exit_id = Runtime1::monitorexit_id;
+    exit_id = C1StubId::monitorexit_id;
   } else {
-    exit_id = Runtime1::monitorexit_nofpu_id;
+    exit_id = C1StubId::monitorexit_nofpu_id;
   }
   __ adr(lr, _continuation);
   __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
@@ -255,7 +255,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
 void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   ce->store_parameter(_trap_request, 0);
-  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
+  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::deoptimize_id)));
   ce->add_call_info_here(_info);
   DEBUG_ONLY(__ should_not_reach_here());
 }
@@ -265,9 +265,9 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
   address a;
   if (_info->deoptimize_on_exception()) {
     // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
-    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+    a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
   } else {
-    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
+    a = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id);
   }
 
   ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
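
The hunks above are a mechanical rename: the old unscoped nested enum Runtime1::StubID becomes the scoped enum C1StubId, so every enumerator use gains a C1StubId:: qualifier while Runtime1::entry_for() stays where it was. The declaration itself lives in the shared header (src/hotspot/share/c1/c1_Runtime1.hpp), which is not part of this section; the following is only a sketch of the shape the call sites above imply, listing just the ids visible in these hunks:

    // Illustrative sketch, not the actual declaration from c1_Runtime1.hpp.
    // Scoped, so the enumerators no longer leak into Runtime1's scope and
    // no longer convert implicitly to int.
    enum class C1StubId : int {
      forward_exception_id,
      new_instance_id,
      fast_new_instance_id,
      fast_new_instance_init_check_id,
      counter_overflow_id,
      // ... the remaining ids used at the call sites above ...
    };

    // entry_for() keeps its home in Runtime1 but is now keyed by the scoped id,
    // e.g. Runtime1::entry_for(C1StubId::counter_overflow_id):
    // static address entry_for(C1StubId id);
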
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
index 91430be5835b5..5e116d82761ac 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
@@ -321,19 +321,19 @@ void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
 
   switch (patching_id(info)) {
   case PatchingStub::access_field_id:
-    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
+    target = Runtime1::entry_for(C1StubId::access_field_patching_id);
     reloc_type = relocInfo::section_word_type;
     break;
   case PatchingStub::load_klass_id:
-    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
+    target = Runtime1::entry_for(C1StubId::load_klass_patching_id);
     reloc_type = relocInfo::metadata_type;
     break;
   case PatchingStub::load_mirror_id:
-    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
+    target = Runtime1::entry_for(C1StubId::load_mirror_patching_id);
     reloc_type = relocInfo::oop_type;
     break;
   case PatchingStub::load_appendix_id:
-    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
+    target = Runtime1::entry_for(C1StubId::load_appendix_patching_id);
     reloc_type = relocInfo::oop_type;
     break;
   default: ShouldNotReachHere();
@@ -375,7 +375,7 @@ int LIR_Assembler::emit_exception_handler() {
   __ verify_not_null_oop(r0);
 
   // search an exception handler (r0: exception oop, r3: throwing pc)
-  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
+  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::handle_exception_from_callee_id)));
   __ should_not_reach_here();
   guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
   __ end_a_stub();
@@ -432,7 +432,7 @@ int LIR_Assembler::emit_unwind_handler() {
   // remove the activation and dispatch to the unwind handler
   __ block_comment("remove_frame and dispatch to the unwind handler");
   __ remove_frame(initial_frame_size_in_bytes());
-  __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
+  __ far_jump(RuntimeAddress(Runtime1::entry_for(C1StubId::unwind_exception_id)));
 
   // Emit the slow path assembly
   if (stub != nullptr) {
@@ -875,19 +875,19 @@ void LIR_Assembler::klass2reg_with_patching(Register reg, CodeEmitInfo* info) {
 
   switch (patching_id(info)) {
   case PatchingStub::access_field_id:
-    target = Runtime1::entry_for(Runtime1::access_field_patching_id);
+    target = Runtime1::entry_for(C1StubId::access_field_patching_id);
     reloc_type = relocInfo::section_word_type;
     break;
   case PatchingStub::load_klass_id:
-    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
+    target = Runtime1::entry_for(C1StubId::load_klass_patching_id);
     reloc_type = relocInfo::metadata_type;
     break;
   case PatchingStub::load_mirror_id:
-    target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
+    target = Runtime1::entry_for(C1StubId::load_mirror_patching_id);
     reloc_type = relocInfo::oop_type;
     break;
   case PatchingStub::load_appendix_id:
-    target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
+    target = Runtime1::entry_for(C1StubId::load_appendix_patching_id);
     reloc_type = relocInfo::oop_type;
     break;
   default: ShouldNotReachHere();
@@ -1356,7 +1356,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
         __ br(Assembler::EQ, *success_target);
 
         __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
-        __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
+        __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
         __ ldr(klass_RInfo, Address(__ post(sp, 2 * wordSize)));
         // result is a boolean
         __ cbzw(klass_RInfo, *failure_target);
@@ -1367,7 +1367,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
       __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
       // call out-of-line instance of __ check_klass_subtype_slow_path(...):
       __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
-      __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
+      __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
       __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
       // result is a boolean
       __ cbz(k_RInfo, *failure_target);
@@ -1446,7 +1446,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
     // call out-of-line instance of __ check_klass_subtype_slow_path(...):
     __ stp(klass_RInfo, k_RInfo, Address(__ pre(sp, -2 * wordSize)));
-    __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
+    __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
     __ ldp(k_RInfo, klass_RInfo, Address(__ post(sp, 2 * wordSize)));
     // result is a boolean
     __ cbzw(k_RInfo, *failure_target);
@@ -2035,7 +2035,7 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
   // exception object is not added to oop map by LinearScan
   // (LinearScan assumes that no oops are in fixed registers)
   info->add_register_oop(exceptionOop);
-  Runtime1::StubID unwind_id;
+  C1StubId unwind_id;
 
   // get current pc information
   // pc is only needed if the method has an exception handler, the unwind code does not need it.
@@ -2054,9 +2054,9 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
   __ verify_not_null_oop(r0);
   // search an exception handler (r0: exception oop, r3: throwing pc)
   if (compilation()->has_fpu_code()) {
-    unwind_id = Runtime1::handle_exception_id;
+    unwind_id = C1StubId::handle_exception_id;
   } else {
-    unwind_id = Runtime1::handle_exception_nofpu_id;
+    unwind_id = C1StubId::handle_exception_nofpu_id;
   }
   __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
 
@@ -2337,7 +2337,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
       __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr);
 
       __ PUSH(src, dst);
-      __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
+      __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
       __ POP(src, dst);
 
       __ cbnz(src, cont);
diff --git a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
index 8f1260feba3ea..4acac65ad5bab 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp
@@ -1246,7 +1246,7 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
   args->append(rank);
   args->append(varargs);
   LIR_Opr reg = result_register_for(x->type());
-  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
+  __ call_runtime(Runtime1::entry_for(C1StubId::new_multi_array_id),
                   LIR_OprFact::illegalOpr,
                   reg, args, info);
 
@@ -1277,14 +1277,14 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
   CodeStub* stub;
   if (x->is_incompatible_class_change_check()) {
     assert(patching_info == nullptr, "can't patch this");
-    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
+    stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
   } else if (x->is_invokespecial_receiver_check()) {
     assert(patching_info == nullptr, "can't patch this");
     stub = new DeoptimizeStub(info_for_exception,
                               Deoptimization::Reason_class_check,
                               Deoptimization::Action_none);
   } else {
-    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
+    stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception);
   }
   LIR_Opr reg = rlock_result(x);
   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
diff --git a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
index d0d11d437e83e..8d1b3902ce42e 100644
--- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
@@ -267,7 +267,7 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register
 
   if (CURRENT_ENV->dtrace_alloc_probes()) {
     assert(obj == r0, "must be");
-    far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
+    far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::dtrace_object_alloc_id)));
   }
 
   verify_oop(obj);
@@ -308,7 +308,7 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1,
 
   if (CURRENT_ENV->dtrace_alloc_probes()) {
     assert(obj == r0, "must be");
-    far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
+    far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::dtrace_object_alloc_id)));
   }
 
   verify_oop(obj);
diff --git a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
index cb9eb03c580d2..0b9acc0f3a885 100644
--- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
@@ -100,10 +100,10 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
     if (frame_size() == no_frame_size) {
       leave();
       far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
-    } else if (_stub_id == Runtime1::forward_exception_id) {
+    } else if (_stub_id == (int)C1StubId::forward_exception_id) {
       should_not_reach_here();
     } else {
-      far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
+      far_jump(RuntimeAddress(Runtime1::entry_for(C1StubId::forward_exception_id)));
     }
     bind(L);
   }
@@ -358,7 +358,7 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address targe
 }
 
 
-OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
+OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm) {
   __ block_comment("generate_handle_exception");
 
   // incoming parameters
@@ -370,7 +370,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
   OopMapSet* oop_maps = new OopMapSet();
   OopMap* oop_map = nullptr;
   switch (id) {
-  case forward_exception_id:
+  case C1StubId::forward_exception_id:
     // We're handling an exception in the context of a compiled frame.
     // The registers have been saved in the standard places.  Perform
     // an exception lookup in the caller and dispatch to the handler
@@ -390,12 +390,12 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
     __ str(zr, Address(rthread, JavaThread::vm_result_offset()));
     __ str(zr, Address(rthread, JavaThread::vm_result_2_offset()));
     break;
-  case handle_exception_nofpu_id:
-  case handle_exception_id:
+  case C1StubId::handle_exception_nofpu_id:
+  case C1StubId::handle_exception_id:
     // At this point all registers MAY be live.
-    oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id);
+    oop_map = save_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id);
     break;
-  case handle_exception_from_callee_id: {
+  case C1StubId::handle_exception_from_callee_id: {
     // At this point all registers except exception oop (r0) and
     // exception pc (lr) are dead.
     const int frame_size = 2 /*fp, return address*/;
@@ -453,13 +453,13 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
   __ str(r0, Address(rfp, 1*BytesPerWord));
 
   switch (id) {
-  case forward_exception_id:
-  case handle_exception_nofpu_id:
-  case handle_exception_id:
+  case C1StubId::forward_exception_id:
+  case C1StubId::handle_exception_nofpu_id:
+  case C1StubId::handle_exception_id:
     // Restore the registers that were saved at the beginning.
-    restore_live_registers(sasm, id != handle_exception_nofpu_id);
+    restore_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id);
     break;
-  case handle_exception_from_callee_id:
+  case C1StubId::handle_exception_from_callee_id:
     break;
   default:  ShouldNotReachHere();
   }
@@ -611,7 +611,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
 }
 
 
-OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
+OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
 
   const Register exception_oop = r0;
   const Register exception_pc  = r3;
@@ -628,7 +628,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
   OopMap* oop_map = nullptr;
   switch (id) {
     {
-    case forward_exception_id:
+    case C1StubId::forward_exception_id:
       {
         oop_maps = generate_handle_exception(id, sasm);
         __ leave();
@@ -636,31 +636,31 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case throw_div0_exception_id:
+    case C1StubId::throw_div0_exception_id:
       { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments, does_not_return);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
       }
       break;
 
-    case throw_null_pointer_exception_id:
+    case C1StubId::throw_null_pointer_exception_id:
       { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments, does_not_return);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
       }
       break;
 
-    case new_instance_id:
-    case fast_new_instance_id:
-    case fast_new_instance_init_check_id:
+    case C1StubId::new_instance_id:
+    case C1StubId::fast_new_instance_id:
+    case C1StubId::fast_new_instance_init_check_id:
       {
         Register klass = r3; // Incoming
         Register obj   = r0; // Result
 
-        if (id == new_instance_id) {
+        if (id == C1StubId::new_instance_id) {
           __ set_info("new_instance", dont_gc_arguments);
-        } else if (id == fast_new_instance_id) {
+        } else if (id == C1StubId::fast_new_instance_id) {
           __ set_info("fast new_instance", dont_gc_arguments);
         } else {
-          assert(id == fast_new_instance_init_check_id, "bad StubID");
+          assert(id == C1StubId::fast_new_instance_init_check_id, "bad C1StubId");
           __ set_info("fast new_instance init check", dont_gc_arguments);
         }
 
@@ -679,7 +679,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
 
       break;
 
-    case counter_overflow_id:
+    case C1StubId::counter_overflow_id:
       {
         Register bci = r0, method = r1;
         __ enter();
@@ -697,14 +697,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case new_type_array_id:
-    case new_object_array_id:
+    case C1StubId::new_type_array_id:
+    case C1StubId::new_object_array_id:
       {
         Register length   = r19; // Incoming
         Register klass    = r3; // Incoming
         Register obj      = r0; // Result
 
-        if (id == new_type_array_id) {
+        if (id == C1StubId::new_type_array_id) {
           __ set_info("new_type_array", dont_gc_arguments);
         } else {
           __ set_info("new_object_array", dont_gc_arguments);
@@ -717,7 +717,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
           Register t0 = obj;
           __ ldrw(t0, Address(klass, Klass::layout_helper_offset()));
           __ asrw(t0, t0, Klass::_lh_array_tag_shift);
-          int tag = ((id == new_type_array_id)
+          int tag = ((id == C1StubId::new_type_array_id)
                      ? Klass::_lh_array_tag_type_value
                      : Klass::_lh_array_tag_obj_value);
           __ mov(rscratch1, tag);
@@ -732,7 +732,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
         __ enter();
         OopMap* map = save_live_registers(sasm);
         int call_offset;
-        if (id == new_type_array_id) {
+        if (id == C1StubId::new_type_array_id) {
           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
         } else {
           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
@@ -750,7 +750,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case new_multi_array_id:
+    case C1StubId::new_multi_array_id:
       { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
         // r0,: klass
         // r19,: rank
@@ -770,7 +770,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case register_finalizer_id:
+    case C1StubId::register_finalizer_id:
       {
         __ set_info("register_finalizer", dont_gc_arguments);
 
@@ -802,19 +802,19 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case throw_class_cast_exception_id:
+    case C1StubId::throw_class_cast_exception_id:
       { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments, does_not_return);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
       }
       break;
 
-    case throw_incompatible_class_change_error_id:
+    case C1StubId::throw_incompatible_class_change_error_id:
       { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments, does_not_return);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
       }
       break;
 
-    case slow_subtype_check_id:
+    case C1StubId::slow_subtype_check_id:
       {
         // Typical calling sequence:
         // __ push(klass_RInfo);  // object klass or other subclass
@@ -857,10 +857,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case monitorenter_nofpu_id:
+    case C1StubId::monitorenter_nofpu_id:
       save_fpu_registers = false;
       // fall through
-    case monitorenter_id:
+    case C1StubId::monitorenter_id:
       {
         StubFrame f(sasm, "monitorenter", dont_gc_arguments);
         OopMap* map = save_live_registers(sasm, save_fpu_registers);
@@ -878,10 +878,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case monitorexit_nofpu_id:
+    case C1StubId::monitorexit_nofpu_id:
       save_fpu_registers = false;
       // fall through
-    case monitorexit_id:
+    case C1StubId::monitorexit_id:
       {
         StubFrame f(sasm, "monitorexit", dont_gc_arguments);
         OopMap* map = save_live_registers(sasm, save_fpu_registers);
@@ -901,7 +901,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case deoptimize_id:
+    case C1StubId::deoptimize_id:
       {
         StubFrame f(sasm, "deoptimize", dont_gc_arguments, does_not_return);
         OopMap* oop_map = save_live_registers(sasm);
@@ -918,13 +918,13 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case throw_range_check_failed_id:
+    case C1StubId::throw_range_check_failed_id:
       { StubFrame f(sasm, "range_check_failed", dont_gc_arguments, does_not_return);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
       }
       break;
 
-    case unwind_exception_id:
+    case C1StubId::unwind_exception_id:
       { __ set_info("unwind_exception", dont_gc_arguments);
         // note: no stubframe since we are about to leave the current
         //       activation and we are calling a leaf VM function only.
@@ -932,54 +932,54 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case access_field_patching_id:
+    case C1StubId::access_field_patching_id:
       { StubFrame f(sasm, "access_field_patching", dont_gc_arguments, does_not_return);
         // we should set up register map
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
       }
       break;
 
-    case load_klass_patching_id:
+    case C1StubId::load_klass_patching_id:
       { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments, does_not_return);
         // we should set up register map
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
       }
       break;
 
-    case load_mirror_patching_id:
+    case C1StubId::load_mirror_patching_id:
       { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments, does_not_return);
         // we should set up register map
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
       }
       break;
 
-    case load_appendix_patching_id:
+    case C1StubId::load_appendix_patching_id:
       { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments, does_not_return);
         // we should set up register map
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
       }
       break;
 
-    case handle_exception_nofpu_id:
-    case handle_exception_id:
+    case C1StubId::handle_exception_nofpu_id:
+    case C1StubId::handle_exception_id:
       { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
         oop_maps = generate_handle_exception(id, sasm);
       }
       break;
 
-    case handle_exception_from_callee_id:
+    case C1StubId::handle_exception_from_callee_id:
       { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
         oop_maps = generate_handle_exception(id, sasm);
       }
       break;
 
-    case throw_index_exception_id:
+    case C1StubId::throw_index_exception_id:
       { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments, does_not_return);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
       }
       break;
 
-    case throw_array_store_exception_id:
+    case C1StubId::throw_array_store_exception_id:
       { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments, does_not_return);
         // tos + 0: link
         //     + 1: return address
@@ -987,7 +987,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case predicate_failed_trap_id:
+    case C1StubId::predicate_failed_trap_id:
       {
         StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments, does_not_return);
 
@@ -1005,7 +1005,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case dtrace_object_alloc_id:
+    case C1StubId::dtrace_object_alloc_id:
       { // c_rarg0: object
         StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
         save_live_registers(sasm);
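
One change in this file is not a pure rename: StubAssembler::_stub_id is stored as a plain int, and a scoped enum has no implicit conversion to int, which is why the comparison in call_RT() gains the explicit (int)C1StubId::forward_exception_id cast. A minimal compile-level sketch of the rule, assuming the field really does stay an int as the cast suggests:

    // Sketch only: shows why the cast in call_RT() above is required.
    enum class C1StubId : int { forward_exception_id /* , ... */ };

    struct StubAssemblerSketch {
      int _stub_id;  // assumption: remains an int, matching the cast above
      bool is_forward_exception() const {
        // return _stub_id == C1StubId::forward_exception_id;   // error: no implicit conversion
        return _stub_id == (int)C1StubId::forward_exception_id; // OK: explicit cast
      }
    };
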
diff --git a/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp b/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp
index 3d8dbc38071ed..8e85fa88a8749 100644
--- a/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -46,7 +46,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   ce->store_parameter(_bci, 0);
   ce->store_parameter(_method->as_constant_ptr()->as_metadata(), 1);
-  __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
+  __ call(Runtime1::entry_for(C1StubId::counter_overflow_id), relocInfo::runtime_call_type);
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
 
@@ -57,7 +57,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
 
   if (_info->deoptimize_on_exception()) {
-    __ call(Runtime1::entry_for(Runtime1::predicate_failed_trap_id), relocInfo::runtime_call_type);
+    __ call(Runtime1::entry_for(C1StubId::predicate_failed_trap_id), relocInfo::runtime_call_type);
     ce->add_call_info_here(_info);
     ce->verify_oop_map(_info);
     debug_only(__ should_not_reach_here());
@@ -73,10 +73,10 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
   }
 
   if (_throw_index_out_of_bounds_exception) {
-    __ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
+    __ call(Runtime1::entry_for(C1StubId::throw_index_exception_id), relocInfo::runtime_call_type);
   } else {
     __ str(_array->as_pointer_register(), Address(SP, BytesPerWord)); // ??? Correct offset? Correct instruction?
-    __ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
+    __ call(Runtime1::entry_for(C1StubId::throw_range_check_failed_id), relocInfo::runtime_call_type);
   }
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
@@ -89,7 +89,7 @@ PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
 
 void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
-  __ call(Runtime1::entry_for(Runtime1::predicate_failed_trap_id), relocInfo::runtime_call_type);
+  __ call(Runtime1::entry_for(C1StubId::predicate_failed_trap_id), relocInfo::runtime_call_type);
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   debug_only(__ should_not_reach_here());
@@ -100,7 +100,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
     ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
   }
   __ bind(_entry);
-  __ call(Runtime1::entry_for(Runtime1::throw_div0_exception_id),
+  __ call(Runtime1::entry_for(C1StubId::throw_div0_exception_id),
           relocInfo::runtime_call_type);
   ce->add_call_info_here(_info);
   DEBUG_ONLY(STOP("DivByZero");)
@@ -109,14 +109,14 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
 
 // Implementation of NewInstanceStub
 
-NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
+NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) {
   _result = result;
   _klass = klass;
   _klass_reg = klass_reg;
   _info = new CodeEmitInfo(info);
-  assert(stub_id == Runtime1::new_instance_id                 ||
-         stub_id == Runtime1::fast_new_instance_id            ||
-         stub_id == Runtime1::fast_new_instance_init_check_id,
+  assert(stub_id == C1StubId::new_instance_id                 ||
+         stub_id == C1StubId::fast_new_instance_id            ||
+         stub_id == C1StubId::fast_new_instance_init_check_id,
          "need new_instance id");
   _stub_id   = stub_id;
 }
@@ -148,7 +148,7 @@ void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
   assert(_klass_reg->as_register() == R1, "runtime call setup");
   assert(_length->as_register() == R2, "runtime call setup");
   __ bind(_entry);
-  __ call(Runtime1::entry_for(Runtime1::new_type_array_id), relocInfo::runtime_call_type);
+  __ call(Runtime1::entry_for(C1StubId::new_type_array_id), relocInfo::runtime_call_type);
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   __ b(_continuation);
@@ -170,7 +170,7 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
   assert(_klass_reg->as_register() == R1, "runtime call setup");
   assert(_length->as_register() == R2, "runtime call setup");
   __ bind(_entry);
-  __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
+  __ call(Runtime1::entry_for(C1StubId::new_object_array_id), relocInfo::runtime_call_type);
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   __ b(_continuation);
@@ -189,9 +189,9 @@ void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
     __ str(lock_reg, Address(SP, BytesPerWord));
   }
 
-  Runtime1::StubID enter_id = ce->compilation()->has_fpu_code() ?
-                              Runtime1::monitorenter_id :
-                              Runtime1::monitorenter_nofpu_id;
+  C1StubId enter_id = ce->compilation()->has_fpu_code() ?
+                              C1StubId::monitorenter_id :
+                              C1StubId::monitorenter_nofpu_id;
   __ call(Runtime1::entry_for(enter_id), relocInfo::runtime_call_type);
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
@@ -210,9 +210,9 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) {
   __ str(lock_reg, Address(SP));
 
   // Non-blocking leaf routine - no call info needed
-  Runtime1::StubID exit_id = ce->compilation()->has_fpu_code() ?
-                             Runtime1::monitorexit_id :
-                             Runtime1::monitorexit_nofpu_id;
+  C1StubId exit_id = ce->compilation()->has_fpu_code() ?
+                             C1StubId::monitorexit_id :
+                             C1StubId::monitorexit_nofpu_id;
   __ call(Runtime1::entry_for(exit_id), relocInfo::runtime_call_type);
   __ b(_continuation);
 }
@@ -322,10 +322,10 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
   address target = nullptr;
   relocInfo::relocType reloc_type = relocInfo::none;
   switch (_id) {
-    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
-    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
-    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
-    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
+    case access_field_id:  target = Runtime1::entry_for(C1StubId::access_field_patching_id); break;
+    case load_klass_id:    target = Runtime1::entry_for(C1StubId::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
+    case load_mirror_id:   target = Runtime1::entry_for(C1StubId::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
+    case load_appendix_id: target = Runtime1::entry_for(C1StubId::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
     default: ShouldNotReachHere();
   }
   __ bind(call_patch);
@@ -351,7 +351,7 @@ void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
   __ mov_slow(Rtemp, _trap_request);
   ce->verify_reserved_argument_area_size(1);
   __ str(Rtemp, Address(SP));
-  __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
+  __ call(Runtime1::entry_for(C1StubId::deoptimize_id), relocInfo::runtime_call_type);
   ce->add_call_info_here(_info);
   DEBUG_ONLY(__ should_not_reach_here());
 }
@@ -362,9 +362,9 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
   if (_info->deoptimize_on_exception()) {
     // Deoptimize, do not throw the exception, because it is
     // probably wrong to do it here.
-    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+    a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
   } else {
-    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
+    a = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id);
   }
   ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
   __ bind(_entry);
diff --git a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp
index 999f8fe590472..bb6a93e6f8da7 100644
--- a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp
@@ -213,7 +213,7 @@ int LIR_Assembler::emit_exception_handler() {
   // check that there is really an exception
   __ verify_not_null_oop(Rexception_obj);
 
-  __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
+  __ call(Runtime1::entry_for(C1StubId::handle_exception_from_callee_id), relocInfo::runtime_call_type);
   __ should_not_reach_here();
 
   assert(code_offset() - offset <= exception_handler_size(), "overflow");
@@ -253,7 +253,7 @@ int LIR_Assembler::emit_unwind_handler() {
 
   // remove the activation and dispatch to the unwind handler
   __ remove_frame(initial_frame_size_in_bytes()); // restores FP and LR
-  __ jump(Runtime1::entry_for(Runtime1::unwind_exception_id), relocInfo::runtime_call_type, Rtemp);
+  __ jump(Runtime1::entry_for(C1StubId::unwind_exception_id), relocInfo::runtime_call_type, Rtemp);
 
   // Emit the slow path assembly
   if (stub != nullptr) {
@@ -1136,7 +1136,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
       __ b(*failure_target, ne);
       // slow case
       assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
-      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
+      __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type);
       __ cbz(R0, *failure_target);
       if (op->should_profile()) {
         Register mdo  = klass_RInfo, recv = k_RInfo, tmp1 = Rtemp;
@@ -1210,7 +1210,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
           __ cmp(Rtemp, k_RInfo, ne);
           __ b(*success_target, eq);
           assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
-          __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
+          __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type);
           __ cbz(R0, *failure_target);
         }
       } else {
@@ -1227,7 +1227,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
         __ b(*failure_target, ne);
         // slow case
         assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
-        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
+        __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type);
         __ cbz(R0, *failure_target);
       }
 
@@ -1303,7 +1303,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
           }
           __ b(*success_target, eq);
           assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
-          __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
+          __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type);
           if (!op->should_profile()) {
             move_regs(R0, res);
           } else {
@@ -1334,7 +1334,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
         __ b(*failure_target, ne);
         // slow case
         assert(klass_RInfo == R0 && k_RInfo == R1, "runtime call setup");
-        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
+        __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type);
         if (!op->should_profile()) {
           move_regs(R0, res);
         }
@@ -1981,9 +1981,9 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
   assert(exceptionPC->as_register()  == Rexception_pc, "must match");
   info->add_register_oop(exceptionOop);
 
-  Runtime1::StubID handle_id = compilation()->has_fpu_code() ?
-                               Runtime1::handle_exception_id :
-                               Runtime1::handle_exception_nofpu_id;
+  C1StubId handle_id = compilation()->has_fpu_code() ?
+                               C1StubId::handle_exception_id :
+                               C1StubId::handle_exception_nofpu_id;
   Label return_address;
   __ adr(Rexception_pc, return_address);
   __ call(Runtime1::entry_for(handle_id), relocInfo::runtime_call_type);
@@ -2260,7 +2260,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
       __ mov(altFP_7_11, R1);
       __ mov(R0, tmp);
       __ mov(R1, tmp2);
-      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type); // does not blow any registers except R0, LR and Rtemp
+      __ call(Runtime1::entry_for(C1StubId::slow_subtype_check_id), relocInfo::runtime_call_type); // does not blow any registers except R0, LR and Rtemp
       __ cmp_32(R0, 0);
       __ mov(R0, R6);
       __ mov(R1, altFP_7_11);
diff --git a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp
index f4e3812d77cff..adda0c1c290db 100644
--- a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1054,7 +1054,7 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
   args->append(rank);
   args->append(varargs);
   LIR_Opr reg = result_register_for(x->type());
-  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
+  __ call_runtime(Runtime1::entry_for(C1StubId::new_multi_array_id),
                   LIR_OprFact::illegalOpr, reg, args, info);
 
   LIR_Opr result = rlock_result(x);
@@ -1083,7 +1083,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
   CodeStub* stub;
   if (x->is_incompatible_class_change_check()) {
     assert(patching_info == nullptr, "can't patch this");
-    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id,
+    stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id,
                                    LIR_OprFact::illegalOpr, info_for_exception);
   } else if (x->is_invokespecial_receiver_check()) {
     assert(patching_info == nullptr, "can't patch this");
@@ -1091,7 +1091,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
                               Deoptimization::Reason_class_check,
                               Deoptimization::Action_none);
   } else {
-    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id,
+    stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id,
                                    LIR_OprFact::illegalOpr, info_for_exception);
   }
 
diff --git a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp
index 335baf5f16638..b5117dedc424e 100644
--- a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp
@@ -65,7 +65,7 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
   reset_last_Java_frame(Rtemp);
 
   assert(frame_size() != no_frame_size, "frame must be fixed");
-  if (_stub_id != Runtime1::forward_exception_id) {
+  if (_stub_id != (int)C1StubId::forward_exception_id) {
     ldr(R3, Address(Rthread, Thread::pending_exception_offset()));
   }
 
@@ -81,10 +81,10 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
   // Check for pending exception
   // unpack_with_exception_in_tls path is taken through
   // Runtime1::exception_handler_for_pc
-  if (_stub_id != Runtime1::forward_exception_id) {
+  if (_stub_id != (int)C1StubId::forward_exception_id) {
     assert(frame_size() != no_frame_size, "cannot directly call forward_exception_id");
     cmp(R3, 0);
-    jump(Runtime1::entry_for(Runtime1::forward_exception_id), relocInfo::runtime_call_type, Rtemp, ne);
+    jump(Runtime1::entry_for(C1StubId::forward_exception_id), relocInfo::runtime_call_type, Rtemp, ne);
   } else {
 #ifdef ASSERT
     // Should not have pending exception in forward_exception stub
@@ -280,7 +280,7 @@ static void restore_sp_for_method_handle(StubAssembler* sasm) {
 }
 
 
-OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
+OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler* sasm) {
   __ block_comment("generate_handle_exception");
 
   bool save_fpu_registers = false;
@@ -290,7 +290,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
   OopMap* oop_map = nullptr;
 
   switch (id) {
-  case forward_exception_id: {
+  case C1StubId::forward_exception_id: {
     save_fpu_registers = HaveVFP;
     oop_map = generate_oop_map(sasm);
     __ ldr(Rexception_obj, Address(Rthread, Thread::pending_exception_offset()));
@@ -299,14 +299,14 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
     __ str(zero, Address(Rthread, Thread::pending_exception_offset()));
     break;
   }
-  case handle_exception_id:
+  case C1StubId::handle_exception_id:
     save_fpu_registers = HaveVFP;
     // fall-through
-  case handle_exception_nofpu_id:
+  case C1StubId::handle_exception_nofpu_id:
     // At this point all registers MAY be live.
     oop_map = save_live_registers(sasm, save_fpu_registers);
     break;
-  case handle_exception_from_callee_id:
+  case C1StubId::handle_exception_from_callee_id:
     // At this point all registers except exception oop (R4/R19) and
     // exception pc (R5/R20) are dead.
     oop_map = save_live_registers(sasm);  // TODO it's not required to save all registers
@@ -328,13 +328,13 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
   // Restore the registers that were saved at the beginning, remove
   // frame and jump to the exception handler.
   switch (id) {
-  case forward_exception_id:
-  case handle_exception_nofpu_id:
-  case handle_exception_id:
+  case C1StubId::forward_exception_id:
+  case C1StubId::handle_exception_nofpu_id:
+  case C1StubId::handle_exception_id:
     restore_live_registers(sasm, save_fpu_registers);
     // Note: the restore live registers includes the jump to LR (patched to R0)
     break;
-  case handle_exception_from_callee_id:
+  case C1StubId::handle_exception_from_callee_id:
     restore_live_registers_without_return(sasm); // must not jump immediately to handler
     restore_sp_for_method_handle(sasm);
     __ ret();
@@ -403,7 +403,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
 }
 
 
-OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
+OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
   const bool must_gc_arguments = true;
   const bool dont_gc_arguments = false;
 
@@ -411,16 +411,16 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
   bool save_fpu_registers = HaveVFP;
 
   switch (id) {
-    case forward_exception_id:
+    case C1StubId::forward_exception_id:
       {
         oop_maps = generate_handle_exception(id, sasm);
         // does not return on ARM
       }
       break;
 
-    case new_instance_id:
-    case fast_new_instance_id:
-    case fast_new_instance_init_check_id:
+    case C1StubId::new_instance_id:
+    case C1StubId::fast_new_instance_id:
+    case C1StubId::fast_new_instance_init_check_id:
       {
         const Register result = R0;
         const Register klass  = R1;
@@ -436,7 +436,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case counter_overflow_id:
+    case C1StubId::counter_overflow_id:
       {
         OopMap* oop_map = save_live_registers(sasm);
         __ ldr(R1, Address(SP, arg1_offset));
@@ -448,10 +448,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case new_type_array_id:
-    case new_object_array_id:
+    case C1StubId::new_type_array_id:
+    case C1StubId::new_object_array_id:
       {
-        if (id == new_type_array_id) {
+        if (id == C1StubId::new_type_array_id) {
           __ set_info("new_type_array", dont_gc_arguments);
         } else {
           __ set_info("new_object_array", dont_gc_arguments);
@@ -463,7 +463,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
 
         OopMap* map = save_live_registers(sasm);
         int call_offset;
-        if (id == new_type_array_id) {
+        if (id == C1StubId::new_type_array_id) {
           call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
         } else {
           call_offset = __ call_RT(result, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
@@ -477,7 +477,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case new_multi_array_id:
+    case C1StubId::new_multi_array_id:
       {
         __ set_info("new_multi_array", dont_gc_arguments);
 
@@ -500,7 +500,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case register_finalizer_id:
+    case C1StubId::register_finalizer_id:
       {
         __ set_info("register_finalizer", dont_gc_arguments);
 
@@ -521,78 +521,78 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case throw_range_check_failed_id:
+    case C1StubId::throw_range_check_failed_id:
       {
         __ set_info("range_check_failed", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
       }
       break;
 
-    case throw_index_exception_id:
+    case C1StubId::throw_index_exception_id:
       {
         __ set_info("index_range_check_failed", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
       }
       break;
 
-    case throw_div0_exception_id:
+    case C1StubId::throw_div0_exception_id:
       {
         __ set_info("throw_div0_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
       }
       break;
 
-    case throw_null_pointer_exception_id:
+    case C1StubId::throw_null_pointer_exception_id:
       {
         __ set_info("throw_null_pointer_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
       }
       break;
 
-    case handle_exception_nofpu_id:
-    case handle_exception_id:
+    case C1StubId::handle_exception_nofpu_id:
+    case C1StubId::handle_exception_id:
       {
         __ set_info("handle_exception", dont_gc_arguments);
         oop_maps = generate_handle_exception(id, sasm);
       }
       break;
 
-    case handle_exception_from_callee_id:
+    case C1StubId::handle_exception_from_callee_id:
       {
         __ set_info("handle_exception_from_callee", dont_gc_arguments);
         oop_maps = generate_handle_exception(id, sasm);
       }
       break;
 
-    case unwind_exception_id:
+    case C1StubId::unwind_exception_id:
       {
         __ set_info("unwind_exception", dont_gc_arguments);
         generate_unwind_exception(sasm);
       }
       break;
 
-    case throw_array_store_exception_id:
+    case C1StubId::throw_array_store_exception_id:
       {
         __ set_info("throw_array_store_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
       }
       break;
 
-    case throw_class_cast_exception_id:
+    case C1StubId::throw_class_cast_exception_id:
       {
         __ set_info("throw_class_cast_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
       }
       break;
 
-    case throw_incompatible_class_change_error_id:
+    case C1StubId::throw_incompatible_class_change_error_id:
       {
         __ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
       }
       break;
 
-    case slow_subtype_check_id:
+    case C1StubId::slow_subtype_check_id:
       {
         // (in)  R0 - sub, destroyed,
         // (in)  R1 - super, not changed
@@ -625,10 +625,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case monitorenter_nofpu_id:
+    case C1StubId::monitorenter_nofpu_id:
       save_fpu_registers = false;
       // fall through
-    case monitorenter_id:
+    case C1StubId::monitorenter_id:
       {
         __ set_info("monitorenter", dont_gc_arguments);
         const Register obj  = R1;
@@ -643,10 +643,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case monitorexit_nofpu_id:
+    case C1StubId::monitorexit_nofpu_id:
       save_fpu_registers = false;
       // fall through
-    case monitorexit_id:
+    case C1StubId::monitorexit_id:
       {
         __ set_info("monitorexit", dont_gc_arguments);
         const Register lock = R1;
@@ -659,7 +659,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case deoptimize_id:
+    case C1StubId::deoptimize_id:
       {
         __ set_info("deoptimize", dont_gc_arguments);
         OopMap* oop_map = save_live_registers(sasm);
@@ -675,35 +675,35 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case access_field_patching_id:
+    case C1StubId::access_field_patching_id:
       {
         __ set_info("access_field_patching", dont_gc_arguments);
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
       }
       break;
 
-    case load_klass_patching_id:
+    case C1StubId::load_klass_patching_id:
       {
         __ set_info("load_klass_patching", dont_gc_arguments);
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
       }
       break;
 
-    case load_appendix_patching_id:
+    case C1StubId::load_appendix_patching_id:
       {
         __ set_info("load_appendix_patching", dont_gc_arguments);
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
       }
       break;
 
-    case load_mirror_patching_id:
+    case C1StubId::load_mirror_patching_id:
       {
         __ set_info("load_mirror_patching", dont_gc_arguments);
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
       }
       break;
 
-    case predicate_failed_trap_id:
+    case C1StubId::predicate_failed_trap_id:
       {
         __ set_info("predicate_failed_trap", dont_gc_arguments);
 
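Every hunk in this patch follows from a single C++ change: Runtime1's unscoped member enum StubID becomes the scoped enum C1StubId. A minimal sketch of the before/after shapes, with an illustrative enumerator list (the real declaration lives in the shared C1 code, which this excerpt does not show):

    typedef unsigned char* address;  // HotSpot-style address alias

    // Before: an unscoped enum nested in Runtime1. Its enumerators are
    // injected into the enclosing scope, so code inside Runtime1 and the
    // per-port generate_code_for could write bare ids like deoptimize_id.
    class Runtime1 {
     public:
      enum StubID { forward_exception_id, deoptimize_id /*, ... */ };
      static address entry_for(StubID id);
    };

    // After: a scoped enum. It no longer needs a Runtime1:: prefix itself,
    // but every enumerator must be qualified with C1StubId:: at each use.
    enum class C1StubId { forward_exception_id, deoptimize_id /*, ... */ };

    void dispatch(C1StubId id) {
      switch (id) {                    // switching on a scoped enum is fine
        case C1StubId::deoptimize_id:  // ...but case labels need the prefix
          break;
        default:
          break;
      }
    }

Because scoped enumerators are not visible unqualified, every bare case label and comparison in the platform files gains the C1StubId:: prefix, which is exactly the mechanical rewrite the hunks above and below perform.
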
diff --git a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp
index dc70c73d4b330..451f3b7e9cd6b 100644
--- a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2021 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -68,7 +68,7 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
 
   if (_info->deoptimize_on_exception()) {
-    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+    address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
     //__ load_const_optimized(R0, a);
     __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
     __ mtctr(R0);
@@ -79,8 +79,8 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
     return;
   }
 
-  address stub = _throw_index_out_of_bounds_exception ? Runtime1::entry_for(Runtime1::throw_index_exception_id)
-                                                      : Runtime1::entry_for(Runtime1::throw_range_check_failed_id);
+  address stub = _throw_index_out_of_bounds_exception ? Runtime1::entry_for(C1StubId::throw_index_exception_id)
+                                                      : Runtime1::entry_for(C1StubId::throw_range_check_failed_id);
   //__ load_const_optimized(R0, stub);
   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
   __ mtctr(R0);
@@ -109,7 +109,7 @@ PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
 
 void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
-  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+  address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
   //__ load_const_optimized(R0, a);
   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
   __ mtctr(R0);
@@ -133,7 +133,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   __ load_const_optimized(R0, md.value());
   __ std(R0, -8, R1_SP);
 
-  address a = Runtime1::entry_for(Runtime1::counter_overflow_id);
+  address a = Runtime1::entry_for(C1StubId::counter_overflow_id);
   //__ load_const_optimized(R0, a);
   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(a));
   __ mtctr(R0);
@@ -150,7 +150,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
     ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
   }
   __ bind(_entry);
-  address stub = Runtime1::entry_for(Runtime1::throw_div0_exception_id);
+  address stub = Runtime1::entry_for(C1StubId::throw_div0_exception_id);
   //__ load_const_optimized(R0, stub);
   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
   __ mtctr(R0);
@@ -165,9 +165,9 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
   address a;
   if (_info->deoptimize_on_exception()) {
     // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
-    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+    a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
   } else {
-    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
+    a = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id);
   }
 
   if (ImplicitNullChecks || TrapBasedNullChecks) {
@@ -199,14 +199,14 @@ void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
 
 
 // Implementation of NewInstanceStub
-NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
+NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) {
   _result = result;
   _klass = klass;
   _klass_reg = klass_reg;
   _info = new CodeEmitInfo(info);
-  assert(stub_id == Runtime1::new_instance_id                 ||
-         stub_id == Runtime1::fast_new_instance_id            ||
-         stub_id == Runtime1::fast_new_instance_init_check_id,
+  assert(stub_id == C1StubId::new_instance_id                 ||
+         stub_id == C1StubId::fast_new_instance_id            ||
+         stub_id == C1StubId::fast_new_instance_init_check_id,
          "need new_instance id");
   _stub_id = stub_id;
 }
@@ -236,7 +236,7 @@ NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr re
 void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
 
-  address entry = Runtime1::entry_for(Runtime1::new_type_array_id);
+  address entry = Runtime1::entry_for(C1StubId::new_type_array_id);
   //__ load_const_optimized(R0, entry);
   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
   __ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended
@@ -259,7 +259,7 @@ NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Op
 void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
 
-  address entry = Runtime1::entry_for(Runtime1::new_object_array_id);
+  address entry = Runtime1::entry_for(C1StubId::new_object_array_id);
   //__ load_const_optimized(R0, entry);
   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry));
   __ mr_if_needed(/*op->tmp1()->as_register()*/ R5_ARG3, _length->as_register()); // already sign-extended
@@ -272,7 +272,7 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
 
 void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
-  address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? Runtime1::monitorenter_id : Runtime1::monitorenter_nofpu_id);
+  address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? C1StubId::monitorenter_id : C1StubId::monitorenter_nofpu_id);
   //__ load_const_optimized(R0, stub);
   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
   __ mr_if_needed(/*scratch_opr()->as_register()*/ R4_ARG2, _obj_reg->as_register());
@@ -289,7 +289,7 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) {
   if (_compute_lock) {
     ce->monitor_address(_monitor_ix, _lock_reg);
   }
-  address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? Runtime1::monitorexit_id : Runtime1::monitorexit_nofpu_id);
+  address stub = Runtime1::entry_for(ce->compilation()->has_fpu_code() ? C1StubId::monitorexit_id : C1StubId::monitorexit_nofpu_id);
   //__ load_const_optimized(R0, stub);
   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
   assert(_lock_reg->as_register() == R4_ARG2, "");
@@ -403,12 +403,12 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
   address target = nullptr;
   relocInfo::relocType reloc_type = relocInfo::none;
   switch (_id) {
-    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
-    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
+    case access_field_id:  target = Runtime1::entry_for(C1StubId::access_field_patching_id); break;
+    case load_klass_id:    target = Runtime1::entry_for(C1StubId::load_klass_patching_id);
                            reloc_type = relocInfo::metadata_type; break;
-    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
+    case load_mirror_id:   target = Runtime1::entry_for(C1StubId::load_mirror_patching_id);
                            reloc_type = relocInfo::oop_type; break;
-    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
+    case load_appendix_id: target = Runtime1::entry_for(C1StubId::load_appendix_patching_id);
                            reloc_type = relocInfo::oop_type; break;
     default: ShouldNotReachHere();
   }
@@ -434,7 +434,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
 
 void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
-  address stub = Runtime1::entry_for(Runtime1::deoptimize_id);
+  address stub = Runtime1::entry_for(C1StubId::deoptimize_id);
   //__ load_const_optimized(R0, stub);
   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
   __ mtctr(R0);
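The stub classes change their constructor parameter in step with the enum, as the NewInstanceStub hunk above shows. A trimmed sketch of that shape, keeping only the id-related field (the two-argument assert below is a stand-in for HotSpot's macro, not the standard C assert):

    #include <cstdio>
    #include <cstdlib>

    // minimal stand-in for HotSpot's assert(cond, msg)
    #define assert(cond, msg) \
      do { if (!(cond)) { std::fprintf(stderr, "%s\n", msg); std::abort(); } } while (0)

    enum class C1StubId {
      new_instance_id, fast_new_instance_id, fast_new_instance_init_check_id
    };

    class NewInstanceStubSketch {
      C1StubId _stub_id;  // was: Runtime1::StubID _stub_id;
     public:
      explicit NewInstanceStubSketch(C1StubId stub_id) {
        // same sanity check as the ports, now against qualified ids
        assert(stub_id == C1StubId::new_instance_id                 ||
               stub_id == C1StubId::fast_new_instance_id            ||
               stub_id == C1StubId::fast_new_instance_init_check_id,
               "need new_instance id");
        _stub_id = stub_id;
      }
    };
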
diff --git a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
index 2191b894f6e16..42934dc7c3179 100644
--- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
@@ -176,7 +176,7 @@ int LIR_Assembler::emit_exception_handler() {
   }
 
   int offset = code_offset();
-  address entry_point = CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::handle_exception_from_callee_id));
+  address entry_point = CAST_FROM_FN_PTR(address, Runtime1::entry_for(C1StubId::handle_exception_from_callee_id));
   //__ load_const_optimized(R0, entry_point);
   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(entry_point));
   __ mtctr(R0);
@@ -222,7 +222,7 @@ int LIR_Assembler::emit_unwind_handler() {
   }
 
   // Dispatch to the unwind logic.
-  address unwind_stub = Runtime1::entry_for(Runtime1::unwind_exception_id);
+  address unwind_stub = Runtime1::entry_for(C1StubId::unwind_exception_id);
   //__ load_const_optimized(R0, unwind_stub);
   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(unwind_stub));
   if (preserve_exception) { __ mr(Rexception, Rexception_save); }
@@ -1800,8 +1800,8 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
   __ calculate_address_from_global_toc(exceptionPC->as_register(), pc_for_athrow, true, true, /*add_relocation*/ true);
   add_call_info(pc_for_athrow_offset, info); // for exception handler
 
-  address stub = Runtime1::entry_for(compilation()->has_fpu_code() ? Runtime1::handle_exception_id
-                                                                   : Runtime1::handle_exception_nofpu_id);
+  address stub = Runtime1::entry_for(compilation()->has_fpu_code() ? C1StubId::handle_exception_id
+                                                                   : C1StubId::handle_exception_nofpu_id);
   //__ load_const_optimized(R0, stub);
   __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(stub));
   __ mtctr(R0);
@@ -2001,7 +2001,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
       __ check_klass_subtype_fast_path(sub_klass, super_klass, tmp, tmp2,
                                        &cont, copyfunc_addr != nullptr ? &copyfunc : &slow, nullptr);
 
-      address slow_stc = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
+      address slow_stc = Runtime1::entry_for(C1StubId::slow_subtype_check_id);
       //__ load_const_optimized(tmp, slow_stc, tmp2);
       __ calculate_address_from_global_toc(tmp, slow_stc, true, true, false);
       __ mtctr(tmp);
@@ -2452,7 +2452,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
       __ b(*success);
     } else {
       // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
-      address entry = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
+      address entry = Runtime1::entry_for(C1StubId::slow_subtype_check_id);
       // Stub needs fixed registers (tmp1-3).
       Register original_k_RInfo = op->tmp1()->as_register();
       Register original_klass_RInfo = op->tmp2()->as_register();
@@ -2543,7 +2543,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, R0, &done, &failure, nullptr);
 
     // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
-    const address slow_path = Runtime1::entry_for(Runtime1::slow_subtype_check_id);
+    const address slow_path = Runtime1::entry_for(C1StubId::slow_subtype_check_id);
     //__ load_const_optimized(R0, slow_path);
     __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(slow_path));
     __ mtctr(R0);
@@ -2850,8 +2850,8 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
 void LIR_Assembler::rt_call(LIR_Opr result, address dest,
                             const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
   // Stubs: Called via rt_call, but dest is a stub address (no function descriptor).
-  if (dest == Runtime1::entry_for(Runtime1::register_finalizer_id) ||
-      dest == Runtime1::entry_for(Runtime1::new_multi_array_id   )) {
+  if (dest == Runtime1::entry_for(C1StubId::register_finalizer_id) ||
+      dest == Runtime1::entry_for(C1StubId::new_multi_array_id   )) {
     //__ load_const_optimized(R0, dest);
     __ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(dest));
     __ mtctr(R0);
diff --git a/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp
index 04762a22c6110..7973e9d05459e 100644
--- a/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp
@@ -1032,7 +1032,7 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
   args->append(rank);
   args->append(varargs);
   const LIR_Opr reg = result_register_for(x->type());
-  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
+  __ call_runtime(Runtime1::entry_for(C1StubId::new_multi_array_id),
                   LIR_OprFact::illegalOpr,
                   reg, args, info);
 
@@ -1067,7 +1067,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
 
   if (x->is_incompatible_class_change_check()) {
     assert(patching_info == nullptr, "can't patch this");
-    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id,
+    stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id,
                                    LIR_OprFact::illegalOpr, info_for_exception);
   } else if (x->is_invokespecial_receiver_check()) {
     assert(patching_info == nullptr, "can't patch this");
@@ -1075,7 +1075,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
                               Deoptimization::Reason_class_check,
                               Deoptimization::Action_none);
   } else {
-    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
+    stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception);
   }
   // Following registers are used by slow_subtype_check:
   LIR_Opr tmp1 = FrameMap::R4_oop_opr; // super_klass
diff --git a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
index 059bb2eae0c3a..c05e97a4e9aa3 100644
--- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
@@ -293,7 +293,7 @@ void C1_MacroAssembler::initialize_object(
   if (CURRENT_ENV->dtrace_alloc_probes()) {
     Unimplemented();
 //    assert(obj == O0, "must be");
-//    call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)),
+//    call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(C1StubId::dtrace_object_alloc_id)),
 //         relocInfo::runtime_call_type);
   }
 
@@ -369,7 +369,7 @@ void C1_MacroAssembler::allocate_array(
   if (CURRENT_ENV->dtrace_alloc_probes()) {
     Unimplemented();
     //assert(obj == O0, "must be");
-    //call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)),
+    //call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(C1StubId::dtrace_object_alloc_id)),
     //     relocInfo::runtime_call_type);
   }
 
@@ -398,7 +398,7 @@ void C1_MacroAssembler::null_check(Register r, Label* Lnull) {
   if (TrapBasedNullChecks) { // SIGTRAP based
     trap_null_check(r);
   } else { // explicit
-    //const address exception_entry = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
+    //const address exception_entry = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id);
     assert(Lnull != nullptr, "must have Label for explicit check");
     cmpdi(CCR0, r, 0);
     bc_far_optimized(Assembler::bcondCRbiIs1, bi0(CCR0, Assembler::equal), *Lnull);
diff --git a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp
index adddfda4ee74f..654626d66d812 100644
--- a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp
@@ -97,12 +97,12 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result,
       //load_const_optimized(R0, StubRoutines::forward_exception_entry());
       //mtctr(R0);
       //bctr();
-    } else if (_stub_id == Runtime1::forward_exception_id) {
+    } else if (_stub_id == (int)C1StubId::forward_exception_id) {
       should_not_reach_here();
     } else {
       // keep stub frame for next call_RT
-      //load_const_optimized(R0, Runtime1::entry_for(Runtime1::forward_exception_id));
-      add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(Runtime1::entry_for(Runtime1::forward_exception_id)));
+      //load_const_optimized(R0, Runtime1::entry_for(C1StubId::forward_exception_id));
+      add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(Runtime1::entry_for(C1StubId::forward_exception_id)));
       mtctr(R0);
       bctr();
     }
@@ -388,7 +388,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
   return oop_maps;
 }
 
-OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
+OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
   OopMapSet* oop_maps = nullptr;
 
   // For better readability.
@@ -397,22 +397,22 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
 
   // Stub code & info for the different stubs.
   switch (id) {
-    case forward_exception_id:
+    case C1StubId::forward_exception_id:
       {
         oop_maps = generate_handle_exception(id, sasm);
       }
       break;
 
-    case new_instance_id:
-    case fast_new_instance_id:
-    case fast_new_instance_init_check_id:
+    case C1StubId::new_instance_id:
+    case C1StubId::fast_new_instance_id:
+    case C1StubId::fast_new_instance_init_check_id:
       {
-        if (id == new_instance_id) {
+        if (id == C1StubId::new_instance_id) {
           __ set_info("new_instance", dont_gc_arguments);
-        } else if (id == fast_new_instance_id) {
+        } else if (id == C1StubId::fast_new_instance_id) {
           __ set_info("fast new_instance", dont_gc_arguments);
         } else {
-          assert(id == fast_new_instance_init_check_id, "bad StubID");
+          assert(id == C1StubId::fast_new_instance_init_check_id, "bad C1StubId");
           __ set_info("fast new_instance init check", dont_gc_arguments);
         }
 
@@ -422,15 +422,15 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case counter_overflow_id:
+    case C1StubId::counter_overflow_id:
         // Bci and method are on stack.
         oop_maps = stub_call_with_stack_parms(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), 2);
       break;
 
-    case new_type_array_id:
-    case new_object_array_id:
+    case C1StubId::new_type_array_id:
+    case C1StubId::new_object_array_id:
       {
-        if (id == new_type_array_id) {
+        if (id == C1StubId::new_type_array_id) {
           __ set_info("new_type_array", dont_gc_arguments);
         } else {
           __ set_info("new_object_array", dont_gc_arguments);
@@ -439,7 +439,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
 #ifdef ASSERT
         // Assert object type is really an array of the proper kind.
         {
-          int tag = (id == new_type_array_id) ? Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value;
+          int tag = (id == C1StubId::new_type_array_id) ? Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value;
           Label ok;
           __ lwz(R0, in_bytes(Klass::layout_helper_offset()), R4_ARG2);
           __ srawi(R0, R0, Klass::_lh_array_tag_shift);
@@ -453,7 +453,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
 
         // We don't support eden allocation.
 
-        if (id == new_type_array_id) {
+        if (id == C1StubId::new_type_array_id) {
           oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_type_array), R4_ARG2, R5_ARG3);
         } else {
           oop_maps = generate_stub_call(sasm, R3_RET, CAST_FROM_FN_PTR(address, new_object_array), R4_ARG2, R5_ARG3);
@@ -461,7 +461,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case new_multi_array_id:
+    case C1StubId::new_multi_array_id:
       {
         // R4: klass
         // R5: rank
@@ -471,7 +471,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case register_finalizer_id:
+    case C1StubId::register_finalizer_id:
       {
         __ set_info("register_finalizer", dont_gc_arguments);
         // This code is called via rt_call. Hence, caller-save registers have been saved.
@@ -501,50 +501,50 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case throw_range_check_failed_id:
+    case C1StubId::throw_range_check_failed_id:
       {
         __ set_info("range_check_failed", dont_gc_arguments); // Arguments will be discarded.
         oop_maps = generate_exception_throw_with_stack_parms(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), 2);
       }
       break;
 
-    case throw_index_exception_id:
+    case C1StubId::throw_index_exception_id:
       {
         __ set_info("index_range_check_failed", dont_gc_arguments); // Arguments will be discarded.
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
       }
       break;
 
-    case throw_div0_exception_id:
+    case C1StubId::throw_div0_exception_id:
       {
         __ set_info("throw_div0_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
       }
       break;
 
-    case throw_null_pointer_exception_id:
+    case C1StubId::throw_null_pointer_exception_id:
       {
         __ set_info("throw_null_pointer_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
       }
       break;
 
-    case handle_exception_nofpu_id:
-    case handle_exception_id:
+    case C1StubId::handle_exception_nofpu_id:
+    case C1StubId::handle_exception_id:
       {
         __ set_info("handle_exception", dont_gc_arguments);
         oop_maps = generate_handle_exception(id, sasm);
       }
       break;
 
-    case handle_exception_from_callee_id:
+    case C1StubId::handle_exception_from_callee_id:
       {
         __ set_info("handle_exception_from_callee", dont_gc_arguments);
         oop_maps = generate_handle_exception(id, sasm);
       }
       break;
 
-    case unwind_exception_id:
+    case C1StubId::unwind_exception_id:
       {
         const Register Rexception    = R3 /*LIRGenerator::exceptionOopOpr()*/,
                        Rexception_pc = R4 /*LIRGenerator::exceptionPcOpr()*/,
@@ -572,28 +572,28 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case throw_array_store_exception_id:
+    case C1StubId::throw_array_store_exception_id:
       {
         __ set_info("throw_array_store_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
       }
       break;
 
-    case throw_class_cast_exception_id:
+    case C1StubId::throw_class_cast_exception_id:
       {
         __ set_info("throw_class_cast_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
       }
       break;
 
-    case throw_incompatible_class_change_error_id:
+    case C1StubId::throw_incompatible_class_change_error_id:
       {
         __ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
       }
       break;
 
-    case slow_subtype_check_id:
+    case C1StubId::slow_subtype_check_id:
       { // Support for uint StubRoutine::partial_subtype_check( Klass sub, Klass super );
         const Register sub_klass = R5,
                        super_klass = R4,
@@ -605,12 +605,12 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case monitorenter_nofpu_id:
-    case monitorenter_id:
+    case C1StubId::monitorenter_nofpu_id:
+    case C1StubId::monitorenter_id:
       {
         __ set_info("monitorenter", dont_gc_arguments);
 
-        int save_fpu_registers = (id == monitorenter_id);
+        int save_fpu_registers = (id == C1StubId::monitorenter_id);
         // Make a frame and preserve the caller's caller-save registers.
         OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);
 
@@ -624,15 +624,15 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case monitorexit_nofpu_id:
-    case monitorexit_id:
+    case C1StubId::monitorexit_nofpu_id:
+    case C1StubId::monitorexit_id:
       {
         // note: Really a leaf routine but must setup last java sp
         //       => use call_RT for now (speed can be improved by
         //       doing last java sp setup manually).
         __ set_info("monitorexit", dont_gc_arguments);
 
-        int save_fpu_registers = (id == monitorexit_id);
+        int save_fpu_registers = (id == C1StubId::monitorexit_id);
         // Make a frame and preserve the caller's caller-save registers.
         OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);
 
@@ -646,7 +646,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case deoptimize_id:
+    case C1StubId::deoptimize_id:
       {
         __ set_info("deoptimize", dont_gc_arguments);
         __ std(R0, -8, R1_SP); // Pass trap_request on stack.
@@ -662,35 +662,35 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case access_field_patching_id:
+    case C1StubId::access_field_patching_id:
       {
         __ set_info("access_field_patching", dont_gc_arguments);
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
       }
       break;
 
-    case load_klass_patching_id:
+    case C1StubId::load_klass_patching_id:
       {
         __ set_info("load_klass_patching", dont_gc_arguments);
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
       }
       break;
 
-    case load_mirror_patching_id:
+    case C1StubId::load_mirror_patching_id:
       {
         __ set_info("load_mirror_patching", dont_gc_arguments);
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
       }
       break;
 
-    case load_appendix_patching_id:
+    case C1StubId::load_appendix_patching_id:
       {
         __ set_info("load_appendix_patching", dont_gc_arguments);
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
       }
       break;
 
-    case dtrace_object_alloc_id:
+    case C1StubId::dtrace_object_alloc_id:
       { // O0: object
         __ unimplemented("stub dtrace_object_alloc_id");
         __ set_info("dtrace_object_alloc", dont_gc_arguments);
@@ -710,7 +710,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case predicate_failed_trap_id:
+    case C1StubId::predicate_failed_trap_id:
       {
         __ set_info("predicate_failed_trap", dont_gc_arguments);
         OopMap* oop_map = save_live_registers(sasm);
@@ -754,7 +754,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
 }
 
 
-OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
+OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler* sasm) {
   __ block_comment("generate_handle_exception");
 
   // Save registers, if required.
@@ -764,7 +764,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
                  Rexception_pc = R4 /*LIRGenerator::exceptionPcOpr()*/;
 
   switch (id) {
-  case forward_exception_id:
+  case C1StubId::forward_exception_id:
     // We're handling an exception in the context of a compiled frame.
     // The registers have been saved in the standard places. Perform
     // an exception lookup in the caller and dispatch to the handler
@@ -780,12 +780,12 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
     __ ld(Rexception_pc, _abi0(lr), Rexception_pc);
     __ std(R0, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
     break;
-  case handle_exception_nofpu_id:
-  case handle_exception_id:
+  case C1StubId::handle_exception_nofpu_id:
+  case C1StubId::handle_exception_id:
     // At this point all registers MAY be live.
-    oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id, Rexception_pc);
+    oop_map = save_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id, Rexception_pc);
     break;
-  case handle_exception_from_callee_id:
+  case C1StubId::handle_exception_from_callee_id:
     // At this point all registers except exception oop and exception pc are dead.
     oop_map = new OopMap(frame_size_in_bytes / sizeof(jint), 0);
     sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
@@ -824,13 +824,13 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) {
   // Restore the registers that were saved at the beginning, remove
   // the frame and jump to the exception handler.
   switch (id) {
-  case forward_exception_id:
-  case handle_exception_nofpu_id:
-  case handle_exception_id:
-    restore_live_registers(sasm, noreg, noreg, id != handle_exception_nofpu_id);
+  case C1StubId::forward_exception_id:
+  case C1StubId::handle_exception_nofpu_id:
+  case C1StubId::handle_exception_id:
+    restore_live_registers(sasm, noreg, noreg, id != C1StubId::handle_exception_nofpu_id);
     __ bctr();
     break;
-  case handle_exception_from_callee_id: {
+  case C1StubId::handle_exception_from_callee_id: {
     __ pop_frame();
     __ ld(Rexception_pc, _abi0(lr), R1_SP);
     __ mtlr(Rexception_pc);
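One detail in the StubAssembler::call_RT hunk above is not purely mechanical: the stub id is compared as _stub_id == (int)C1StubId::forward_exception_id. Unlike the old unscoped enum, a scoped enum does not convert implicitly to int, so wherever the id has been stored in an integer field an explicit cast is required. A sketch of the distinction, assuming the id is kept as a plain int (that field type is inferred from the cast, not quoted from this patch):

    enum class C1StubId { forward_exception_id, deoptimize_id };

    struct StubAsmSketch {
      int _stub_id;                      // assumed: id recorded as a raw int
    };

    bool is_forward_exception(const StubAsmSketch& s) {
      // return s._stub_id == C1StubId::forward_exception_id;   // no longer compiles
      return s._stub_id == (int)C1StubId::forward_exception_id; // explicit cast needed
    }

By contrast, switch (id) over a C1StubId value needs no cast at all, which is why generate_code_for and generate_handle_exception change only their parameter types and case labels.
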
diff --git a/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp b/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp
index b7e1b7863efdb..fb81082072610 100644
--- a/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_CodeStubs_riscv.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -62,7 +62,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   __ mov_metadata(t0, m);
   ce->store_parameter(t0, 1);
   ce->store_parameter(_bci, 0);
-  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
+  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::counter_overflow_id)));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   __ j(_continuation);
@@ -71,7 +71,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
 void RangeCheckStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   if (_info->deoptimize_on_exception()) {
-    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+    address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
     __ far_call(RuntimeAddress(a));
     ce->add_call_info_here(_info);
     ce->verify_oop_map(_info);
@@ -84,13 +84,13 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
   } else {
     __ mv(t0, _index->as_jint());
   }
-  Runtime1::StubID stub_id;
+  C1StubId stub_id;
   if (_throw_index_out_of_bounds_exception) {
-    stub_id = Runtime1::throw_index_exception_id;
+    stub_id = C1StubId::throw_index_exception_id;
   } else {
     assert(_array != LIR_Opr::nullOpr(), "sanity");
     __ mv(t1, _array->as_pointer_register());
-    stub_id = Runtime1::throw_range_check_failed_id;
+    stub_id = C1StubId::throw_range_check_failed_id;
   }
   // t0 and t1 are used as args in generate_exception_throw,
   // so use ra as the tmp register for rt_call.
@@ -106,7 +106,7 @@ PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
 
 void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
-  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+  address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
   __ far_call(RuntimeAddress(a));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
@@ -118,7 +118,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
     ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
   }
   __ bind(_entry);
-  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
+  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::throw_div0_exception_id)));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
 #ifdef ASSERT
@@ -127,14 +127,14 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
 }
 
 // Implementation of NewInstanceStub
-NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
+NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) {
   _result = result;
   _klass = klass;
   _klass_reg = klass_reg;
   _info = new CodeEmitInfo(info);
-  assert(stub_id == Runtime1::new_instance_id                 ||
-         stub_id == Runtime1::fast_new_instance_id            ||
-         stub_id == Runtime1::fast_new_instance_init_check_id,
+  assert(stub_id == C1StubId::new_instance_id                 ||
+         stub_id == C1StubId::fast_new_instance_id            ||
+         stub_id == C1StubId::fast_new_instance_init_check_id,
          "need new_instance id");
   _stub_id = stub_id;
 }
@@ -163,7 +163,7 @@ void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   assert(_length->as_register() == x9, "length must in x9");
   assert(_klass_reg->as_register() == x13, "klass_reg must in x13");
-  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
+  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_type_array_id)));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   assert(_result->as_register() == x10, "result must in x10");
@@ -183,7 +183,7 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   assert(_length->as_register() == x9, "length must in x9");
   assert(_klass_reg->as_register() == x13, "klass_reg must in x13");
-  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
+  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_object_array_id)));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   assert(_result->as_register() == x10, "result must in x10");
@@ -195,11 +195,11 @@ void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   ce->store_parameter(_obj_reg->as_register(),  1);
   ce->store_parameter(_lock_reg->as_register(), 0);
-  Runtime1::StubID enter_id;
+  C1StubId enter_id;
   if (ce->compilation()->has_fpu_code()) {
-    enter_id = Runtime1::monitorenter_id;
+    enter_id = C1StubId::monitorenter_id;
   } else {
-    enter_id = Runtime1::monitorenter_nofpu_id;
+    enter_id = C1StubId::monitorenter_nofpu_id;
   }
   __ far_call(RuntimeAddress(Runtime1::entry_for(enter_id)));
   ce->add_call_info_here(_info);
@@ -215,11 +215,11 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) {
   }
   ce->store_parameter(_lock_reg->as_register(), 0);
   // note: non-blocking leaf routine => no call info needed
-  Runtime1::StubID exit_id;
+  C1StubId exit_id;
   if (ce->compilation()->has_fpu_code()) {
-    exit_id = Runtime1::monitorexit_id;
+    exit_id = C1StubId::monitorexit_id;
   } else {
-    exit_id = Runtime1::monitorexit_nofpu_id;
+    exit_id = C1StubId::monitorexit_nofpu_id;
   }
   __ la(ra, _continuation);
   __ far_jump(RuntimeAddress(Runtime1::entry_for(exit_id)));
@@ -244,7 +244,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
 void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   ce->store_parameter(_trap_request, 0);
-  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
+  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::deoptimize_id)));
   ce->add_call_info_here(_info);
   DEBUG_ONLY(__ should_not_reach_here());
 }
@@ -253,9 +253,9 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
   address a = nullptr;
   if (_info->deoptimize_on_exception()) {
     // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
-    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+    a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
   } else {
-    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
+    a = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id);
   }
 
   ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
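Local declarations shrink the same way: Runtime1::StubID stub_id becomes C1StubId stub_id, and the chosen id still flows through Runtime1::entry_for to the platform call. A condensed sketch of the call-site pattern used by MonitorEnterStub above, with stand-ins for the HotSpot helpers (entry_for and far_call below are illustrative placeholders, not the real signatures):

    typedef unsigned char* address;

    enum class C1StubId { monitorenter_id, monitorenter_nofpu_id };

    // illustrative stand-ins for Runtime1::entry_for and the platform call
    static address entry_for(C1StubId) { return nullptr; }
    static void far_call(address) {}

    void emit_monitorenter(bool has_fpu_code) {
      C1StubId enter_id;                 // was: Runtime1::StubID enter_id;
      if (has_fpu_code) {
        enter_id = C1StubId::monitorenter_id;
      } else {
        enter_id = C1StubId::monitorenter_nofpu_id;
      }
      far_call(entry_for(enter_id));
    }
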
diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp
index a8f260acae8ce..012932189382c 100644
--- a/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_arraycopy_riscv.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -223,7 +223,7 @@ void LIR_Assembler::arraycopy_type_check(Register src, Register src_pos, Registe
     __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, nullptr);
 
     PUSH(src, dst);
-    __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
+    __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
     POP(src, dst);
     __ bnez(dst, cont);
 
diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
index 3d146b87707aa..940706b0a7376 100644
--- a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
@@ -305,7 +305,7 @@ int LIR_Assembler::emit_exception_handler() {
   __ verify_not_null_oop(x10);
 
   // search an exception handler (x10: exception oop, x13: throwing pc)
-  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
+  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::handle_exception_from_callee_id)));
   __ should_not_reach_here();
   guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
   __ end_a_stub();
@@ -361,7 +361,7 @@ int LIR_Assembler::emit_unwind_handler() {
   // remove the activation and dispatch to the unwind handler
   __ block_comment("remove_frame and dispatch to the unwind handler");
   __ remove_frame(initial_frame_size_in_bytes());
-  __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
+  __ far_jump(RuntimeAddress(Runtime1::entry_for(C1StubId::unwind_exception_id)));
 
   // Emit the slow path assembly
   if (stub != nullptr) {
@@ -1088,7 +1088,7 @@ void LIR_Assembler::typecheck_helper_slowcheck(ciKlass *k, Register obj, Registe
       __ addi(sp, sp, -2 * wordSize); // 2: store k_RInfo and klass_RInfo
       __ sd(k_RInfo, Address(sp, 0));             // sub klass
       __ sd(klass_RInfo, Address(sp, wordSize));  // super klass
-      __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
+      __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
       // load result to k_RInfo
       __ ld(k_RInfo, Address(sp, 0));
       __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo
@@ -1103,7 +1103,7 @@ void LIR_Assembler::typecheck_helper_slowcheck(ciKlass *k, Register obj, Registe
     __ addi(sp, sp, -2 * wordSize); // 2: store k_RInfo and klass_RInfo
     __ sd(klass_RInfo, Address(sp, wordSize));  // sub klass
     __ sd(k_RInfo, Address(sp, 0));             // super klass
-    __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
+    __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
     // load result to k_RInfo
     __ ld(k_RInfo, Address(sp, 0));
     __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo
@@ -1391,7 +1391,7 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
   // exception object is not added to oop map by LinearScan
   // (LinearScan assumes that no oops are in fixed registers)
   info->add_register_oop(exceptionOop);
-  Runtime1::StubID unwind_id;
+  C1StubId unwind_id;
 
   // get current pc information
   // pc is only needed if the method has an exception handler, the unwind code does not need it.
@@ -1414,9 +1414,9 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
   __ verify_not_null_oop(x10);
   // search an exception handler (x10: exception oop, x13: throwing pc)
   if (compilation()->has_fpu_code()) {
-    unwind_id = Runtime1::handle_exception_id;
+    unwind_id = C1StubId::handle_exception_id;
   } else {
-    unwind_id = Runtime1::handle_exception_nofpu_id;
+    unwind_id = C1StubId::handle_exception_nofpu_id;
   }
   __ far_call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
   __ nop();
@@ -2054,16 +2054,16 @@ void LIR_Assembler::deoptimize_trap(CodeEmitInfo *info) {
 
   switch (patching_id(info)) {
     case PatchingStub::access_field_id:
-      target = Runtime1::entry_for(Runtime1::access_field_patching_id);
+      target = Runtime1::entry_for(C1StubId::access_field_patching_id);
       break;
     case PatchingStub::load_klass_id:
-      target = Runtime1::entry_for(Runtime1::load_klass_patching_id);
+      target = Runtime1::entry_for(C1StubId::load_klass_patching_id);
       break;
     case PatchingStub::load_mirror_id:
-      target = Runtime1::entry_for(Runtime1::load_mirror_patching_id);
+      target = Runtime1::entry_for(C1StubId::load_mirror_patching_id);
       break;
     case PatchingStub::load_appendix_id:
-      target = Runtime1::entry_for(Runtime1::load_appendix_patching_id);
+      target = Runtime1::entry_for(C1StubId::load_appendix_patching_id);
       break;
     default: ShouldNotReachHere();
   }
@@ -2152,7 +2152,7 @@ void LIR_Assembler::lir_store_slowcheck(Register k_RInfo, Register klass_RInfo,
   __ addi(sp, sp, -2 * wordSize); // 2: store k_RInfo and klass_RInfo
   __ sd(klass_RInfo, Address(sp, wordSize));  // sub klass
   __ sd(k_RInfo, Address(sp, 0));             // super klass
-  __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
+  __ far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
   // load result to k_RInfo
   __ ld(k_RInfo, Address(sp, 0));
   __ addi(sp, sp, 2 * wordSize); // 2: pop out k_RInfo and klass_RInfo
diff --git a/src/hotspot/cpu/riscv/c1_LIRGenerator_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRGenerator_riscv.cpp
index 409e8dc0a0d95..b328d457192ba 100644
--- a/src/hotspot/cpu/riscv/c1_LIRGenerator_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_LIRGenerator_riscv.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -1030,7 +1030,7 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
   args->append(rank);
   args->append(varargs);
   LIR_Opr reg = result_register_for(x->type());
-  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
+  __ call_runtime(Runtime1::entry_for(C1StubId::new_multi_array_id),
                   LIR_OprFact::illegalOpr,
                   reg, args, info);
 
@@ -1062,7 +1062,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
   CodeStub* stub = nullptr;
   if (x->is_incompatible_class_change_check()) {
     assert(patching_info == nullptr, "can't patch this");
-    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr,
+    stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr,
                                    info_for_exception);
   } else if (x->is_invokespecial_receiver_check()) {
     assert(patching_info == nullptr, "can't patch this");
@@ -1070,7 +1070,7 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
                               Deoptimization::Reason_class_check,
                               Deoptimization::Action_none);
   } else {
-    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
+    stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception);
   }
   LIR_Opr reg = rlock_result(x);
   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
diff --git a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp
index 1ae64b4f283ba..1e4b66069ee23 100644
--- a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp
@@ -276,7 +276,7 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register
 
   if (CURRENT_ENV->dtrace_alloc_probes()) {
     assert(obj == x10, "must be");
-    far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
+    far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::dtrace_object_alloc_id)));
   }
 
   verify_oop(obj);
@@ -316,7 +316,7 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register tmp1
 
   if (CURRENT_ENV->dtrace_alloc_probes()) {
     assert(obj == x10, "must be");
-    far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
+    far_call(RuntimeAddress(Runtime1::entry_for(C1StubId::dtrace_object_alloc_id)));
   }
 
   verify_oop(obj);
diff --git a/src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp b/src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp
index 824d03640517e..5e4031727c827 100644
--- a/src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp
@@ -98,10 +98,10 @@ int StubAssembler::call_RT(Register oop_result, Register metadata_result, addres
     if (frame_size() == no_frame_size) {
       leave();
       far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
-    } else if (_stub_id == Runtime1::forward_exception_id) {
+    } else if (_stub_id == (int)C1StubId::forward_exception_id) {
       should_not_reach_here();
     } else {
-      far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
+      far_jump(RuntimeAddress(Runtime1::entry_for(C1StubId::forward_exception_id)));
     }
     bind(L);
   }
@@ -376,7 +376,7 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address targe
   return oop_maps;
 }
 
-OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
+OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm) {
   __ block_comment("generate_handle_exception");
 
   // incoming parameters
@@ -388,7 +388,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
   OopMap* oop_map = nullptr;
 
   switch (id) {
-    case forward_exception_id:
+    case C1StubId::forward_exception_id:
       // We're handling an exception in the context of a compiled frame.
       // The registers have been saved in the standard places.  Perform
       // an exception lookup in the caller and dispatch to the handler
@@ -407,12 +407,12 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
       __ sd(zr, Address(xthread, JavaThread::vm_result_offset()));
       __ sd(zr, Address(xthread, JavaThread::vm_result_2_offset()));
       break;
-    case handle_exception_nofpu_id:
-    case handle_exception_id:
+    case C1StubId::handle_exception_nofpu_id:
+    case C1StubId::handle_exception_id:
       // At this point all registers MAY be live.
-      oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id);
+      oop_map = save_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id);
       break;
-    case handle_exception_from_callee_id: {
+    case C1StubId::handle_exception_from_callee_id: {
       // At this point all registers except exception oop (x10) and
       // exception pc (ra) are dead.
       const int frame_size = 2 /* fp, return address */;
@@ -469,13 +469,13 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
   __ sd(x10, Address(fp, frame::return_addr_offset * BytesPerWord));
 
   switch (id) {
-    case forward_exception_id:
-    case handle_exception_nofpu_id:
-    case handle_exception_id:
+    case C1StubId::forward_exception_id:
+    case C1StubId::handle_exception_nofpu_id:
+    case C1StubId::handle_exception_id:
       // Restore the registers that were saved at the beginning.
-      restore_live_registers(sasm, id != handle_exception_nofpu_id);
+      restore_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id);
       break;
-    case handle_exception_from_callee_id:
+    case C1StubId::handle_exception_from_callee_id:
       break;
     default: ShouldNotReachHere();
   }
@@ -621,7 +621,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
   return oop_maps;
 }
 
-OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
+OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
   // for better readability
   const bool dont_gc_arguments = false;
 
@@ -632,7 +632,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
   OopMapSet* oop_maps = nullptr;
   switch (id) {
     {
-    case forward_exception_id:
+    case C1StubId::forward_exception_id:
       {
         oop_maps = generate_handle_exception(id, sasm);
         __ leave();
@@ -640,32 +640,32 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case throw_div0_exception_id:
+    case C1StubId::throw_div0_exception_id:
       {
         StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments, does_not_return);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
       }
       break;
 
-    case throw_null_pointer_exception_id:
+    case C1StubId::throw_null_pointer_exception_id:
       { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments, does_not_return);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
       }
       break;
 
-    case new_instance_id:
-    case fast_new_instance_id:
-    case fast_new_instance_init_check_id:
+    case C1StubId::new_instance_id:
+    case C1StubId::fast_new_instance_id:
+    case C1StubId::fast_new_instance_init_check_id:
       {
         Register klass = x13; // Incoming
         Register obj   = x10; // Result
 
-        if (id == new_instance_id) {
+        if (id == C1StubId::new_instance_id) {
           __ set_info("new_instance", dont_gc_arguments);
-        } else if (id == fast_new_instance_id) {
+        } else if (id == C1StubId::fast_new_instance_id) {
           __ set_info("fast new_instance", dont_gc_arguments);
         } else {
-          assert(id == fast_new_instance_init_check_id, "bad StubID");
+          assert(id == C1StubId::fast_new_instance_init_check_id, "bad C1StubId");
           __ set_info("fast new_instance init check", dont_gc_arguments);
         }
 
@@ -686,7 +686,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
 
       break;
 
-    case counter_overflow_id:
+    case C1StubId::counter_overflow_id:
       {
         Register bci = x10;
         Register method = x11;
@@ -710,14 +710,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case new_type_array_id:
-    case new_object_array_id:
+    case C1StubId::new_type_array_id:
+    case C1StubId::new_object_array_id:
       {
         Register length   = x9;  // Incoming
         Register klass    = x13; // Incoming
         Register obj      = x10; // Result
 
-        if (id == new_type_array_id) {
+        if (id == C1StubId::new_type_array_id) {
           __ set_info("new_type_array", dont_gc_arguments);
         } else {
           __ set_info("new_object_array", dont_gc_arguments);
@@ -730,7 +730,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
           Register tmp = obj;
           __ lwu(tmp, Address(klass, Klass::layout_helper_offset()));
           __ sraiw(tmp, tmp, Klass::_lh_array_tag_shift);
-          int tag = ((id == new_type_array_id) ? Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value);
+          int tag = ((id == C1StubId::new_type_array_id) ? Klass::_lh_array_tag_type_value : Klass::_lh_array_tag_obj_value);
           __ mv(t0, tag);
           __ beq(t0, tmp, ok);
           __ stop("assert(is an array klass)");
@@ -743,7 +743,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
         OopMap* map = save_live_registers(sasm);
         assert_cond(map != nullptr);
         int call_offset = 0;
-        if (id == new_type_array_id) {
+        if (id == C1StubId::new_type_array_id) {
           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
         } else {
           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
@@ -762,7 +762,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case new_multi_array_id:
+    case C1StubId::new_multi_array_id:
       {
         StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
         // x10: klass
@@ -785,7 +785,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case register_finalizer_id:
+    case C1StubId::register_finalizer_id:
       {
         __ set_info("register_finalizer", dont_gc_arguments);
 
@@ -819,14 +819,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case throw_class_cast_exception_id:
+    case C1StubId::throw_class_cast_exception_id:
       {
         StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments, does_not_return);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
       }
       break;
 
-    case throw_incompatible_class_change_error_id:
+    case C1StubId::throw_incompatible_class_change_error_id:
       {
         StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments, does_not_return);
         oop_maps = generate_exception_throw(sasm,
@@ -834,7 +834,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case slow_subtype_check_id:
+    case C1StubId::slow_subtype_check_id:
       {
         // Typical calling sequence:
         // push klass_RInfo (object klass or other subclass)
@@ -874,10 +874,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case monitorenter_nofpu_id:
+    case C1StubId::monitorenter_nofpu_id:
       save_fpu_registers = false;
       // fall through
-    case monitorenter_id:
+    case C1StubId::monitorenter_id:
       {
         StubFrame f(sasm, "monitorenter", dont_gc_arguments);
         OopMap* map = save_live_registers(sasm, save_fpu_registers);
@@ -896,10 +896,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case monitorexit_nofpu_id:
+    case C1StubId::monitorexit_nofpu_id:
       save_fpu_registers = false;
       // fall through
-    case monitorexit_id:
+    case C1StubId::monitorexit_id:
       {
         StubFrame f(sasm, "monitorexit", dont_gc_arguments);
         OopMap* map = save_live_registers(sasm, save_fpu_registers);
@@ -920,7 +920,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case deoptimize_id:
+    case C1StubId::deoptimize_id:
       {
         StubFrame f(sasm, "deoptimize", dont_gc_arguments, does_not_return);
         OopMap* oop_map = save_live_registers(sasm);
@@ -939,14 +939,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case throw_range_check_failed_id:
+    case C1StubId::throw_range_check_failed_id:
       {
         StubFrame f(sasm, "range_check_failed", dont_gc_arguments, does_not_return);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
       }
       break;
 
-    case unwind_exception_id:
+    case C1StubId::unwind_exception_id:
       {
         __ set_info("unwind_exception", dont_gc_arguments);
         // note: no stubframe since we are about to leave the current
@@ -955,7 +955,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case access_field_patching_id:
+    case C1StubId::access_field_patching_id:
       {
         StubFrame f(sasm, "access_field_patching", dont_gc_arguments, does_not_return);
         // we should set up register map
@@ -963,7 +963,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case load_klass_patching_id:
+    case C1StubId::load_klass_patching_id:
       {
         StubFrame f(sasm, "load_klass_patching", dont_gc_arguments, does_not_return);
         // we should set up register map
@@ -971,7 +971,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case load_mirror_patching_id:
+    case C1StubId::load_mirror_patching_id:
       {
         StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments, does_not_return);
         // we should set up register map
@@ -979,7 +979,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case load_appendix_patching_id:
+    case C1StubId::load_appendix_patching_id:
       {
         StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments, does_not_return);
         // we should set up register map
@@ -987,29 +987,29 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case handle_exception_nofpu_id:
-    case handle_exception_id:
+    case C1StubId::handle_exception_nofpu_id:
+    case C1StubId::handle_exception_id:
       {
         StubFrame f(sasm, "handle_exception", dont_gc_arguments);
         oop_maps = generate_handle_exception(id, sasm);
       }
       break;
 
-    case handle_exception_from_callee_id:
+    case C1StubId::handle_exception_from_callee_id:
       {
         StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
         oop_maps = generate_handle_exception(id, sasm);
       }
       break;
 
-    case throw_index_exception_id:
+    case C1StubId::throw_index_exception_id:
       {
         StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments, does_not_return);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
       }
       break;
 
-    case throw_array_store_exception_id:
+    case C1StubId::throw_array_store_exception_id:
       {
         StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments, does_not_return);
         // tos + 0: link
@@ -1018,7 +1018,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case predicate_failed_trap_id:
+    case C1StubId::predicate_failed_trap_id:
       {
         StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments, does_not_return);
 
@@ -1038,7 +1038,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case dtrace_object_alloc_id:
+    case C1StubId::dtrace_object_alloc_id:
       { // c_rarg0: object
         StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
         save_live_registers(sasm);
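
The riscv `generate_code_for` above leans on a deliberate fall-through: the `_nofpu` variant of the monitorenter/monitorexit cases clears `save_fpu_registers` and then drops into the shared case body. A minimal sketch of that control flow follows; only the enumerator names and the fall-through idiom come from the diff, everything else (the function shape, the printf stand-in for stub emission) is a simplified assumption.

    #include <cstdio>

    // Stand-in enum: just the two members this sketch needs.
    enum class C1StubId { monitorenter_id, monitorenter_nofpu_id };

    void generate_monitorenter(C1StubId id) {
      bool save_fpu_registers = true;
      switch (id) {
        case C1StubId::monitorenter_nofpu_id:
          save_fpu_registers = false;
          // fall through -- the nofpu variant shares the main case body
        case C1StubId::monitorenter_id:
          // In the real code this is where the stub frame is built and
          // live registers (optionally including FPU) are saved.
          std::printf("emit monitorenter stub, save_fpu=%d\n",
                      (int)save_fpu_registers);
          break;
      }
    }

    int main() {
      generate_monitorenter(C1StubId::monitorenter_nofpu_id);
      return 0;
    }
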
diff --git a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp
index b7f1d3605681a..e01e4458e38d3 100644
--- a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp
@@ -48,7 +48,7 @@ void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
 void RangeCheckStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   if (_info->deoptimize_on_exception()) {
-    address a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
+    address a = Runtime1::entry_for (C1StubId::predicate_failed_trap_id);
     ce->emit_call_c(a);
     CHECK_BAILOUT();
     ce->add_call_info_here(_info);
@@ -64,11 +64,11 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
     __ load_const_optimized(Z_R1_scratch, _index->as_jint());
   }
 
-  Runtime1::StubID stub_id;
+  C1StubId stub_id;
   if (_throw_index_out_of_bounds_exception) {
-    stub_id = Runtime1::throw_index_exception_id;
+    stub_id = C1StubId::throw_index_exception_id;
   } else {
-    stub_id = Runtime1::throw_range_check_failed_id;
+    stub_id = C1StubId::throw_range_check_failed_id;
     __ lgr_if_needed(Z_R0_scratch, _array->as_pointer_register());
   }
   ce->emit_call_c(Runtime1::entry_for (stub_id));
@@ -84,7 +84,7 @@ PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
 
 void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
-  address a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
+  address a = Runtime1::entry_for (C1StubId::predicate_failed_trap_id);
   ce->emit_call_c(a);
   CHECK_BAILOUT();
   ce->add_call_info_here(_info);
@@ -102,7 +102,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   }
   ce->store_parameter(/*_method->as_register()*/ Z_R1_scratch, 1);
   ce->store_parameter(_bci, 0);
-  ce->emit_call_c(Runtime1::entry_for (Runtime1::counter_overflow_id));
+  ce->emit_call_c(Runtime1::entry_for (C1StubId::counter_overflow_id));
   CHECK_BAILOUT();
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
@@ -114,7 +114,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
     ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
   }
   __ bind(_entry);
-  ce->emit_call_c(Runtime1::entry_for (Runtime1::throw_div0_exception_id));
+  ce->emit_call_c(Runtime1::entry_for (C1StubId::throw_div0_exception_id));
   CHECK_BAILOUT();
   ce->add_call_info_here(_info);
   debug_only(__ should_not_reach_here());
@@ -124,9 +124,9 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
   address a;
   if (_info->deoptimize_on_exception()) {
     // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
-    a = Runtime1::entry_for (Runtime1::predicate_failed_trap_id);
+    a = Runtime1::entry_for (C1StubId::predicate_failed_trap_id);
   } else {
-    a = Runtime1::entry_for (Runtime1::throw_null_pointer_exception_id);
+    a = Runtime1::entry_for (C1StubId::throw_null_pointer_exception_id);
   }
 
   ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
@@ -151,14 +151,14 @@ void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
   debug_only(__ should_not_reach_here());
 }
 
-NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
+NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) {
   _result = result;
   _klass = klass;
   _klass_reg = klass_reg;
   _info = new CodeEmitInfo(info);
-  assert(stub_id == Runtime1::new_instance_id                 ||
-         stub_id == Runtime1::fast_new_instance_id            ||
-         stub_id == Runtime1::fast_new_instance_init_check_id,
+  assert(stub_id == C1StubId::new_instance_id                 ||
+         stub_id == C1StubId::fast_new_instance_id            ||
+         stub_id == C1StubId::fast_new_instance_init_check_id,
          "need new_instance id");
   _stub_id = stub_id;
 }
@@ -186,7 +186,7 @@ void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
   __ lgr_if_needed(Z_R13, _length->as_register());
-  address a = Runtime1::entry_for (Runtime1::new_type_array_id);
+  address a = Runtime1::entry_for (C1StubId::new_type_array_id);
   ce->emit_call_c(a);
   CHECK_BAILOUT();
   ce->add_call_info_here(_info);
@@ -206,7 +206,7 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   assert(_klass_reg->as_register() == Z_R11, "call target expects klass in Z_R11");
   __ lgr_if_needed(Z_R13, _length->as_register());
-  address a = Runtime1::entry_for (Runtime1::new_object_array_id);
+  address a = Runtime1::entry_for (C1StubId::new_object_array_id);
   ce->emit_call_c(a);
   CHECK_BAILOUT();
   ce->add_call_info_here(_info);
@@ -217,11 +217,11 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
 
 void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
-  Runtime1::StubID enter_id;
+  C1StubId enter_id;
   if (ce->compilation()->has_fpu_code()) {
-    enter_id = Runtime1::monitorenter_id;
+    enter_id = C1StubId::monitorenter_id;
   } else {
-    enter_id = Runtime1::monitorenter_nofpu_id;
+    enter_id = C1StubId::monitorenter_nofpu_id;
   }
   __ lgr_if_needed(Z_R1_scratch, _obj_reg->as_register());
   __ lgr_if_needed(Z_R13, _lock_reg->as_register()); // See LIRGenerator::syncTempOpr().
@@ -242,11 +242,11 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) {
     __ lgr_if_needed(Z_R1_scratch, _lock_reg->as_register());
   }
   // Note: non-blocking leaf routine => no call info needed.
-  Runtime1::StubID exit_id;
+  C1StubId exit_id;
   if (ce->compilation()->has_fpu_code()) {
-    exit_id = Runtime1::monitorexit_id;
+    exit_id = C1StubId::monitorexit_id;
   } else {
-    exit_id = Runtime1::monitorexit_nofpu_id;
+    exit_id = C1StubId::monitorexit_nofpu_id;
   }
   ce->emit_call_c(Runtime1::entry_for (exit_id));
   CHECK_BAILOUT();
@@ -378,10 +378,10 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
   address target = nullptr;
   relocInfo::relocType reloc_type = relocInfo::none;
   switch (_id) {
-    case access_field_id:  target = Runtime1::entry_for (Runtime1::access_field_patching_id); break;
-    case load_klass_id:    target = Runtime1::entry_for (Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
-    case load_mirror_id:   target = Runtime1::entry_for (Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
-    case load_appendix_id: target = Runtime1::entry_for (Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
+    case access_field_id:  target = Runtime1::entry_for (C1StubId::access_field_patching_id); break;
+    case load_klass_id:    target = Runtime1::entry_for (C1StubId::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
+    case load_mirror_id:   target = Runtime1::entry_for (C1StubId::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
+    case load_appendix_id: target = Runtime1::entry_for (C1StubId::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
     default: ShouldNotReachHere();
   }
   __ bind(call_patch);
@@ -406,7 +406,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
 void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   __ load_const_optimized(Z_R1_scratch, _trap_request); // Pass trap request in Z_R1_scratch.
-  ce->emit_call_c(Runtime1::entry_for (Runtime1::deoptimize_id));
+  ce->emit_call_c(Runtime1::entry_for (C1StubId::deoptimize_id));
   CHECK_BAILOUT();
   ce->add_call_info_here(_info);
   DEBUG_ONLY(__ should_not_reach_here());
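
Across these files the pattern is mechanical: locals previously typed `Runtime1::StubID` become `C1StubId`, and every enumerator gains a `C1StubId::` qualifier. The sketch below shows why, assuming `C1StubId` is declared as a scoped enum (the real declaration lives in shared c1 code; the member list here is a hypothetical subset taken from the call sites visible in this diff).

    // Hypothetical before/after; not HotSpot's real declarations.
    struct Runtime1Old {
      // Old style: unscoped enum nested in Runtime1, enumerators leak into
      // the enclosing scope and convert implicitly to int.
      enum StubID { throw_index_exception_id, throw_range_check_failed_id };
    };

    // New style: scoped enum, enumerators are qualified and do not convert
    // implicitly, which is exactly the churn this patch performs everywhere.
    enum class C1StubId { throw_index_exception_id, throw_range_check_failed_id };

    int main() {
      int old_id = Runtime1Old::throw_index_exception_id; // implicit conversion OK
      C1StubId new_id = C1StubId::throw_index_exception_id; // qualification required
      int as_int = static_cast<int>(new_id);                // conversion now explicit
      (void)old_id; (void)as_int;
      return 0;
    }
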
diff --git a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
index a5e62169a9350..d288f4a893d0a 100644
--- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
@@ -172,7 +172,7 @@ int LIR_Assembler::emit_exception_handler() {
 
   int offset = code_offset();
 
-  address a = Runtime1::entry_for (Runtime1::handle_exception_from_callee_id);
+  address a = Runtime1::entry_for (C1StubId::handle_exception_from_callee_id);
   address call_addr = emit_call_c(a);
   CHECK_BAILOUT_(-1);
   __ should_not_reach_here();
@@ -212,7 +212,7 @@ int LIR_Assembler::emit_unwind_handler() {
   // Perform needed unlocking.
   MonitorExitStub* stub = nullptr;
   if (method()->is_synchronized()) {
-    // Runtime1::monitorexit_id expects lock address in Z_R1_scratch.
+    // C1StubId::monitorexit_id expects lock address in Z_R1_scratch.
     LIR_Opr lock = FrameMap::as_opr(Z_R1_scratch);
     monitor_address(0, lock);
     stub = new MonitorExitStub(lock, true, 0);
@@ -241,7 +241,7 @@ int LIR_Assembler::emit_unwind_handler() {
   // Z_EXC_PC: exception pc
 
   // Dispatch to the unwind logic.
-  __ load_const_optimized(Z_R5, Runtime1::entry_for (Runtime1::unwind_exception_id));
+  __ load_const_optimized(Z_R5, Runtime1::entry_for (C1StubId::unwind_exception_id));
   __ z_br(Z_R5);
 
   // Emit the slow path assembly.
@@ -1910,8 +1910,8 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
   // Reuse the debug info from the safepoint poll for the throw op itself.
   __ get_PC(Z_EXC_PC);
   add_call_info(__ offset(), info); // for exception handler
-  address stub = Runtime1::entry_for (compilation()->has_fpu_code() ? Runtime1::handle_exception_id
-                                                                    : Runtime1::handle_exception_nofpu_id);
+  address stub = Runtime1::entry_for (compilation()->has_fpu_code() ? C1StubId::handle_exception_id
+                                                                    : C1StubId::handle_exception_nofpu_id);
   emit_call_c(stub);
 }
 
@@ -2116,7 +2116,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
 
       store_parameter(src_klass, 0); // sub
       store_parameter(dst_klass, 1); // super
-      emit_call_c(Runtime1::entry_for (Runtime1::slow_subtype_check_id));
+      emit_call_c(Runtime1::entry_for (C1StubId::slow_subtype_check_id));
       CHECK_BAILOUT2(cont, slow);
       // Sets condition code 0 for match (2 otherwise).
       __ branch_optimized(Assembler::bcondEqual, cont);
@@ -2539,7 +2539,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
                                      RegisterOrConstant(super_check_offset));
     if (need_slow_path) {
       // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
-      address a = Runtime1::entry_for (Runtime1::slow_subtype_check_id);
+      address a = Runtime1::entry_for (C1StubId::slow_subtype_check_id);
       store_parameter(klass_RInfo, 0); // sub
       store_parameter(k_RInfo, 1);     // super
       emit_call_c(a); // Sets condition code 0 for match (2 otherwise).
@@ -2614,7 +2614,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
     // Perform the fast part of the checking logic.
     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, nullptr);
     // Call out-of-line instance of __ check_klass_subtype_slow_path(...):
-    address a = Runtime1::entry_for (Runtime1::slow_subtype_check_id);
+    address a = Runtime1::entry_for (C1StubId::slow_subtype_check_id);
     store_parameter(klass_RInfo, 0); // sub
     store_parameter(k_RInfo, 1);     // super
     emit_call_c(a); // Sets condition code 0 for match (2 otherwise).
diff --git a/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp b/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp
index 619f0f7174f01..f998e86256f56 100644
--- a/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_LIRGenerator_s390.cpp
@@ -885,7 +885,7 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
   args->append(rank);
   args->append(varargs);
   LIR_Opr reg = result_register_for (x->type());
-  __ call_runtime(Runtime1::entry_for (Runtime1::new_multi_array_id),
+  __ call_runtime(Runtime1::entry_for (C1StubId::new_multi_array_id),
                   LIR_OprFact::illegalOpr,
                   reg, args, info);
 
@@ -916,14 +916,14 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
   CodeStub* stub;
   if (x->is_incompatible_class_change_check()) {
     assert(patching_info == nullptr, "can't patch this");
-    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
+    stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
   } else if (x->is_invokespecial_receiver_check()) {
     assert(patching_info == nullptr, "can't patch this");
     stub = new DeoptimizeStub(info_for_exception,
                               Deoptimization::Reason_class_check,
                               Deoptimization::Action_none);
   } else {
-    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
+    stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception);
   }
   LIR_Opr reg = rlock_result(x);
   LIR_Opr tmp1 = new_register(objectType);
diff --git a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
index a9140a7925ebd..f3fa19ddb31e0 100644
--- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
@@ -254,7 +254,7 @@ void C1_MacroAssembler::initialize_object(
   // Dtrace support is unimplemented.
   //  if (CURRENT_ENV->dtrace_alloc_probes()) {
   //    assert(obj == rax, "must be");
-  //    call(RuntimeAddress(Runtime1::entry_for (Runtime1::dtrace_object_alloc_id)));
+  //    call(RuntimeAddress(Runtime1::entry_for (C1StubId::dtrace_object_alloc_id)));
   //  }
 
   verify_oop(obj, FILE_AND_LINE);
@@ -315,7 +315,7 @@ void C1_MacroAssembler::allocate_array(
   // Dtrace support is unimplemented.
   // if (CURRENT_ENV->dtrace_alloc_probes()) {
   //   assert(obj == rax, "must be");
-  //   call(RuntimeAddress(Runtime1::entry_for (Runtime1::dtrace_object_alloc_id)));
+  //   call(RuntimeAddress(Runtime1::entry_for (C1StubId::dtrace_object_alloc_id)));
   // }
 
   verify_oop(obj, FILE_AND_LINE);
diff --git a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp
index 41c57043d8234..2f629c108c956 100644
--- a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp
@@ -98,10 +98,10 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
       restore_return_pc();
       load_const_optimized(Z_R1, StubRoutines::forward_exception_entry());
       z_br(Z_R1);
-    } else if (_stub_id == Runtime1::forward_exception_id) {
+    } else if (_stub_id == (int)C1StubId::forward_exception_id) {
       should_not_reach_here();
     } else {
-      load_const_optimized(Z_R1, Runtime1::entry_for (Runtime1::forward_exception_id));
+      load_const_optimized(Z_R1, Runtime1::entry_for (C1StubId::forward_exception_id));
       z_br(Z_R1);
     }
 
@@ -305,7 +305,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
   return oop_maps;
 }
 
-OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
+OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
 
   // for better readability
   const bool must_gc_arguments = true;
@@ -318,26 +318,26 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
   // Stub code and info for the different stubs.
   OopMapSet* oop_maps = nullptr;
   switch (id) {
-    case forward_exception_id:
+    case C1StubId::forward_exception_id:
       {
         oop_maps = generate_handle_exception(id, sasm);
         // will not return
       }
       break;
 
-    case new_instance_id:
-    case fast_new_instance_id:
-    case fast_new_instance_init_check_id:
+    case C1StubId::new_instance_id:
+    case C1StubId::fast_new_instance_id:
+    case C1StubId::fast_new_instance_init_check_id:
       {
         Register klass    = Z_R11; // Incoming
         Register obj      = Z_R2;  // Result
 
-        if (id == new_instance_id) {
+        if (id == C1StubId::new_instance_id) {
           __ set_info("new_instance", dont_gc_arguments);
-        } else if (id == fast_new_instance_id) {
+        } else if (id == C1StubId::fast_new_instance_id) {
           __ set_info("fast new_instance", dont_gc_arguments);
         } else {
-          assert(id == fast_new_instance_init_check_id, "bad StubID");
+          assert(id == C1StubId::fast_new_instance_init_check_id, "bad C1StubId");
           __ set_info("fast new_instance init check", dont_gc_arguments);
         }
 
@@ -352,7 +352,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case counter_overflow_id:
+    case C1StubId::counter_overflow_id:
       {
         // Arguments :
         //   bci    : stack param 0
@@ -371,14 +371,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
         __ z_br(Z_R14);
       }
       break;
-    case new_type_array_id:
-    case new_object_array_id:
+    case C1StubId::new_type_array_id:
+    case C1StubId::new_object_array_id:
       {
         Register length   = Z_R13; // Incoming
         Register klass    = Z_R11; // Incoming
         Register obj      = Z_R2;  // Result
 
-        if (id == new_type_array_id) {
+        if (id == C1StubId::new_type_array_id) {
           __ set_info("new_type_array", dont_gc_arguments);
         } else {
           __ set_info("new_object_array", dont_gc_arguments);
@@ -391,7 +391,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
           Register t0 = obj;
           __ mem2reg_opt(t0, Address(klass, Klass::layout_helper_offset()), false);
           __ z_sra(t0, Klass::_lh_array_tag_shift);
-          int tag = ((id == new_type_array_id)
+          int tag = ((id == C1StubId::new_type_array_id)
                      ? Klass::_lh_array_tag_type_value
                      : Klass::_lh_array_tag_obj_value);
           __ compare32_and_branch(t0, tag, Assembler::bcondEqual, ok);
@@ -403,7 +403,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
 
         OopMap* map = save_live_registers_except_r2(sasm);
         int call_offset;
-        if (id == new_type_array_id) {
+        if (id == C1StubId::new_type_array_id) {
           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
         } else {
           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
@@ -418,7 +418,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case new_multi_array_id:
+    case C1StubId::new_multi_array_id:
       { __ set_info("new_multi_array", dont_gc_arguments);
         // Z_R3,: klass
         // Z_R4,: rank
@@ -436,7 +436,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case register_finalizer_id:
+    case C1StubId::register_finalizer_id:
       {
         __ set_info("register_finalizer", dont_gc_arguments);
 
@@ -459,62 +459,62 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case throw_range_check_failed_id:
+    case C1StubId::throw_range_check_failed_id:
       { __ set_info("range_check_failed", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
       }
       break;
 
-    case throw_index_exception_id:
+    case C1StubId::throw_index_exception_id:
       { __ set_info("index_range_check_failed", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
       }
       break;
-    case throw_div0_exception_id:
+    case C1StubId::throw_div0_exception_id:
       { __ set_info("throw_div0_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
       }
       break;
-    case throw_null_pointer_exception_id:
+    case C1StubId::throw_null_pointer_exception_id:
       { __ set_info("throw_null_pointer_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
       }
       break;
-    case handle_exception_nofpu_id:
-    case handle_exception_id:
+    case C1StubId::handle_exception_nofpu_id:
+    case C1StubId::handle_exception_id:
       { __ set_info("handle_exception", dont_gc_arguments);
         oop_maps = generate_handle_exception(id, sasm);
       }
       break;
-    case handle_exception_from_callee_id:
+    case C1StubId::handle_exception_from_callee_id:
       { __ set_info("handle_exception_from_callee", dont_gc_arguments);
         oop_maps = generate_handle_exception(id, sasm);
       }
       break;
-    case unwind_exception_id:
+    case C1StubId::unwind_exception_id:
       { __ set_info("unwind_exception", dont_gc_arguments);
         // Note: no stubframe since we are about to leave the current
         // activation and we are calling a leaf VM function only.
         generate_unwind_exception(sasm);
       }
       break;
-    case throw_array_store_exception_id:
+    case C1StubId::throw_array_store_exception_id:
       { __ set_info("throw_array_store_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true);
       }
       break;
-    case throw_class_cast_exception_id:
+    case C1StubId::throw_class_cast_exception_id:
     { // Z_R1_scratch: object
       __ set_info("throw_class_cast_exception", dont_gc_arguments);
       oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
     }
     break;
-    case throw_incompatible_class_change_error_id:
+    case C1StubId::throw_incompatible_class_change_error_id:
       { __ set_info("throw_incompatible_class_cast_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
       }
       break;
-    case slow_subtype_check_id:
+    case C1StubId::slow_subtype_check_id:
     {
       // Arguments :
       //   sub  : stack param 0
@@ -580,13 +580,13 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       __ z_br(Z_R14);
     }
     break;
-    case monitorenter_nofpu_id:
-    case monitorenter_id:
+    case C1StubId::monitorenter_nofpu_id:
+    case C1StubId::monitorenter_id:
       { // Z_R1_scratch : object
         // Z_R13       : lock address (see LIRGenerator::syncTempOpr())
         __ set_info("monitorenter", dont_gc_arguments);
 
-        int save_fpu_registers = (id == monitorenter_id);
+        int save_fpu_registers = (id == C1StubId::monitorenter_id);
         // Make a frame and preserve the caller's caller-save registers.
         OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);
 
@@ -600,15 +600,15 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case monitorexit_nofpu_id:
-    case monitorexit_id:
+    case C1StubId::monitorexit_nofpu_id:
+    case C1StubId::monitorexit_id:
       { // Z_R1_scratch : lock address
         // Note: really a leaf routine but must setup last java sp
         //   => Use call_RT for now (speed can be improved by
         //      doing last java sp setup manually).
         __ set_info("monitorexit", dont_gc_arguments);
 
-        int save_fpu_registers = (id == monitorexit_id);
+        int save_fpu_registers = (id == C1StubId::monitorexit_id);
         // Make a frame and preserve the caller's caller-save registers.
         OopMap* oop_map = save_live_registers(sasm, save_fpu_registers);
 
@@ -622,7 +622,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case deoptimize_id:
+    case C1StubId::deoptimize_id:
       { // Args: Z_R1_scratch: trap request
         __ set_info("deoptimize", dont_gc_arguments);
         Register trap_request = Z_R1_scratch;
@@ -639,32 +639,32 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case access_field_patching_id:
+    case C1StubId::access_field_patching_id:
       { __ set_info("access_field_patching", dont_gc_arguments);
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
       }
       break;
 
-    case load_klass_patching_id:
+    case C1StubId::load_klass_patching_id:
       { __ set_info("load_klass_patching", dont_gc_arguments);
         // We should set up register map.
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
       }
       break;
 
-    case load_mirror_patching_id:
+    case C1StubId::load_mirror_patching_id:
       { __ set_info("load_mirror_patching", dont_gc_arguments);
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
       }
       break;
 
-    case load_appendix_patching_id:
+    case C1StubId::load_appendix_patching_id:
       { __ set_info("load_appendix_patching", dont_gc_arguments);
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
       }
       break;
 #if 0
-    case dtrace_object_alloc_id:
+    case C1StubId::dtrace_object_alloc_id:
       { // rax,: object
         StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
         // We can't gc here so skip the oopmap but make sure that all
@@ -679,7 +679,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case fpu2long_stub_id:
+    case C1StubId::fpu2long_stub_id:
       {
         // rax, and rdx are destroyed, but should be free since the result is returned there
         // preserve rsi,ecx
@@ -754,7 +754,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       break;
 #endif // TODO
 
-    case predicate_failed_trap_id:
+    case C1StubId::predicate_failed_trap_id:
       {
         __ set_info("predicate_failed_trap", dont_gc_arguments);
 
@@ -775,14 +775,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
 
     default:
       {
-        __ should_not_reach_here(FILE_AND_LINE, id);
+        __ should_not_reach_here(FILE_AND_LINE, (int)id);
       }
       break;
   }
   return oop_maps;
 }
 
-OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
+OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm) {
   __ block_comment("generate_handle_exception");
 
   // incoming parameters: Z_EXC_OOP, Z_EXC_PC
@@ -793,7 +793,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
   Register reg_fp = Z_R1_scratch;
 
   switch (id) {
-    case forward_exception_id: {
+    case C1StubId::forward_exception_id: {
       // We're handling an exception in the context of a compiled frame.
       // The registers have been saved in the standard places. Perform
       // an exception lookup in the caller and dispatch to the handler
@@ -820,13 +820,13 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
       __ clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(Metadata*));
       break;
     }
-    case handle_exception_nofpu_id:
-    case handle_exception_id:
+    case C1StubId::handle_exception_nofpu_id:
+    case C1StubId::handle_exception_id:
       // At this point all registers MAY be live.
       DEBUG_ONLY(__ z_lgr(reg_fp, Z_SP);)
-      oop_map = save_live_registers(sasm, id != handle_exception_nofpu_id, Z_EXC_PC);
+      oop_map = save_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id, Z_EXC_PC);
       break;
-    case handle_exception_from_callee_id: {
+    case C1StubId::handle_exception_from_callee_id: {
       // At this point all registers except Z_EXC_OOP and Z_EXC_PC are dead.
       DEBUG_ONLY(__ z_lgr(reg_fp, Z_SP);)
       __ save_return_pc(Z_EXC_PC);
@@ -875,15 +875,15 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
   __ invalidate_registers(Z_R2);
 
   switch(id) {
-    case forward_exception_id:
-    case handle_exception_nofpu_id:
-    case handle_exception_id:
+    case C1StubId::forward_exception_id:
+    case C1StubId::handle_exception_nofpu_id:
+    case C1StubId::handle_exception_id:
       // Restore the registers that were saved at the beginning.
       __ z_lgr(Z_R1_scratch, Z_R2);   // Restoring live registers kills Z_R2.
-      restore_live_registers(sasm, id != handle_exception_nofpu_id);  // Pops as well the frame.
+      restore_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id);  // Pops as well the frame.
       __ z_br(Z_R1_scratch);
       break;
-    case handle_exception_from_callee_id: {
+    case C1StubId::handle_exception_from_callee_id: {
       __ pop_frame();
       __ z_br(Z_R2); // Jump to exception handler.
     }
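
The two explicit `(int)` casts in the s390 file above are the visible cost of the scoped enum: `StubAssembler` evidently keeps `_stub_id` as a plain `int`, so both the comparison in `call_RT` and the `should_not_reach_here(FILE_AND_LINE, (int)id)` call need a cast. A hedged sketch of that boundary, where the field name and method shape are assumptions inferred from the call sites, not the real `StubAssembler`:

    enum class C1StubId { forward_exception_id, deoptimize_id };

    // Assumed shape of the stored-as-int boundary; simplified stand-in only.
    struct StubAssemblerSketch {
      int _stub_id; // kept as int, so enum values are cast on the way in and out

      bool is_forward_exception() const {
        // Mirrors: _stub_id == (int)C1StubId::forward_exception_id
        return _stub_id == (int)C1StubId::forward_exception_id;
      }
    };

    int main() {
      StubAssemblerSketch sa{ (int)C1StubId::forward_exception_id };
      return sa.is_forward_exception() ? 0 : 1;
    }
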
diff --git a/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp b/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp
index 7d89b148ba22f..71ca9351f86c9 100644
--- a/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_CodeStubs_x86.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -110,7 +110,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
   Metadata *m = _method->as_constant_ptr()->as_metadata();
   ce->store_parameter(m, 1);
   ce->store_parameter(_bci, 0);
-  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
+  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::counter_overflow_id)));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   __ jmp(_continuation);
@@ -119,7 +119,7 @@ void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
 void RangeCheckStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   if (_info->deoptimize_on_exception()) {
-    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+    address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
     __ call(RuntimeAddress(a));
     ce->add_call_info_here(_info);
     ce->verify_oop_map(_info);
@@ -133,11 +133,11 @@ void RangeCheckStub::emit_code(LIR_Assembler* ce) {
   } else {
     ce->store_parameter(_index->as_jint(), 0);
   }
-  Runtime1::StubID stub_id;
+  C1StubId stub_id;
   if (_throw_index_out_of_bounds_exception) {
-    stub_id = Runtime1::throw_index_exception_id;
+    stub_id = C1StubId::throw_index_exception_id;
   } else {
-    stub_id = Runtime1::throw_range_check_failed_id;
+    stub_id = C1StubId::throw_range_check_failed_id;
     ce->store_parameter(_array->as_pointer_register(), 1);
   }
   __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
@@ -152,7 +152,7 @@ PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
 
 void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
-  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+  address a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
   __ call(RuntimeAddress(a));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
@@ -164,7 +164,7 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
     ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
   }
   __ bind(_entry);
-  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
+  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::throw_div0_exception_id)));
   ce->add_call_info_here(_info);
   debug_only(__ should_not_reach_here());
 }
@@ -172,14 +172,14 @@ void DivByZeroStub::emit_code(LIR_Assembler* ce) {
 
 // Implementation of NewInstanceStub
 
-NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
+NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id) {
   _result = result;
   _klass = klass;
   _klass_reg = klass_reg;
   _info = new CodeEmitInfo(info);
-  assert(stub_id == Runtime1::new_instance_id                 ||
-         stub_id == Runtime1::fast_new_instance_id            ||
-         stub_id == Runtime1::fast_new_instance_init_check_id,
+  assert(stub_id == C1StubId::new_instance_id                 ||
+         stub_id == C1StubId::fast_new_instance_id            ||
+         stub_id == C1StubId::fast_new_instance_init_check_id,
          "need new_instance id");
   _stub_id   = stub_id;
 }
@@ -212,7 +212,7 @@ void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   assert(_length->as_register() == rbx, "length must in rbx,");
   assert(_klass_reg->as_register() == rdx, "klass_reg must in rdx");
-  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
+  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_type_array_id)));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   assert(_result->as_register() == rax, "result must in rax,");
@@ -235,7 +235,7 @@ void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   assert(_length->as_register() == rbx, "length must in rbx,");
   assert(_klass_reg->as_register() == rdx, "klass_reg must in rdx");
-  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
+  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::new_object_array_id)));
   ce->add_call_info_here(_info);
   ce->verify_oop_map(_info);
   assert(_result->as_register() == rax, "result must in rax,");
@@ -247,11 +247,11 @@ void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   ce->store_parameter(_obj_reg->as_register(),  1);
   ce->store_parameter(_lock_reg->as_register(), 0);
-  Runtime1::StubID enter_id;
+  C1StubId enter_id;
   if (ce->compilation()->has_fpu_code()) {
-    enter_id = Runtime1::monitorenter_id;
+    enter_id = C1StubId::monitorenter_id;
   } else {
-    enter_id = Runtime1::monitorenter_nofpu_id;
+    enter_id = C1StubId::monitorenter_nofpu_id;
   }
   __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
   ce->add_call_info_here(_info);
@@ -268,11 +268,11 @@ void MonitorExitStub::emit_code(LIR_Assembler* ce) {
   }
   ce->store_parameter(_lock_reg->as_register(), 0);
   // note: non-blocking leaf routine => no call info needed
-  Runtime1::StubID exit_id;
+  C1StubId exit_id;
   if (ce->compilation()->has_fpu_code()) {
-    exit_id = Runtime1::monitorexit_id;
+    exit_id = C1StubId::monitorexit_id;
   } else {
-    exit_id = Runtime1::monitorexit_nofpu_id;
+    exit_id = C1StubId::monitorexit_nofpu_id;
   }
   __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
   __ jmp(_continuation);
@@ -407,10 +407,10 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
   address target = nullptr;
   relocInfo::relocType reloc_type = relocInfo::none;
   switch (_id) {
-    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
-    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
-    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
-    case load_appendix_id:      target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
+    case access_field_id:  target = Runtime1::entry_for(C1StubId::access_field_patching_id); break;
+    case load_klass_id:    target = Runtime1::entry_for(C1StubId::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
+    case load_mirror_id:   target = Runtime1::entry_for(C1StubId::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
+    case load_appendix_id:      target = Runtime1::entry_for(C1StubId::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
     default: ShouldNotReachHere();
   }
   __ bind(call_patch);
@@ -440,7 +440,7 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
 void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   ce->store_parameter(_trap_request, 0);
-  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
+  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::deoptimize_id)));
   ce->add_call_info_here(_info);
   DEBUG_ONLY(__ should_not_reach_here());
 }
@@ -450,9 +450,9 @@ void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
   address a;
   if (_info->deoptimize_on_exception()) {
     // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
-    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
+    a = Runtime1::entry_for(C1StubId::predicate_failed_trap_id);
   } else {
-    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
+    a = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id);
   }
 
   ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
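
Note that `PatchingStub::emit_code` still switches over its own unscoped ids (`access_field_id`, `load_klass_id`, and friends); only the Runtime1 entry points they resolve to move to `C1StubId`, so the two enums now coexist. A compact sketch of that mapping, with stand-in types throughout: the id names come from the diff, but `address`, the fake `entry_for`, and the dispatch function are simplified assumptions.

    #include <cstdint>

    enum class C1StubId { access_field_patching_id, load_klass_patching_id };
    enum PatchID { access_field_id, load_klass_id }; // unscoped, as in PatchingStub

    using address = std::uintptr_t;

    // Stand-in for Runtime1::entry_for; returns a fake, nonzero address.
    static address entry_for(C1StubId id) { return 0x1000 + (address)id; }

    static address patch_target(PatchID id) {
      switch (id) {
        case access_field_id: return entry_for(C1StubId::access_field_patching_id);
        case load_klass_id:   return entry_for(C1StubId::load_klass_patching_id);
      }
      return 0; // unreachable for valid ids; mirrors the default: ShouldNotReachHere()
    }

    int main() { return patch_target(load_klass_id) != 0 ? 0 : 1; }
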
diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
index e2fde10b98d86..c3444d5a5abce 100644
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
@@ -399,7 +399,7 @@ int LIR_Assembler::emit_exception_handler() {
   __ verify_not_null_oop(rax);
 
   // search an exception handler (rax: exception oop, rdx: throwing pc)
-  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
+  __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::handle_exception_from_callee_id)));
   __ should_not_reach_here();
   guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
   __ end_a_stub();
@@ -463,7 +463,7 @@ int LIR_Assembler::emit_unwind_handler() {
 
   // remove the activation and dispatch to the unwind handler
   __ remove_frame(initial_frame_size_in_bytes());
-  __ jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
+  __ jump(RuntimeAddress(Runtime1::entry_for(C1StubId::unwind_exception_id)));
 
   // Emit the slow path assembly
   if (stub != nullptr) {
@@ -1566,7 +1566,7 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
 
       // instruction sequence too long to inline it here
       {
-        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::fpu2long_stub_id)));
+        __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::fpu2long_stub_id)));
       }
       break;
 #endif // _LP64
@@ -1781,7 +1781,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
 #else
         __ pushklass(k->constant_encoding(), noreg);
 #endif // _LP64
-        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
+        __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
         __ pop(klass_RInfo);
         __ pop(klass_RInfo);
         // result is a boolean
@@ -1795,7 +1795,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
       // call out-of-line instance of __ check_klass_subtype_slow_path(...):
       __ push(klass_RInfo);
       __ push(k_RInfo);
-      __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
+      __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
       __ pop(klass_RInfo);
       __ pop(k_RInfo);
       // result is a boolean
@@ -1874,7 +1874,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
     // call out-of-line instance of __ check_klass_subtype_slow_path(...):
     __ push(klass_RInfo);
     __ push(k_RInfo);
-    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
+    __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
     __ pop(klass_RInfo);
     __ pop(k_RInfo);
     // result is a boolean
@@ -2893,7 +2893,7 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
   // exception object is not added to oop map by LinearScan
   // (LinearScan assumes that no oops are in fixed registers)
   info->add_register_oop(exceptionOop);
-  Runtime1::StubID unwind_id;
+  C1StubId unwind_id;
 
   // get current pc information
   // pc is only needed if the method has an exception handler, the unwind code does not need it.
@@ -2905,9 +2905,9 @@ void LIR_Assembler::throw_op(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmit
   __ verify_not_null_oop(rax);
   // search an exception handler (rax: exception oop, rdx: throwing pc)
   if (compilation()->has_fpu_code()) {
-    unwind_id = Runtime1::handle_exception_id;
+    unwind_id = C1StubId::handle_exception_id;
   } else {
-    unwind_id = Runtime1::handle_exception_nofpu_id;
+    unwind_id = C1StubId::handle_exception_nofpu_id;
   }
   __ call(RuntimeAddress(Runtime1::entry_for(unwind_id)));
 
@@ -3262,7 +3262,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
 
       __ push(src);
       __ push(dst);
-      __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
+      __ call(RuntimeAddress(Runtime1::entry_for(C1StubId::slow_subtype_check_id)));
       __ pop(dst);
       __ pop(src);
 
diff --git a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
index d3add6975b4f2..ff237d16d2216 100644
--- a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
@@ -1430,7 +1430,7 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
   args->append(rank);
   args->append(varargs);
   LIR_Opr reg = result_register_for(x->type());
-  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
+  __ call_runtime(Runtime1::entry_for(C1StubId::new_multi_array_id),
                   LIR_OprFact::illegalOpr,
                   reg, args, info);
 
@@ -1463,12 +1463,12 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
   CodeStub* stub;
   if (x->is_incompatible_class_change_check()) {
     assert(patching_info == nullptr, "can't patch this");
-    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
+    stub = new SimpleExceptionStub(C1StubId::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
   } else if (x->is_invokespecial_receiver_check()) {
     assert(patching_info == nullptr, "can't patch this");
     stub = new DeoptimizeStub(info_for_exception, Deoptimization::Reason_class_check, Deoptimization::Action_none);
   } else {
-    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
+    stub = new SimpleExceptionStub(C1StubId::throw_class_cast_exception_id, obj.result(), info_for_exception);
   }
   LIR_Opr reg = rlock_result(x);
   LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
index 4dcacd00a6339..bf5b90db5fcb0 100644
--- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
@@ -271,7 +271,7 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register
 
   if (CURRENT_ENV->dtrace_alloc_probes()) {
     assert(obj == rax, "must be");
-    call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
+    call(RuntimeAddress(Runtime1::entry_for(C1StubId::dtrace_object_alloc_id)));
   }
 
   verify_oop(obj);
@@ -309,7 +309,7 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1,
 
   if (CURRENT_ENV->dtrace_alloc_probes()) {
     assert(obj == rax, "must be");
-    call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
+    call(RuntimeAddress(Runtime1::entry_for(C1StubId::dtrace_object_alloc_id)));
   }
 
   verify_oop(obj);
diff --git a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
index 11b39ce15eb1a..1ccb06df48937 100644
--- a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
@@ -60,7 +60,7 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
 #ifdef _LP64
   // At a method handle call, the stack may not be properly aligned
   // when returning with an exception.
-  align_stack = (stub_id() == Runtime1::handle_exception_from_callee_id);
+  align_stack = (stub_id() == (int)C1StubId::handle_exception_from_callee_id);
 #endif
 
 #ifdef _LP64
@@ -124,10 +124,10 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result, addre
     if (frame_size() == no_frame_size) {
       leave();
       jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
-    } else if (_stub_id == Runtime1::forward_exception_id) {
+    } else if (_stub_id == (int)C1StubId::forward_exception_id) {
       should_not_reach_here();
     } else {
-      jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
+      jump(RuntimeAddress(Runtime1::entry_for(C1StubId::forward_exception_id)));
     }
     bind(L);
   }
@@ -671,7 +671,7 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address targe
 }
 
 
-OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
+OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler *sasm) {
   __ block_comment("generate_handle_exception");
 
   // incoming parameters
@@ -684,7 +684,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
   OopMapSet* oop_maps = new OopMapSet();
   OopMap* oop_map = nullptr;
   switch (id) {
-  case forward_exception_id:
+  case C1StubId::forward_exception_id:
     // We're handling an exception in the context of a compiled frame.
     // The registers have been saved in the standard places.  Perform
     // an exception lookup in the caller and dispatch to the handler
@@ -703,12 +703,12 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
     __ movptr(Address(thread, JavaThread::vm_result_offset()),   NULL_WORD);
     __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
     break;
-  case handle_exception_nofpu_id:
-  case handle_exception_id:
+  case C1StubId::handle_exception_nofpu_id:
+  case C1StubId::handle_exception_id:
     // At this point all registers MAY be live.
-    oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id);
+    oop_map = save_live_registers(sasm, 1 /*thread*/, id != C1StubId::handle_exception_nofpu_id);
     break;
-  case handle_exception_from_callee_id: {
+  case C1StubId::handle_exception_from_callee_id: {
     // At this point all registers except exception oop (RAX) and
     // exception pc (RDX) are dead.
     const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord);
@@ -775,13 +775,13 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
   __ movptr(Address(rbp, 1*BytesPerWord), rax);
 
   switch (id) {
-  case forward_exception_id:
-  case handle_exception_nofpu_id:
-  case handle_exception_id:
+  case C1StubId::forward_exception_id:
+  case C1StubId::handle_exception_nofpu_id:
+  case C1StubId::handle_exception_id:
     // Restore the registers that were saved at the beginning.
-    restore_live_registers(sasm, id != handle_exception_nofpu_id);
+    restore_live_registers(sasm, id != C1StubId::handle_exception_nofpu_id);
     break;
-  case handle_exception_from_callee_id:
+  case C1StubId::handle_exception_from_callee_id:
     // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
     // since we do a leave anyway.
 
@@ -935,7 +935,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
 
     __ testptr(rax, rax);                                   // have we deoptimized?
     __ jump_cc(Assembler::equal,
-               RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
+               RuntimeAddress(Runtime1::entry_for(C1StubId::forward_exception_id)));
 
     // the deopt blob expects exceptions in the special fields of
     // JavaThread, so copy and clear pending exception.
@@ -1007,7 +1007,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
 }
 
 
-OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
+OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
 
   // for better readability
   const bool must_gc_arguments = true;
@@ -1019,7 +1019,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
   // stub code & info for the different stubs
   OopMapSet* oop_maps = nullptr;
   switch (id) {
-    case forward_exception_id:
+    case C1StubId::forward_exception_id:
       {
         oop_maps = generate_handle_exception(id, sasm);
         __ leave();
@@ -1027,19 +1027,19 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case new_instance_id:
-    case fast_new_instance_id:
-    case fast_new_instance_init_check_id:
+    case C1StubId::new_instance_id:
+    case C1StubId::fast_new_instance_id:
+    case C1StubId::fast_new_instance_init_check_id:
       {
         Register klass = rdx; // Incoming
         Register obj   = rax; // Result
 
-        if (id == new_instance_id) {
+        if (id == C1StubId::new_instance_id) {
           __ set_info("new_instance", dont_gc_arguments);
-        } else if (id == fast_new_instance_id) {
+        } else if (id == C1StubId::fast_new_instance_id) {
           __ set_info("fast new_instance", dont_gc_arguments);
         } else {
-          assert(id == fast_new_instance_init_check_id, "bad StubID");
+          assert(id == C1StubId::fast_new_instance_init_check_id, "bad C1StubId");
           __ set_info("fast new_instance init check", dont_gc_arguments);
         }
 
@@ -1058,7 +1058,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
 
       break;
 
-    case counter_overflow_id:
+    case C1StubId::counter_overflow_id:
       {
         Register bci = rax, method = rbx;
         __ enter();
@@ -1076,14 +1076,14 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case new_type_array_id:
-    case new_object_array_id:
+    case C1StubId::new_type_array_id:
+    case C1StubId::new_object_array_id:
       {
         Register length   = rbx; // Incoming
         Register klass    = rdx; // Incoming
         Register obj      = rax; // Result
 
-        if (id == new_type_array_id) {
+        if (id == C1StubId::new_type_array_id) {
           __ set_info("new_type_array", dont_gc_arguments);
         } else {
           __ set_info("new_object_array", dont_gc_arguments);
@@ -1096,7 +1096,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
           Register t0 = obj;
           __ movl(t0, Address(klass, Klass::layout_helper_offset()));
           __ sarl(t0, Klass::_lh_array_tag_shift);
-          int tag = ((id == new_type_array_id)
+          int tag = ((id == C1StubId::new_type_array_id)
                      ? Klass::_lh_array_tag_type_value
                      : Klass::_lh_array_tag_obj_value);
           __ cmpl(t0, tag);
@@ -1110,7 +1110,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
         __ enter();
         OopMap* map = save_live_registers(sasm, 3);
         int call_offset;
-        if (id == new_type_array_id) {
+        if (id == C1StubId::new_type_array_id) {
           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length);
         } else {
           call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length);
@@ -1128,7 +1128,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case new_multi_array_id:
+    case C1StubId::new_multi_array_id:
       { StubFrame f(sasm, "new_multi_array", dont_gc_arguments);
         // rax,: klass
         // rbx,: rank
@@ -1145,7 +1145,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case register_finalizer_id:
+    case C1StubId::register_finalizer_id:
       {
         __ set_info("register_finalizer", dont_gc_arguments);
 
@@ -1185,44 +1185,44 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case throw_range_check_failed_id:
+    case C1StubId::throw_range_check_failed_id:
       { StubFrame f(sasm, "range_check_failed", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);
       }
       break;
 
-    case throw_index_exception_id:
+    case C1StubId::throw_index_exception_id:
       { StubFrame f(sasm, "index_range_check_failed", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true);
       }
       break;
 
-    case throw_div0_exception_id:
+    case C1StubId::throw_div0_exception_id:
       { StubFrame f(sasm, "throw_div0_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false);
       }
       break;
 
-    case throw_null_pointer_exception_id:
+    case C1StubId::throw_null_pointer_exception_id:
       { StubFrame f(sasm, "throw_null_pointer_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false);
       }
       break;
 
-    case handle_exception_nofpu_id:
-    case handle_exception_id:
+    case C1StubId::handle_exception_nofpu_id:
+    case C1StubId::handle_exception_id:
       { StubFrame f(sasm, "handle_exception", dont_gc_arguments);
         oop_maps = generate_handle_exception(id, sasm);
       }
       break;
 
-    case handle_exception_from_callee_id:
+    case C1StubId::handle_exception_from_callee_id:
       { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments);
         oop_maps = generate_handle_exception(id, sasm);
       }
       break;
 
-    case unwind_exception_id:
+    case C1StubId::unwind_exception_id:
       { __ set_info("unwind_exception", dont_gc_arguments);
         // note: no stubframe since we are about to leave the current
         //       activation and we are calling a leaf VM function only.
@@ -1230,7 +1230,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case throw_array_store_exception_id:
+    case C1StubId::throw_array_store_exception_id:
       { StubFrame f(sasm, "throw_array_store_exception", dont_gc_arguments);
         // tos + 0: link
         //     + 1: return address
@@ -1238,19 +1238,19 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case throw_class_cast_exception_id:
+    case C1StubId::throw_class_cast_exception_id:
       { StubFrame f(sasm, "throw_class_cast_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true);
       }
       break;
 
-    case throw_incompatible_class_change_error_id:
+    case C1StubId::throw_incompatible_class_change_error_id:
       { StubFrame f(sasm, "throw_incompatible_class_cast_exception", dont_gc_arguments);
         oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false);
       }
       break;
 
-    case slow_subtype_check_id:
+    case C1StubId::slow_subtype_check_id:
       {
         // Typical calling sequence:
         // __ push(klass_RInfo);  // object klass or other subclass
@@ -1303,10 +1303,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case monitorenter_nofpu_id:
+    case C1StubId::monitorenter_nofpu_id:
       save_fpu_registers = false;
       // fall through
-    case monitorenter_id:
+    case C1StubId::monitorenter_id:
       {
         StubFrame f(sasm, "monitorenter", dont_gc_arguments);
         OopMap* map = save_live_registers(sasm, 3, save_fpu_registers);
@@ -1324,10 +1324,10 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case monitorexit_nofpu_id:
+    case C1StubId::monitorexit_nofpu_id:
       save_fpu_registers = false;
       // fall through
-    case monitorexit_id:
+    case C1StubId::monitorexit_id:
       {
         StubFrame f(sasm, "monitorexit", dont_gc_arguments);
         OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);
@@ -1347,7 +1347,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case deoptimize_id:
+    case C1StubId::deoptimize_id:
       {
         StubFrame f(sasm, "deoptimize", dont_gc_arguments);
         const int num_rt_args = 2;  // thread, trap_request
@@ -1364,35 +1364,35 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case access_field_patching_id:
+    case C1StubId::access_field_patching_id:
       { StubFrame f(sasm, "access_field_patching", dont_gc_arguments);
         // we should set up register map
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching));
       }
       break;
 
-    case load_klass_patching_id:
+    case C1StubId::load_klass_patching_id:
       { StubFrame f(sasm, "load_klass_patching", dont_gc_arguments);
         // we should set up register map
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching));
       }
       break;
 
-    case load_mirror_patching_id:
+    case C1StubId::load_mirror_patching_id:
       { StubFrame f(sasm, "load_mirror_patching", dont_gc_arguments);
         // we should set up register map
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching));
       }
       break;
 
-    case load_appendix_patching_id:
+    case C1StubId::load_appendix_patching_id:
       { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
         // we should set up register map
         oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
       }
       break;
 
-    case dtrace_object_alloc_id:
+    case C1StubId::dtrace_object_alloc_id:
       { // rax,: object
         StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
         // we can't gc here so skip the oopmap but make sure that all
@@ -1407,7 +1407,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case fpu2long_stub_id:
+    case C1StubId::fpu2long_stub_id:
       {
 #ifdef _LP64
         Label done;
@@ -1496,7 +1496,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
       }
       break;
 
-    case predicate_failed_trap_id:
+    case C1StubId::predicate_failed_trap_id:
       {
         StubFrame f(sasm, "predicate_failed_trap", dont_gc_arguments);
 
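
The mechanical pattern in every hunk above is the same: the old `Runtime1::StubID` was an unscoped enum nested in `Runtime1`, so its enumerators were bare names that converted silently to `int`, while `C1StubId` is a scoped enum whose uses must be qualified and whose integral uses must be cast. A self-contained sketch (all names invented) of what the type change buys:

#include <cstdio>

// Contrast the old unscoped nested enum with the new scoped enum class.
struct OldRuntime {
  enum StubID { alpha_id, beta_id, number_of_ids };
};

enum class NewStubId : int {
  NO_STUBID = -1,
  alpha_id,
  beta_id,
  NUM_STUBIDS
};

int main() {
  int i = OldRuntime::alpha_id;      // unscoped: converts to int silently
  // int j = NewStubId::alpha_id;    // scoped: does not compile
  int j = (int)NewStubId::alpha_id;  // conversion must be spelled out
  printf("%d %d\n", i, j);           // prints: 0 0
  return 0;
}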
diff --git a/src/hotspot/share/c1/c1_CodeStubs.hpp b/src/hotspot/share/c1/c1_CodeStubs.hpp
index 04e379842e152..9abfa45785bda 100644
--- a/src/hotspot/share/c1/c1_CodeStubs.hpp
+++ b/src/hotspot/share/c1/c1_CodeStubs.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -264,10 +264,10 @@ class NewInstanceStub: public CodeStub {
   LIR_Opr          _klass_reg;
   LIR_Opr          _result;
   CodeEmitInfo*    _info;
-  Runtime1::StubID _stub_id;
+  C1StubId         _stub_id;
 
  public:
-  NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id);
+  NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, C1StubId stub_id);
   virtual void emit_code(LIR_Assembler* e);
   virtual CodeEmitInfo* info() const             { return _info; }
   virtual void visit(LIR_OpVisitState* visitor) {
@@ -515,11 +515,11 @@ class DeoptimizeStub : public CodeStub {
 class SimpleExceptionStub: public CodeStub {
  private:
   LIR_Opr          _obj;
-  Runtime1::StubID _stub;
+  C1StubId         _stub;
   CodeEmitInfo*    _info;
 
  public:
-  SimpleExceptionStub(Runtime1::StubID stub, LIR_Opr obj, CodeEmitInfo* info):
+  SimpleExceptionStub(C1StubId stub, LIR_Opr obj, CodeEmitInfo* info):
     _obj(obj), _stub(stub), _info(info) {
     FrameMap* f = Compilation::current()->frame_map();
     f->update_reserved_argument_area_size(2 * BytesPerWord);
@@ -546,7 +546,7 @@ class SimpleExceptionStub: public CodeStub {
 
 class ArrayStoreExceptionStub: public SimpleExceptionStub {
  public:
-  ArrayStoreExceptionStub(LIR_Opr obj, CodeEmitInfo* info): SimpleExceptionStub(Runtime1::throw_array_store_exception_id, obj, info) {}
+  ArrayStoreExceptionStub(LIR_Opr obj, CodeEmitInfo* info): SimpleExceptionStub(C1StubId::throw_array_store_exception_id, obj, info) {}
 #ifndef PRODUCT
   virtual void print_name(outputStream* out) const { out->print("ArrayStoreExceptionStub"); }
 #endif // PRODUCT
diff --git a/src/hotspot/share/c1/c1_LIRGenerator.cpp b/src/hotspot/share/c1/c1_LIRGenerator.cpp
index 7b519804bfecd..4e63736503fe0 100644
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp
@@ -659,7 +659,7 @@ void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unr
   if (UseFastNewInstance && klass->is_loaded()
       && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {
 
-    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;
+    C1StubId stub_id = klass->is_initialized() ? C1StubId::fast_new_instance_id : C1StubId::fast_new_instance_init_check_id;
 
     CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);
 
@@ -670,7 +670,7 @@ void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unr
     __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                        oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
   } else {
-    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
+    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, C1StubId::new_instance_id);
     __ branch(lir_cond_always, slow_path);
     __ branch_destination(slow_path->continuation());
   }
@@ -1479,7 +1479,7 @@ void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
   args->append(receiver.result());
   CodeEmitInfo* info = state_for(x, x->state());
   call_runtime(&signature, args,
-               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
+               CAST_FROM_FN_PTR(address, Runtime1::entry_for(C1StubId::register_finalizer_id)),
                voidType, info);
 
   set_no_result(x);
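
`new_instance` chooses among three stubs: the fast paths when inline allocation is possible, with or without a class-initialization check, and the generic slow path otherwise. A condensed sketch of the selection logic, with invented names:

enum class AllocId : int {
  new_instance_id, fast_new_instance_id, fast_new_instance_init_check_id
};

static AllocId select_new_instance_stub(bool use_fast_path, bool klass_initialized) {
  if (!use_fast_path) return AllocId::new_instance_id;      // generic slow path
  return klass_initialized ? AllocId::fast_new_instance_id  // no init check needed
                           : AllocId::fast_new_instance_init_check_id;
}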
diff --git a/src/hotspot/share/c1/c1_Runtime1.cpp b/src/hotspot/share/c1/c1_Runtime1.cpp
index 8524f37177b45..5b44d5c0f1983 100644
--- a/src/hotspot/share/c1/c1_Runtime1.cpp
+++ b/src/hotspot/share/c1/c1_Runtime1.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -109,10 +109,13 @@ void StubAssembler::set_num_rt_args(int args) {
 
 // Implementation of Runtime1
 
-CodeBlob* Runtime1::_blobs[Runtime1::number_of_ids];
+CodeBlob* Runtime1::_blobs[(int)C1StubId::NUM_STUBIDS];
+
+#define C1_BLOB_NAME_DEFINE(name)  "C1 Runtime " # name "_blob",
 const char *Runtime1::_blob_names[] = {
-  RUNTIME1_STUBS(STUB_NAME, LAST_STUB_NAME)
+  C1_STUBS_DO(C1_BLOB_NAME_DEFINE)
 };
+#undef C1_BLOB_NAME_DEFINE
 
 #ifndef PRODUCT
 // statistics
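
`C1_STUBS_DO` is an X-macro: it applies its argument macro to every stub base name, so one list drives the enum tags, the blob array size, and, here, the name table. A self-contained sketch of the expansion (the two-entry list stands in for the real one):

#include <cstdio>

#define MY_STUBS_DO(do_stub) \
  do_stub(unwind_exception)  \
  do_stub(new_instance)

#define BLOB_NAME_DEFINE(name) "C1 Runtime " #name "_blob",
static const char* blob_names[] = {
  MY_STUBS_DO(BLOB_NAME_DEFINE)
};
#undef BLOB_NAME_DEFINE

int main() {
  // prints: C1 Runtime unwind_exception_blob / C1 Runtime new_instance_blob
  for (const char* n : blob_names) printf("%s\n", n);
  return 0;
}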
@@ -190,17 +193,17 @@ static void deopt_caller(JavaThread* current) {
   }
 }
 
-class StubIDStubAssemblerCodeGenClosure: public StubAssemblerCodeGenClosure {
+class C1StubIdStubAssemblerCodeGenClosure: public StubAssemblerCodeGenClosure {
  private:
-  Runtime1::StubID _id;
+  C1StubId _id;
  public:
-  StubIDStubAssemblerCodeGenClosure(Runtime1::StubID id) : _id(id) {}
+  C1StubIdStubAssemblerCodeGenClosure(C1StubId id) : _id(id) {}
   virtual OopMapSet* generate_code(StubAssembler* sasm) {
     return Runtime1::generate_code_for(_id, sasm);
   }
 };
 
-CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, int stub_id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure* cl) {
+CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, C1StubId id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure* cl) {
   ResourceMark rm;
   // create code buffer for code storage
   CodeBuffer code(buffer_blob);
@@ -212,7 +215,7 @@ CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, int stub_id, const ch
   Compilation::setup_code_buffer(&code, 0);
 
   // create assembler for code generation
-  StubAssembler* sasm = new StubAssembler(&code, name, stub_id);
+  StubAssembler* sasm = new StubAssembler(&code, name, (int)id);
   // generate code for runtime stub
   oop_maps = cl->generate_code(sasm);
   assert(oop_maps == nullptr || sasm->frame_size() != no_frame_size,
@@ -237,40 +240,41 @@ CodeBlob* Runtime1::generate_blob(BufferBlob* buffer_blob, int stub_id, const ch
   return blob;
 }
 
-void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
-  assert(0 <= id && id < number_of_ids, "illegal stub id");
+void Runtime1::generate_blob_for(BufferBlob* buffer_blob, C1StubId id) {
+  assert(C1StubId::NO_STUBID < id && id < C1StubId::NUM_STUBIDS, "illegal stub id");
   bool expect_oop_map = true;
 #ifdef ASSERT
   // Make sure that stubs that need oopmaps have them
   switch (id) {
     // These stubs don't need to have an oopmap
-  case dtrace_object_alloc_id:
-  case slow_subtype_check_id:
-  case fpu2long_stub_id:
-  case unwind_exception_id:
-  case counter_overflow_id:
+  case C1StubId::dtrace_object_alloc_id:
+  case C1StubId::slow_subtype_check_id:
+  case C1StubId::fpu2long_stub_id:
+  case C1StubId::unwind_exception_id:
+  case C1StubId::counter_overflow_id:
     expect_oop_map = false;
     break;
   default:
     break;
   }
 #endif
-  StubIDStubAssemblerCodeGenClosure cl(id);
+  C1StubIdStubAssemblerCodeGenClosure cl(id);
   CodeBlob* blob = generate_blob(buffer_blob, id, name_for(id), expect_oop_map, &cl);
   // install blob
-  _blobs[id] = blob;
+  _blobs[(int)id] = blob;
 }
 
 void Runtime1::initialize(BufferBlob* blob) {
   // platform-dependent initialization
   initialize_pd();
   // generate stubs
-  for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id);
+  int limit = (int)C1StubId::NUM_STUBIDS;
+  for (int id = 0; id < limit; id++) generate_blob_for(blob, (C1StubId)id);
   // printing
 #ifndef PRODUCT
   if (PrintSimpleStubs) {
     ResourceMark rm;
-    for (int id = 0; id < number_of_ids; id++) {
+    for (int id = 0; id < limit; id++) {
       _blobs[id]->print();
       if (_blobs[id]->oop_maps() != nullptr) {
         _blobs[id]->oop_maps()->print();
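
Because a scoped enum has no implicit integral conversions, the generation loop above iterates over `int` and casts at the typed boundary, and the bounds assert uses the two sentinels. A compilable sketch of the same idiom, with invented names:

#include <cassert>

enum class Id : int { NO_STUBID = -1, first_id, second_id, NUM_STUBIDS };

static void check(Id id) {
  // relational operators work between values of the same scoped enum
  assert(Id::NO_STUBID < id && id < Id::NUM_STUBIDS && "illegal stub id");
}

int main() {
  int limit = (int)Id::NUM_STUBIDS;   // cast out once, loop over int
  for (int i = 0; i < limit; i++) {
    check((Id)i);                     // cast back at the typed boundary
  }
  return 0;
}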
@@ -282,20 +286,22 @@ void Runtime1::initialize(BufferBlob* blob) {
   bs->generate_c1_runtime_stubs(blob);
 }
 
-CodeBlob* Runtime1::blob_for(StubID id) {
-  assert(0 <= id && id < number_of_ids, "illegal stub id");
-  return _blobs[id];
+CodeBlob* Runtime1::blob_for(C1StubId id) {
+  assert(C1StubId::NO_STUBID < id && id < C1StubId::NUM_STUBIDS, "illegal stub id");
+  return _blobs[(int)id];
 }
 
 
-const char* Runtime1::name_for(StubID id) {
-  assert(0 <= id && id < number_of_ids, "illegal stub id");
-  return _blob_names[id];
+const char* Runtime1::name_for(C1StubId id) {
+  assert(C1StubId::NO_STUBID < id && id < C1StubId::NUM_STUBIDS, "illegal stub id");
+  return _blob_names[(int)id];
 }
 
 const char* Runtime1::name_for_address(address entry) {
-  for (int id = 0; id < number_of_ids; id++) {
-    if (entry == entry_for((StubID)id)) return name_for((StubID)id);
+  int limit = (int)C1StubId::NUM_STUBIDS;
+  for (int i = 0; i < limit; i++) {
+    C1StubId id = (C1StubId)i;
+    if (entry == entry_for(id)) return name_for(id);
   }
 
 #define FUNCTION_CASE(a, f) \
@@ -425,8 +431,8 @@ JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* current, Klass* klass, int
 JRT_END
 
 
-JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* current, StubID id))
-  tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", id);
+JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* current, C1StubId id))
+  tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", (int)id);
 JRT_END
 
 
@@ -525,8 +531,8 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* c
   // This function is called when we are about to throw an exception. Therefore,
   // we have to poll the stack watermark barrier to make sure that not yet safe
   // stack frames are made safe before returning into them.
-  if (current->last_frame().cb() == Runtime1::blob_for(Runtime1::handle_exception_from_callee_id)) {
-    // The Runtime1::handle_exception_from_callee_id handler is invoked after the
+  if (current->last_frame().cb() == Runtime1::blob_for(C1StubId::handle_exception_from_callee_id)) {
+    // The C1StubId::handle_exception_from_callee_id handler is invoked after the
     // frame has been unwound. It instead builds its own stub frame, to call the
     // runtime. But the throwing frame has already been unwound here.
     StackWatermarkSet::after_unwind(current);
@@ -922,7 +928,7 @@ static Klass* resolve_field_return_klass(const methodHandle& caller, int bci, TR
 // Therefore, if there is any chance of a race condition, we try to
 // patch only naturally aligned words, as single, full-word writes.
 
-JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_id ))
+JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, C1StubId stub_id ))
 #ifndef PRODUCT
   if (PrintC1Statistics) {
     _patch_code_slowcase_cnt++;
@@ -959,9 +965,9 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_
   Handle mirror(current, nullptr); // oop needed by load_mirror_patching code
   Handle appendix(current, nullptr); // oop needed by appendix_patching code
   bool load_klass_or_mirror_patch_id =
-    (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);
+    (stub_id == C1StubId::load_klass_patching_id || stub_id == C1StubId::load_mirror_patching_id);
 
-  if (stub_id == Runtime1::access_field_patching_id) {
+  if (stub_id == C1StubId::access_field_patching_id) {
 
     Bytecode_field field_access(caller_method, bci);
     fieldDescriptor result; // initialize class if needed
@@ -1044,7 +1050,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_
       default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
     }
     load_klass = k;
-  } else if (stub_id == load_appendix_patching_id) {
+  } else if (stub_id == C1StubId::load_appendix_patching_id) {
     Bytecode_invoke bytecode(caller_method, bci);
     Bytecodes::Code bc = bytecode.invoke_code();
 
@@ -1128,7 +1134,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_
         if (TracePatching) {
           ttyLocker ttyl;
           tty->print_cr(" Patching %s at bci %d at address " INTPTR_FORMAT "  (%s)", Bytecodes::name(code), bci,
-                        p2i(instr_pc), (stub_id == Runtime1::access_field_patching_id) ? "field" : "klass");
+                        p2i(instr_pc), (stub_id == C1StubId::access_field_patching_id) ? "field" : "klass");
           nmethod* caller_code = CodeCache::find_nmethod(caller_frame.pc());
           assert(caller_code != nullptr, "nmethod not found");
 
@@ -1144,7 +1150,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_
         }
         // depending on the code below, do_patch says whether to copy the patch body back into the nmethod
         bool do_patch = true;
-        if (stub_id == Runtime1::access_field_patching_id) {
+        if (stub_id == C1StubId::access_field_patching_id) {
           // The offset may not be correct if the class was not loaded at code generation time.
           // Set it now.
           NativeMovRegMem* n_move = nativeMovRegMem_at(copy_buff);
@@ -1170,7 +1176,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_
             assert(n_copy->data() == 0 ||
                    n_copy->data() == (intptr_t)Universe::non_oop_word(),
                    "illegal init value");
-            if (stub_id == Runtime1::load_klass_patching_id) {
+            if (stub_id == C1StubId::load_klass_patching_id) {
               assert(load_klass != nullptr, "klass not set");
               n_copy->set_data((intx) (load_klass));
             } else {
@@ -1182,7 +1188,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_
               Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
             }
           }
-        } else if (stub_id == Runtime1::load_appendix_patching_id) {
+        } else if (stub_id == C1StubId::load_appendix_patching_id) {
           NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
           assert(n_copy->data() == 0 ||
                  n_copy->data() == (intptr_t)Universe::non_oop_word(),
@@ -1201,7 +1207,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_
           // first replace the tail, then the call
 #ifdef ARM
           if((load_klass_or_mirror_patch_id ||
-              stub_id == Runtime1::load_appendix_patching_id) &&
+              stub_id == C1StubId::load_appendix_patching_id) &&
               nativeMovConstReg_at(copy_buff)->is_pc_relative()) {
             nmethod* nm = CodeCache::find_nmethod(instr_pc);
             address addr = nullptr;
@@ -1209,13 +1215,13 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_
             RelocIterator mds(nm, copy_buff, copy_buff + 1);
             while (mds.next()) {
               if (mds.type() == relocInfo::oop_type) {
-                assert(stub_id == Runtime1::load_mirror_patching_id ||
-                       stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
+                assert(stub_id == C1StubId::load_mirror_patching_id ||
+                       stub_id == C1StubId::load_appendix_patching_id, "wrong stub id");
                 oop_Relocation* r = mds.oop_reloc();
                 addr = (address)r->oop_addr();
                 break;
               } else if (mds.type() == relocInfo::metadata_type) {
-                assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
+                assert(stub_id == C1StubId::load_klass_patching_id, "wrong stub id");
                 metadata_Relocation* r = mds.metadata_reloc();
                 addr = (address)r->metadata_addr();
                 break;
@@ -1238,9 +1244,9 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_
           NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);
 
           if (load_klass_or_mirror_patch_id ||
-              stub_id == Runtime1::load_appendix_patching_id) {
+              stub_id == C1StubId::load_appendix_patching_id) {
             relocInfo::relocType rtype =
-              (stub_id == Runtime1::load_klass_patching_id) ?
+              (stub_id == C1StubId::load_klass_patching_id) ?
                                    relocInfo::metadata_type :
                                    relocInfo::oop_type;
             // update relocInfo to metadata
@@ -1278,9 +1284,9 @@ JRT_END
 
 #else // DEOPTIMIZE_WHEN_PATCHING
 
-static bool is_patching_needed(JavaThread* current, Runtime1::StubID stub_id) {
-  if (stub_id == Runtime1::load_klass_patching_id ||
-      stub_id == Runtime1::load_mirror_patching_id) {
+static bool is_patching_needed(JavaThread* current, C1StubId stub_id) {
+  if (stub_id == C1StubId::load_klass_patching_id ||
+      stub_id == C1StubId::load_mirror_patching_id) {
     // last java frame on stack
     vframeStream vfst(current, true);
     assert(!vfst.at_end(), "Java frame must exist");
@@ -1309,7 +1315,7 @@ static bool is_patching_needed(JavaThread* current, Runtime1::StubID stub_id) {
   return true;
 }
 
-void Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_id) {
+void Runtime1::patch_code(JavaThread* current, C1StubId stub_id) {
 #ifndef PRODUCT
   if (PrintC1Statistics) {
     _patch_code_slowcase_cnt++;
@@ -1364,7 +1370,7 @@ int Runtime1::move_klass_patching(JavaThread* current) {
   {
     // Enter VM mode
     ResetNoHandleMark rnhm;
-    patch_code(current, load_klass_patching_id);
+    patch_code(current, C1StubId::load_klass_patching_id);
   }
   // Back in JAVA, use no oops DON'T safepoint
 
@@ -1381,7 +1387,7 @@ int Runtime1::move_mirror_patching(JavaThread* current) {
   {
     // Enter VM mode
     ResetNoHandleMark rnhm;
-    patch_code(current, load_mirror_patching_id);
+    patch_code(current, C1StubId::load_mirror_patching_id);
   }
   // Back in JAVA, use no oops DON'T safepoint
 
@@ -1398,7 +1404,7 @@ int Runtime1::move_appendix_patching(JavaThread* current) {
   {
     // Enter VM mode
     ResetNoHandleMark rnhm;
-    patch_code(current, load_appendix_patching_id);
+    patch_code(current, C1StubId::load_appendix_patching_id);
   }
   // Back in JAVA, use no oops DON'T safepoint
 
@@ -1425,7 +1431,7 @@ int Runtime1::access_field_patching(JavaThread* current) {
   {
     // Enter VM mode
     ResetNoHandleMark rnhm;
-    patch_code(current, access_field_patching_id);
+    patch_code(current, C1StubId::access_field_patching_id);
   }
   // Back in JAVA, use no oops DON'T safepoint
 
diff --git a/src/hotspot/share/c1/c1_Runtime1.hpp b/src/hotspot/share/c1/c1_Runtime1.hpp
index 2e4c9f8a73379..330c40675041b 100644
--- a/src/hotspot/share/c1/c1_Runtime1.hpp
+++ b/src/hotspot/share/c1/c1_Runtime1.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,7 @@
 #include "interpreter/interpreter.hpp"
 #include "memory/allStatic.hpp"
 #include "runtime/deoptimization.hpp"
+#include "runtime/stubDeclarations.hpp"
 
 class StubAssembler;
 
@@ -37,61 +38,26 @@ class StubAssembler;
 // runtime routines needed by code generated
 // by Compiler1.
 
-#define RUNTIME1_STUBS(stub, last_entry) \
-  stub(dtrace_object_alloc)          \
-  stub(unwind_exception)             \
-  stub(forward_exception)            \
-  stub(throw_range_check_failed)       /* throws ArrayIndexOutOfBoundsException */ \
-  stub(throw_index_exception)          /* throws IndexOutOfBoundsException */ \
-  stub(throw_div0_exception)         \
-  stub(throw_null_pointer_exception) \
-  stub(register_finalizer)           \
-  stub(new_instance)                 \
-  stub(fast_new_instance)            \
-  stub(fast_new_instance_init_check) \
-  stub(new_type_array)               \
-  stub(new_object_array)             \
-  stub(new_multi_array)              \
-  stub(handle_exception_nofpu)         /* optimized version that does not preserve fpu registers */ \
-  stub(handle_exception)             \
-  stub(handle_exception_from_callee) \
-  stub(throw_array_store_exception)  \
-  stub(throw_class_cast_exception)   \
-  stub(throw_incompatible_class_change_error)   \
-  stub(slow_subtype_check)           \
-  stub(monitorenter)                 \
-  stub(monitorenter_nofpu)             /* optimized version that does not preserve fpu registers */ \
-  stub(monitorexit)                  \
-  stub(monitorexit_nofpu)              /* optimized version that does not preserve fpu registers */ \
-  stub(deoptimize)                   \
-  stub(access_field_patching)        \
-  stub(load_klass_patching)          \
-  stub(load_mirror_patching)         \
-  stub(load_appendix_patching)       \
-  stub(fpu2long_stub)                \
-  stub(counter_overflow)             \
-  stub(predicate_failed_trap)        \
-  last_entry(number_of_ids)
-
-#define DECLARE_STUB_ID(x)       x ## _id ,
-#define DECLARE_LAST_STUB_ID(x)  x
-#define STUB_NAME(x)             #x " Runtime1 stub",
-#define LAST_STUB_NAME(x)        #x " Runtime1 stub"
-
 class StubAssemblerCodeGenClosure: public Closure {
  public:
   virtual OopMapSet* generate_code(StubAssembler* sasm) = 0;
 };
 
+// define C1StubId enum tags: unwind_exception_id etc
+
+#define C1_STUB_ID_ENUM_DECLARE(name) STUB_ID_NAME(name),
+enum class C1StubId :int {
+  NO_STUBID = -1,
+  C1_STUBS_DO(C1_STUB_ID_ENUM_DECLARE)
+  NUM_STUBIDS
+};
+#undef C1_STUB_ID_ENUM_DECLARE
+
 class Runtime1: public AllStatic {
   friend class VMStructs;
   friend class ArrayCopyStub;
 
- public:
-  enum StubID {
-    RUNTIME1_STUBS(DECLARE_STUB_ID, DECLARE_LAST_STUB_ID)
-  };
-
+public:
   // statistics
 #ifndef PRODUCT
   static uint _generic_arraycopystub_cnt;
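
The enum tags come from the shared `STUB_ID_NAME` helper (declared in runtime/stubDeclarations.hpp, and assumed here to paste an `_id` suffix, per the comment above) applied over `C1_STUBS_DO`. A hypothetical reconstruction with a two-entry stand-in list:

#define STUB_ID_NAME(base) base##_id

#define MY_C1_STUBS_DO(do_stub) \
  do_stub(dtrace_object_alloc)  \
  do_stub(unwind_exception)

#define STUB_ID_ENUM_DECLARE(name) STUB_ID_NAME(name),
enum class MyC1StubId : int {
  NO_STUBID = -1,
  MY_C1_STUBS_DO(STUB_ID_ENUM_DECLARE)
  NUM_STUBIDS   // here 2; tags are dtrace_object_alloc_id, unwind_exception_id
};
#undef STUB_ID_ENUM_DECLARE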
@@ -115,17 +81,17 @@ class Runtime1: public AllStatic {
 #endif
 
  private:
-  static CodeBlob* _blobs[number_of_ids];
+  static CodeBlob* _blobs[(int)C1StubId::NUM_STUBIDS];
   static const char* _blob_names[];
 
   // stub generation
  public:
-  static CodeBlob*  generate_blob(BufferBlob* buffer_blob, int stub_id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure *cl);
-  static void       generate_blob_for(BufferBlob* blob, StubID id);
-  static OopMapSet* generate_code_for(StubID id, StubAssembler* sasm);
+  static CodeBlob*  generate_blob(BufferBlob* buffer_blob, C1StubId id, const char* name, bool expect_oop_map, StubAssemblerCodeGenClosure *cl);
+  static void       generate_blob_for(BufferBlob* blob, C1StubId id);
+  static OopMapSet* generate_code_for(C1StubId id, StubAssembler* sasm);
  private:
   static OopMapSet* generate_exception_throw(StubAssembler* sasm, address target, bool has_argument);
-  static OopMapSet* generate_handle_exception(StubID id, StubAssembler* sasm);
+  static OopMapSet* generate_handle_exception(C1StubId id, StubAssembler* sasm);
   static void       generate_unwind_exception(StubAssembler *sasm);
   static OopMapSet* generate_patching(StubAssembler* sasm, address target);
 
@@ -140,7 +106,7 @@ class Runtime1: public AllStatic {
 
   static address counter_overflow(JavaThread* current, int bci, Method* method);
 
-  static void unimplemented_entry(JavaThread* current, StubID id);
+  static void unimplemented_entry(JavaThread* current, C1StubId id);
 
   static address exception_handler_for_pc(JavaThread* current);
 
@@ -162,7 +128,7 @@ class Runtime1: public AllStatic {
   static int move_mirror_patching(JavaThread* current);
   static int move_appendix_patching(JavaThread* current);
 
-  static void patch_code(JavaThread* current, StubID stub_id);
+  static void patch_code(JavaThread* current, C1StubId stub_id);
 
  public:
   // initialization
@@ -170,9 +136,9 @@ class Runtime1: public AllStatic {
   static void initialize_pd();
 
   // stubs
-  static CodeBlob* blob_for (StubID id);
-  static address   entry_for(StubID id)          { return blob_for(id)->code_begin(); }
-  static const char* name_for (StubID id);
+  static CodeBlob* blob_for (C1StubId id);
+  static address   entry_for(C1StubId id)          { return blob_for(id)->code_begin(); }
+  static const char* name_for (C1StubId id);
   static const char* name_for_address(address entry);
 
   // platform might add runtime names.
diff --git a/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp b/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp
index f82b5cfcc556a..449ff2e4acf8b 100644
--- a/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp
+++ b/src/hotspot/share/gc/g1/c1/g1BarrierSetC1.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -219,8 +219,8 @@ class C1G1PostBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
 void G1BarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) {
   C1G1PreBarrierCodeGenClosure pre_code_gen_cl;
   C1G1PostBarrierCodeGenClosure post_code_gen_cl;
-  _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, -1, "g1_pre_barrier_slow",
+  _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID, "g1_pre_barrier_slow",
                                                               false, &pre_code_gen_cl);
-  _post_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, -1, "g1_post_barrier_slow",
+  _post_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID, "g1_post_barrier_slow",
                                                                false, &post_code_gen_cl);
 }
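
The GC barrier blobs are generated outside the stub table, so they previously passed a bare `-1`; `C1StubId::NO_STUBID` turns that magic number into a typed, named value. A small sketch of the sentinel pattern (names invented):

enum class Id : int { NO_STUBID = -1, some_id, NUM_STUBIDS };

// Callers either pass a real table id or say explicitly that none applies.
static const char* describe(Id id) {
  return (id == Id::NO_STUBID) ? "blob generated outside the stub table"
                               : "stub from the table";
}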
diff --git a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp
index 6b104e7e19935..0e8b02d247e9b 100644
--- a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp
+++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2022, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2018, 2024, Red Hat, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -263,27 +263,27 @@ class C1ShenandoahLoadReferenceBarrierCodeGenClosure : public StubAssemblerCodeG
 
 void ShenandoahBarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) {
   C1ShenandoahPreBarrierCodeGenClosure pre_code_gen_cl;
-  _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, -1,
+  _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID,
                                                               "shenandoah_pre_barrier_slow",
                                                               false, &pre_code_gen_cl);
   if (ShenandoahLoadRefBarrier) {
     C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_strong_code_gen_cl(ON_STRONG_OOP_REF);
-    _load_reference_barrier_strong_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
+    _load_reference_barrier_strong_rt_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID,
                                                                   "shenandoah_load_reference_barrier_strong_slow",
                                                                   false, &lrb_strong_code_gen_cl);
 
     C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_strong_native_code_gen_cl(ON_STRONG_OOP_REF | IN_NATIVE);
-    _load_reference_barrier_strong_native_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
+    _load_reference_barrier_strong_native_rt_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID,
                                                                           "shenandoah_load_reference_barrier_strong_native_slow",
                                                                           false, &lrb_strong_native_code_gen_cl);
 
     C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_weak_code_gen_cl(ON_WEAK_OOP_REF);
-    _load_reference_barrier_weak_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
+    _load_reference_barrier_weak_rt_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID,
                                                                           "shenandoah_load_reference_barrier_weak_slow",
                                                                           false, &lrb_weak_code_gen_cl);
 
     C1ShenandoahLoadReferenceBarrierCodeGenClosure lrb_phantom_code_gen_cl(ON_PHANTOM_OOP_REF | IN_NATIVE);
-    _load_reference_barrier_phantom_rt_code_blob = Runtime1::generate_blob(buffer_blob, -1,
+    _load_reference_barrier_phantom_rt_code_blob = Runtime1::generate_blob(buffer_blob, C1StubId::NO_STUBID,
                                                                            "shenandoah_load_reference_barrier_phantom_slow",
                                                                            false, &lrb_phantom_code_gen_cl);
   }
diff --git a/src/hotspot/share/gc/x/c1/xBarrierSetC1.cpp b/src/hotspot/share/gc/x/c1/xBarrierSetC1.cpp
index caf7eb2e514e9..6f64392cefced 100644
--- a/src/hotspot/share/gc/x/c1/xBarrierSetC1.cpp
+++ b/src/hotspot/share/gc/x/c1/xBarrierSetC1.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -225,7 +225,7 @@ class XLoadBarrierRuntimeStubCodeGenClosure : public StubAssemblerCodeGenClosure
 
 static address generate_c1_runtime_stub(BufferBlob* blob, DecoratorSet decorators, const char* name) {
   XLoadBarrierRuntimeStubCodeGenClosure cl(decorators);
-  CodeBlob* const code_blob = Runtime1::generate_blob(blob, -1 /* stub_id */, name, false /* expect_oop_map*/, &cl);
+  CodeBlob* const code_blob = Runtime1::generate_blob(blob, C1StubId::NO_STUBID /* stub_id */, name, false /* expect_oop_map*/, &cl);
   return code_blob->code_begin();
 }
 
diff --git a/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp b/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp
index 68e215e9ebca0..9c16714b26e3f 100644
--- a/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp
+++ b/src/hotspot/share/gc/z/c1/zBarrierSetC1.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -507,7 +507,7 @@ class ZLoadBarrierRuntimeStubCodeGenClosure : public StubAssemblerCodeGenClosure
 
 static address generate_c1_load_runtime_stub(BufferBlob* blob, DecoratorSet decorators, const char* name) {
   ZLoadBarrierRuntimeStubCodeGenClosure cl(decorators);
-  CodeBlob* const code_blob = Runtime1::generate_blob(blob, -1 /* stub_id */, name, false /* expect_oop_map*/, &cl);
+  CodeBlob* const code_blob = Runtime1::generate_blob(blob, C1StubId::NO_STUBID /* stub_id */, name, false /* expect_oop_map*/, &cl);
   return code_blob->code_begin();
 }
 
@@ -527,7 +527,7 @@ class ZStoreBarrierRuntimeStubCodeGenClosure : public StubAssemblerCodeGenClosur
 
 static address generate_c1_store_runtime_stub(BufferBlob* blob, bool self_healing, const char* name) {
   ZStoreBarrierRuntimeStubCodeGenClosure cl(self_healing);
-  CodeBlob* const code_blob = Runtime1::generate_blob(blob, -1 /* stub_id */, name, false /* expect_oop_map*/, &cl);
+  CodeBlob* const code_blob = Runtime1::generate_blob(blob, C1StubId::NO_STUBID /* stub_id */, name, false /* expect_oop_map*/, &cl);
   return code_blob->code_begin();
 }
 
diff --git a/src/hotspot/share/opto/escape.cpp b/src/hotspot/share/opto/escape.cpp
index eb6887f11baf6..6ab28eaa6eeba 100644
--- a/src/hotspot/share/opto/escape.cpp
+++ b/src/hotspot/share/opto/escape.cpp
@@ -2043,7 +2043,7 @@ void ConnectionGraph::add_call_node(CallNode* call) {
     ciMethod* meth = call->as_CallJava()->method();
     if (meth == nullptr) {
       const char* name = call->as_CallStaticJava()->_name;
-      assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
+      assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0, "TODO: add failed case check");
       // Returns a newly allocated non-escaped object.
       add_java_object(call, PointsToNode::NoEscape);
       set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
@@ -2738,7 +2738,7 @@ int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
 #ifdef ASSERT
   if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == nullptr) {
     const char* name = alloc->as_CallStaticJava()->_name;
-    assert(strncmp(name, "_multianewarray", 15) == 0, "sanity");
+    assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0, "sanity");
   }
 #endif
   // Non-escaped allocation returned from Java or runtime call have unknown values in fields.
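
The escape-analysis asserts key off the stub-name prefix, which the renaming changed from `_multianewarray` to `C2 Runtime multianewarray`, so the `strncmp` length must track the new prefix. A quick standalone check that 25 is the right length:

#include <cassert>
#include <cstring>

int main() {
  assert(strlen("C2 Runtime multianewarray") == 25);
  const char* name = "C2 Runtime multianewarray2";  // e.g. the 2-dim stub name
  assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0);
  return 0;
}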
diff --git a/src/hotspot/share/opto/runtime.cpp b/src/hotspot/share/opto/runtime.cpp
index 54408146d0c26..3dd94f619fd1a 100644
--- a/src/hotspot/share/opto/runtime.cpp
+++ b/src/hotspot/share/opto/runtime.cpp
@@ -88,34 +88,27 @@
 // At command line specify the parameters: -XX:+FullGCALot -XX:FullGCALotStart=100000000
 
 
-
-
-// Compiled code entry points
-address OptoRuntime::_new_instance_Java                           = nullptr;
-address OptoRuntime::_new_array_Java                              = nullptr;
-address OptoRuntime::_new_array_nozero_Java                       = nullptr;
-address OptoRuntime::_multianewarray2_Java                        = nullptr;
-address OptoRuntime::_multianewarray3_Java                        = nullptr;
-address OptoRuntime::_multianewarray4_Java                        = nullptr;
-address OptoRuntime::_multianewarray5_Java                        = nullptr;
-address OptoRuntime::_multianewarrayN_Java                        = nullptr;
-address OptoRuntime::_vtable_must_compile_Java                    = nullptr;
-address OptoRuntime::_complete_monitor_locking_Java               = nullptr;
-address OptoRuntime::_monitor_notify_Java                         = nullptr;
-address OptoRuntime::_monitor_notifyAll_Java                      = nullptr;
-address OptoRuntime::_rethrow_Java                                = nullptr;
-
-address OptoRuntime::_slow_arraycopy_Java                         = nullptr;
-address OptoRuntime::_register_finalizer_Java                     = nullptr;
-#if INCLUDE_JVMTI
-address OptoRuntime::_notify_jvmti_vthread_start                  = nullptr;
-address OptoRuntime::_notify_jvmti_vthread_end                    = nullptr;
-address OptoRuntime::_notify_jvmti_vthread_mount                  = nullptr;
-address OptoRuntime::_notify_jvmti_vthread_unmount                = nullptr;
-#endif
-
-UncommonTrapBlob*   OptoRuntime::_uncommon_trap_blob;
-ExceptionBlob*      OptoRuntime::_exception_blob;
+#define C2_BLOB_FIELD_DEFINE(name, type) \
+  type OptoRuntime:: BLOB_FIELD_NAME(name)  = nullptr;
+#define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
+#define C2_STUB_FIELD_DEFINE(name, f, t, r) \
+  address OptoRuntime:: C2_STUB_FIELD_NAME(name) = nullptr;
+#define C2_JVMTI_STUB_FIELD_DEFINE(name) \
+  address OptoRuntime:: STUB_FIELD_NAME(name) = nullptr;
+C2_STUBS_DO(C2_BLOB_FIELD_DEFINE, C2_STUB_FIELD_DEFINE, C2_JVMTI_STUB_FIELD_DEFINE)
+#undef C2_BLOB_FIELD_DEFINE
+#undef C2_STUB_FIELD_DEFINE
+#undef C2_JVMTI_STUB_FIELD_DEFINE
+
+#define C2_BLOB_NAME_DEFINE(name, type)  "C2 Runtime " # name "_blob",
+#define C2_STUB_NAME_DEFINE(name, f, t, r)  "C2 Runtime " # name,
+#define C2_JVMTI_STUB_NAME_DEFINE(name)  "C2 Runtime " # name,
+const char* OptoRuntime::_stub_names[] = {
+  C2_STUBS_DO(C2_BLOB_NAME_DEFINE, C2_STUB_NAME_DEFINE, C2_JVMTI_STUB_NAME_DEFINE)
+};
+#undef C2_BLOB_NAME_DEFINE
+#undef C2_STUB_NAME_DEFINE
+#undef C2_JVMTI_STUB_NAME_DEFINE
 
 // This should be called in an assertion at the start of OptoRuntime routines
 // which are entered from compiled code (all of them)
@@ -132,46 +125,66 @@ static bool check_compiled_frame(JavaThread* thread) {
 }
 #endif // ASSERT
 
-
-#define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, return_pc) \
-  var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, return_pc); \
-  if (var == nullptr) { return false; }
 
-bool OptoRuntime::generate(ciEnv* env) {
+#define GEN_C2_BLOB(name, type)                    \
+  generate_ ## name ## _blob();
+
+// a few helper macros to conjure up generate_stub call arguments
+#define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
+#define C2_STUB_TYPEFUNC(name) name ## _Type
+#define C2_STUB_C_FUNC(name) CAST_FROM_FN_PTR(address, name ## _C)
+#define C2_STUB_NAME(name) stub_name(OptoStubId::name ## _id)
+
+// Almost all the C functions targeted from the generated stubs are
+// implemented locally to OptoRuntime with names that can be generated
+// from the stub name by appending suffix '_C'. However, in two cases
+// a common target method also needs to be called from shared runtime
+// stubs. In these two cases the opto stubs rely on method
+// implementations defined in class SharedRuntime. Thin forwarder
+// methods defined below rebind the generated names to those
+// SharedRuntime implementations.
+
+#define GEN_C2_STUB(name, fancy_jump, pass_tls, pass_retpc  )         \
+  C2_STUB_FIELD_NAME(name) =                                          \
+    generate_stub(env,                                                  \
+                  C2_STUB_TYPEFUNC(name),                             \
+                  C2_STUB_C_FUNC(name),                               \
+                  C2_STUB_NAME(name),                                 \
+                  fancy_jump,                                           \
+                  pass_tls,                                             \
+                  pass_retpc);                                          \
+  if (C2_STUB_FIELD_NAME(name) == nullptr) { return false; }          \
+
+#define C2_JVMTI_STUB_C_FUNC(name) CAST_FROM_FN_PTR(address, SharedRuntime::name)
+
+#define GEN_C2_JVMTI_STUB(name)                                       \
+  STUB_FIELD_NAME(name) =                                               \
+    generate_stub(env,                                                  \
+                  notify_jvmti_vthread_Type,                            \
+                  C2_JVMTI_STUB_C_FUNC(name),                         \
+                  C2_STUB_NAME(name),                                 \
+                  0,                                                    \
+                  true,                                                 \
+                  false);                                               \
+  if (STUB_FIELD_NAME(name) == nullptr) { return false; }               \
 
-  generate_uncommon_trap_blob();
-  generate_exception_blob();
-
-  // Note: tls: Means fetching the return oop out of the thread-local storage
-  //
-  //   variable/name                       type-function-gen              , runtime method                  ,fncy_jp, tls,retpc
-  // -------------------------------------------------------------------------------------------------------------------------------
-  gen(env, _new_instance_Java              , new_instance_Type            , new_instance_C                  ,    0 , true, false);
-  gen(env, _new_array_Java                 , new_array_Type               , new_array_C                     ,    0 , true, false);
-  gen(env, _new_array_nozero_Java          , new_array_Type               , new_array_nozero_C              ,    0 , true, false);
-  gen(env, _multianewarray2_Java           , multianewarray2_Type         , multianewarray2_C               ,    0 , true, false);
-  gen(env, _multianewarray3_Java           , multianewarray3_Type         , multianewarray3_C               ,    0 , true, false);
-  gen(env, _multianewarray4_Java           , multianewarray4_Type         , multianewarray4_C               ,    0 , true, false);
-  gen(env, _multianewarray5_Java           , multianewarray5_Type         , multianewarray5_C               ,    0 , true, false);
-  gen(env, _multianewarrayN_Java           , multianewarrayN_Type         , multianewarrayN_C               ,    0 , true, false);
-#if INCLUDE_JVMTI
-  gen(env, _notify_jvmti_vthread_start     , notify_jvmti_vthread_Type    , SharedRuntime::notify_jvmti_vthread_start, 0, true, false);
-  gen(env, _notify_jvmti_vthread_end       , notify_jvmti_vthread_Type    , SharedRuntime::notify_jvmti_vthread_end,   0, true, false);
-  gen(env, _notify_jvmti_vthread_mount     , notify_jvmti_vthread_Type    , SharedRuntime::notify_jvmti_vthread_mount, 0, true, false);
-  gen(env, _notify_jvmti_vthread_unmount   , notify_jvmti_vthread_Type    , SharedRuntime::notify_jvmti_vthread_unmount, 0, true, false);
-#endif
-  gen(env, _complete_monitor_locking_Java  , complete_monitor_enter_Type  , SharedRuntime::complete_monitor_locking_C, 0, false, false);
-  gen(env, _monitor_notify_Java            , monitor_notify_Type          , monitor_notify_C                ,    0 , false, false);
-  gen(env, _monitor_notifyAll_Java         , monitor_notify_Type          , monitor_notifyAll_C             ,    0 , false, false);
-  gen(env, _rethrow_Java                   , rethrow_Type                 , rethrow_C                       ,    2 , true , true );
+bool OptoRuntime::generate(ciEnv* env) {
 
-  gen(env, _slow_arraycopy_Java            , slow_arraycopy_Type          , SharedRuntime::slow_arraycopy_C ,    0 , false, false);
-  gen(env, _register_finalizer_Java        , register_finalizer_Type      , register_finalizer              ,    0 , false, false);
+  C2_STUBS_DO(GEN_C2_BLOB, GEN_C2_STUB, GEN_C2_JVMTI_STUB)
 
   return true;
 }
 
-#undef gen
+#undef GEN_C2_BLOB
+
+#undef C2_STUB_FIELD_NAME
+#undef C2_STUB_TYPEFUNC
+#undef C2_STUB_C_FUNC
+#undef C2_STUB_NAME
+#undef GEN_C2_STUB
+
+#undef C2_JVMTI_STUB_C_FUNC
+#undef GEN_C2_JVMTI_STUB
 
 
 // Helper method to do generation of RunTimeStub's
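
A runnable sketch of the generator-macro technique used by `GEN_C2_STUB` above; every name here is invented, only the macro shape mirrors the patch:

#include <cstdio>

typedef void* address;

static address generate_stub(const char* name, int fancy_jump,
                             bool pass_tls, bool pass_retpc) {
  printf("generating %s (%d, %d, %d)\n", name, fancy_jump,
         (int)pass_tls, (int)pass_retpc);
  return (address)1;   // non-null stands in for a generated entry point
}

static address _alpha_Java = nullptr;

#define STUB_FIELD_NAME(n) _ ## n ## _Java
#define GEN_STUB(n, fancy_jump, pass_tls, pass_retpc)                        \
  STUB_FIELD_NAME(n) = generate_stub(#n, fancy_jump, pass_tls, pass_retpc);  \
  if (STUB_FIELD_NAME(n) == nullptr) { return false; }

static bool generate_all() {
  GEN_STUB(alpha, 0, true, false)
  return true;
}

int main() { return generate_all() ? 0 : 1; }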
@@ -201,6 +220,19 @@ const char* OptoRuntime::stub_name(address entry) {
 #endif
 }
 
+// local methods passed as arguments to the stub generator; they forward
+// control to the corresponding JRT methods of SharedRuntime
+
+void OptoRuntime::slow_arraycopy_C(oopDesc* src,  jint src_pos,
+                                   oopDesc* dest, jint dest_pos,
+                                   jint length, JavaThread* thread) {
+  SharedRuntime::slow_arraycopy_C(src,  src_pos, dest, dest_pos, length, thread);
+}
+
+void OptoRuntime::complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current) {
+  SharedRuntime::complete_monitor_locking_C(obj, lock, current);
+}
+
 
 //=============================================================================
 // Opto compiler runtime routines
@@ -529,6 +561,10 @@ const TypeFunc *OptoRuntime::new_array_Type() {
   return TypeFunc::make(domain, range);
 }
 
+const TypeFunc *OptoRuntime::new_array_nozero_Type() {
+  return new_array_Type();
+}
+
 const TypeFunc *OptoRuntime::multianewarray_Type(int ndim) {
   // create input type (domain)
   const int nargs = ndim + 1;
@@ -607,6 +643,9 @@ const TypeFunc *OptoRuntime::complete_monitor_enter_Type() {
   return TypeFunc::make(domain,range);
 }
 
+const TypeFunc *OptoRuntime::complete_monitor_locking_Type() {
+  return complete_monitor_enter_Type();
+}
 
 //-----------------------------------------------------------------------------
 const TypeFunc *OptoRuntime::complete_monitor_exit_Type() {
@@ -637,6 +676,10 @@ const TypeFunc *OptoRuntime::monitor_notify_Type() {
   return TypeFunc::make(domain, range);
 }
 
+const TypeFunc *OptoRuntime::monitor_notifyAll_Type() {
+  return monitor_notify_Type();
+}
+
 const TypeFunc* OptoRuntime::flush_windows_Type() {
   // create input type (domain)
   const Type** fields = TypeTuple::fields(1);
@@ -1827,7 +1870,7 @@ const TypeFunc *OptoRuntime::dtrace_object_alloc_Type() {
 }
 
 
-JRT_ENTRY_NO_ASYNC(void, OptoRuntime::register_finalizer(oopDesc* obj, JavaThread* current))
+JRT_ENTRY_NO_ASYNC(void, OptoRuntime::register_finalizer_C(oopDesc* obj, JavaThread* current))
   assert(oopDesc::is_oop(obj), "must be a valid oop");
   assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
   InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
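
The two `SharedRuntime` targets break the `name`-to-`name_C` convention the macros rely on, so thin local forwarders restore it. A sketch of the idea, with invented names:

// Shared implementation that several runtimes call into.
namespace SharedImpl {
  inline int add_C(int a, int b) { return a + b; }
}

struct LocalRuntime {
  // Thin forwarder: generator macros can always target LocalRuntime::name_C.
  static int add_C(int a, int b) { return SharedImpl::add_C(a, b); }
};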
diff --git a/src/hotspot/share/opto/runtime.hpp b/src/hotspot/share/opto/runtime.hpp
index 34c2780a2f8d7..da08fc0f38660 100644
--- a/src/hotspot/share/opto/runtime.hpp
+++ b/src/hotspot/share/opto/runtime.hpp
@@ -30,6 +30,7 @@
 #include "opto/optoreg.hpp"
 #include "opto/type.hpp"
 #include "runtime/deoptimization.hpp"
+#include "runtime/stubDeclarations.hpp"
 #include "runtime/vframe.hpp"
 
 //------------------------------OptoRuntime------------------------------------
@@ -98,37 +99,46 @@ class NamedCounter : public CHeapObj<mtCompiler> {
 
 typedef const TypeFunc*(*TypeFunc_generator)();
 
+// define OptoStubId enum tags: uncommon_trap_id etc
+
+#define C2_BLOB_ID_ENUM_DECLARE(name, type) STUB_ID_NAME(name),
+#define C2_STUB_ID_ENUM_DECLARE(name, f, t, r) STUB_ID_NAME(name),
+#define C2_JVMTI_STUB_ID_ENUM_DECLARE(name) STUB_ID_NAME(name),
+enum class OptoStubId :int {
+  NO_STUBID = -1,
+  C2_STUBS_DO(C2_BLOB_ID_ENUM_DECLARE, C2_STUB_ID_ENUM_DECLARE, C2_JVMTI_STUB_ID_ENUM_DECLARE)
+  NUM_STUBIDS
+};
+#undef C2_BLOB_ID_ENUM_DECLARE
+#undef C2_STUB_ID_ENUM_DECLARE
+#undef C2_JVMTI_STUB_ID_ENUM_DECLARE
+
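For readers new to the X-macro style: each of the three callbacks wraps STUB_ID_NAME (defined in stubDeclarations.hpp below as base##_id), turning one row of C2_STUBS_DO into one enum tag, bracketed by the NO_STUBID/NUM_STUBIDS sentinels. A self-contained miniature, with an invented stub list:

```cpp
// Self-contained miniature of the enum-generating X-macro (the stub
// rows here are invented, not the real C2 list).
#include <iostream>

#define MINI_STUBS_DO(do_blob, do_stub)                                \
  do_blob(uncommon_trap, void*)                                        \
  do_stub(new_instance, 0, true, false)                                \
  do_stub(rethrow, 2, true, true)

#define MINI_STUB_ID_NAME(base) base##_id
#define MINI_BLOB_ID_ENUM(name, type) MINI_STUB_ID_NAME(name),
#define MINI_STUB_ID_ENUM(name, f, t, r) MINI_STUB_ID_NAME(name),

enum class MiniStubId : int {
  NO_STUBID = -1,
  MINI_STUBS_DO(MINI_BLOB_ID_ENUM, MINI_STUB_ID_ENUM)
  NUM_STUBIDS
};

int main() {
  std::cout << "tags generated: " << (int)MiniStubId::NUM_STUBIDS << '\n'; // 3
  return 0;
}
```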
 class OptoRuntime : public AllStatic {
   friend class Matcher;  // allow access to stub names
 
  private:
+  // declare opto stub address/blob holder static fields
+#define C2_BLOB_FIELD_DECLARE(name, type) \
+  static type        BLOB_FIELD_NAME(name);
+#define C2_STUB_FIELD_NAME(name) _ ## name ## _Java
+#define C2_STUB_FIELD_DECLARE(name, f, t, r) \
+  static address     C2_STUB_FIELD_NAME(name);
+#define C2_JVMTI_STUB_FIELD_DECLARE(name) \
+  static address     STUB_FIELD_NAME(name);
+
+  C2_STUBS_DO(C2_BLOB_FIELD_DECLARE, C2_STUB_FIELD_DECLARE, C2_JVMTI_STUB_FIELD_DECLARE)
+
+#undef C2_BLOB_FIELD_DECLARE
+#undef C2_STUB_FIELD_NAME
+#undef C2_STUB_FIELD_DECLARE
+#undef C2_JVMTI_STUB_FIELD_DECLARE
+
+  // Stub names indexed by OptoStubId
+  static const char *_stub_names[];
+
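One detail worth calling out: the hand-written fields deleted further down are exactly what these macros now regenerate, and the file-local C2_STUB_FIELD_NAME preserves the historical _<name>_Java spelling while JVMTI entries use the plain STUB_FIELD_NAME form. A compilable miniature of the two spellings (illustrative fields, not the real declarations):

```cpp
// Miniature of the two field-name conventions.
#include <iostream>

#define MINI_STUB_FIELD_NAME(base) _##base
#define MINI_C2_STUB_FIELD_NAME(name) _##name##_Java

static void* MINI_STUB_FIELD_NAME(notify_jvmti_vthread_start) = nullptr;
static void* MINI_C2_STUB_FIELD_NAME(new_instance) = nullptr;

int main() {
  // the declarations above expand to:
  //   static void* _notify_jvmti_vthread_start;
  //   static void* _new_instance_Java;
  std::cout << (_notify_jvmti_vthread_start == _new_instance_Java) << '\n';
  return 0;
}
```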
   // define stubs
   static address generate_stub(ciEnv* ci_env, TypeFunc_generator gen, address C_function, const char* name, int is_fancy_jump, bool pass_tls, bool return_pc);
 
-  // References to generated stubs
-  static address _new_instance_Java;
-  static address _new_array_Java;
-  static address _new_array_nozero_Java;
-  static address _multianewarray2_Java;
-  static address _multianewarray3_Java;
-  static address _multianewarray4_Java;
-  static address _multianewarray5_Java;
-  static address _multianewarrayN_Java;
-  static address _vtable_must_compile_Java;
-  static address _complete_monitor_locking_Java;
-  static address _rethrow_Java;
-  static address _monitor_notify_Java;
-  static address _monitor_notifyAll_Java;
-
-  static address _slow_arraycopy_Java;
-  static address _register_finalizer_Java;
-#if INCLUDE_JVMTI
-  static address _notify_jvmti_vthread_start;
-  static address _notify_jvmti_vthread_end;
-  static address _notify_jvmti_vthread_mount;
-  static address _notify_jvmti_vthread_unmount;
-#endif
-
   //
   // Implementation of runtime methods
   // =================================
@@ -148,6 +158,13 @@ class OptoRuntime : public AllStatic {
   static void multianewarray5_C(Klass* klass, int len1, int len2, int len3, int len4, int len5, JavaThread* current);
   static void multianewarrayN_C(Klass* klass, arrayOopDesc* dims, JavaThread* current);
 
+  // local methods passed as arguments to the stub generator; they
+  // forward control to the corresponding JRT methods of SharedRuntime
+  static void slow_arraycopy_C(oopDesc* src,  jint src_pos,
+                               oopDesc* dest, jint dest_pos,
+                               jint length, JavaThread* thread);
+  static void complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* current);
+
 public:
   static void monitor_notify_C(oopDesc* obj, JavaThread* current);
   static void monitor_notifyAll_C(oopDesc* obj, JavaThread* current);
@@ -168,13 +185,10 @@ class OptoRuntime : public AllStatic {
   // CodeBlob support
   // ===================================================================
 
-  static UncommonTrapBlob*   _uncommon_trap_blob;
-  static ExceptionBlob*       _exception_blob;
-
   static void generate_uncommon_trap_blob(void);
   static void generate_exception_blob();
 
-  static void register_finalizer(oopDesc* obj, JavaThread* current);
+  static void register_finalizer_C(oopDesc* obj, JavaThread* current);
 
  public:
 
@@ -188,6 +202,12 @@ class OptoRuntime : public AllStatic {
   // Returns the name of a stub
   static const char* stub_name(address entry);
 
+  // Returns the name associated with a given stub id
+  static const char* stub_name(OptoStubId id) {
+    assert(id > OptoStubId::NO_STUBID && id < OptoStubId::NUM_STUBIDS, "stub id out of range");
+    return _stub_names[(int)id];
+  }
+
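The accessor's shape (sentinel-bounded assert, then a cast-indexed table lookup) is easy to mirror in isolation; a reduced sketch with an invented enum and table:

```cpp
// Reduced sketch of the id-to-name lookup with sentinel bounds checks.
#include <cassert>
#include <iostream>

enum class MiniId : int { NO_STUBID = -1, alpha_id, beta_id, NUM_STUBIDS };

static const char* mini_stub_names[] = { "alpha", "beta" };

static const char* mini_stub_name(MiniId id) {
  // same guard as above: id must lie strictly between the sentinels
  assert(id > MiniId::NO_STUBID && id < MiniId::NUM_STUBIDS);
  return mini_stub_names[(int)id];
}

int main() {
  std::cout << mini_stub_name(MiniId::beta_id) << '\n'; // prints "beta"
  return 0;
}
```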
   // access to runtime stubs entry points for java code
   static address new_instance_Java()                     { return _new_instance_Java; }
   static address new_array_Java()                        { return _new_array_Java; }
@@ -197,7 +217,6 @@ class OptoRuntime : public AllStatic {
   static address multianewarray4_Java()                  { return _multianewarray4_Java; }
   static address multianewarray5_Java()                  { return _multianewarray5_Java; }
   static address multianewarrayN_Java()                  { return _multianewarrayN_Java; }
-  static address vtable_must_compile_stub()              { return _vtable_must_compile_Java; }
   static address complete_monitor_locking_Java()         { return _complete_monitor_locking_Java; }
   static address monitor_notify_Java()                   { return _monitor_notify_Java; }
   static address monitor_notifyAll_Java()                { return _monitor_notifyAll_Java; }
@@ -227,6 +246,7 @@ class OptoRuntime : public AllStatic {
 
   static const TypeFunc* new_instance_Type(); // object allocation (slow case)
   static const TypeFunc* new_array_Type ();   // [a]newarray (slow case)
+  static const TypeFunc* new_array_nozero_Type ();   // [a]newarray (slow case, without zeroing)
   static const TypeFunc* multianewarray_Type(int ndim); // multianewarray
   static const TypeFunc* multianewarray2_Type(); // multianewarray
   static const TypeFunc* multianewarray3_Type(); // multianewarray
@@ -234,8 +254,10 @@ class OptoRuntime : public AllStatic {
   static const TypeFunc* multianewarray5_Type(); // multianewarray
   static const TypeFunc* multianewarrayN_Type(); // multianewarray
   static const TypeFunc* complete_monitor_enter_Type();
+  static const TypeFunc* complete_monitor_locking_Type();
   static const TypeFunc* complete_monitor_exit_Type();
   static const TypeFunc* monitor_notify_Type();
+  static const TypeFunc* monitor_notifyAll_Type();
   static const TypeFunc* uncommon_trap_Type();
   static const TypeFunc* athrow_Type();
   static const TypeFunc* rethrow_Type();
diff --git a/src/hotspot/share/runtime/stubDeclarations.hpp b/src/hotspot/share/runtime/stubDeclarations.hpp
index c9e946906f796..ccca14c61b5b8 100644
--- a/src/hotspot/share/runtime/stubDeclarations.hpp
+++ b/src/hotspot/share/runtime/stubDeclarations.hpp
@@ -70,13 +70,108 @@
   /* other stubs */                                                    \
   SHARED_JFR_STUBS_DO(do_blob)                                         \
 
-// generate a stub id enum tag from a name
+// C1 stubs are always generated in a generic CodeBlob
+
+#ifdef COMPILER1
+// do_blob(name)
+#define C1_STUBS_DO(do_blob)                                           \
+  do_blob(dtrace_object_alloc)                                         \
+  do_blob(unwind_exception)                                            \
+  do_blob(forward_exception)                                           \
+  do_blob(throw_range_check_failed)       /* throws ArrayIndexOutOfBoundsException */ \
+  do_blob(throw_index_exception)          /* throws IndexOutOfBoundsException */ \
+  do_blob(throw_div0_exception)                                        \
+  do_blob(throw_null_pointer_exception)                                \
+  do_blob(register_finalizer)                                          \
+  do_blob(new_instance)                                                \
+  do_blob(fast_new_instance)                                           \
+  do_blob(fast_new_instance_init_check)                                \
+  do_blob(new_type_array)                                              \
+  do_blob(new_object_array)                                            \
+  do_blob(new_multi_array)                                             \
+  do_blob(handle_exception_nofpu)         /* optimized version that does not preserve fpu registers */ \
+  do_blob(handle_exception)                                            \
+  do_blob(handle_exception_from_callee)                                \
+  do_blob(throw_array_store_exception)                                 \
+  do_blob(throw_class_cast_exception)                                  \
+  do_blob(throw_incompatible_class_change_error)                       \
+  do_blob(slow_subtype_check)                                          \
+  do_blob(monitorenter)                                                \
+  do_blob(monitorenter_nofpu)             /* optimized version that does not preserve fpu registers */ \
+  do_blob(monitorexit)                                                 \
+  do_blob(monitorexit_nofpu)              /* optimized version that does not preserve fpu registers */ \
+  do_blob(deoptimize)                                                  \
+  do_blob(access_field_patching)                                       \
+  do_blob(load_klass_patching)                                         \
+  do_blob(load_mirror_patching)                                        \
+  do_blob(load_appendix_patching)                                      \
+  do_blob(fpu2long_stub)                                               \
+  do_blob(counter_overflow)                                            \
+  do_blob(predicate_failed_trap)                                       \
+
+#else
+#define C1_STUBS_DO(do_blob)
+#endif
+
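Since C1_STUBS_DO only supplies names, each client macro decides what a row becomes; typical consumers stringize the names into a table or count the rows. A standalone miniature with an abridged, illustrative list:

```cpp
// Standalone miniature: one client macro builds a name table from the
// blob list, another counts the rows (list abridged for illustration).
#include <iostream>

#define MINI_C1_STUBS_DO(do_blob)                                      \
  do_blob(new_instance)                                                \
  do_blob(monitorenter)                                                \
  do_blob(deoptimize)

#define MINI_NAME_ENTRY(name) #name,
#define MINI_COUNT_ENTRY(name) +1

static const char* mini_c1_names[] = { MINI_C1_STUBS_DO(MINI_NAME_ENTRY) };

int main() {
  const int count = 0 MINI_C1_STUBS_DO(MINI_COUNT_ENTRY); // expands to 0 +1 +1 +1
  for (int i = 0; i < count; i++) {
    std::cout << mini_c1_names[i] << '\n';
  }
  return 0;
}
```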
+// Opto stubs can be stored as entries with just an address or as
+// blobs of different types. The address-only entries include the
+// JVMTI stubs when JVMTI support is built in.
+//
+// n.b. blobs and stub defines are generated in the order defined by
+// C2_STUBS_DO, so a dependency from any given stub on one of its
+// predecessors is guaranteed to be satisfied. That explains the
+// initial placement of the blob declarations and the intermediate
+// placement of the jvmti stubs.
+
+#ifdef COMPILER2
+// do_jvmti_stub(name)
+#if INCLUDE_JVMTI
+#define C2_JVMTI_STUBS_DO(do_jvmti_stub)                               \
+  do_jvmti_stub(notify_jvmti_vthread_start)                            \
+  do_jvmti_stub(notify_jvmti_vthread_end)                              \
+  do_jvmti_stub(notify_jvmti_vthread_mount)                            \
+  do_jvmti_stub(notify_jvmti_vthread_unmount)                          \
+
+#else
+#define C2_JVMTI_STUBS_DO(do_jvmti_stub)
+#endif // INCLUDE_JVMTI
+
+// do_blob(name, type)
+// do_stub(name, fancy_jump, pass_tls, return_pc)
+// do_jvmti_stub(name)
+//
+// n.b. non-jvmti stubs may employ a special type of jump (0, 1 or 2)
+// and may require access to TLS and the return pc. jvmti stubs always
+// employ jump 0 and require no special access.
+#define C2_STUBS_DO(do_blob, do_stub, do_jvmti_stub)                   \
+  do_blob(uncommon_trap, UncommonTrapBlob*)                            \
+  do_blob(exception, ExceptionBlob*)                                   \
+  do_stub(new_instance, 0, true, false)                                \
+  do_stub(new_array, 0, true, false)                                   \
+  do_stub(new_array_nozero, 0, true, false)                            \
+  do_stub(multianewarray2, 0, true, false)                             \
+  do_stub(multianewarray3, 0, true, false)                             \
+  do_stub(multianewarray4, 0, true, false)                             \
+  do_stub(multianewarray5, 0, true, false)                             \
+  do_stub(multianewarrayN, 0, true, false)                             \
+  C2_JVMTI_STUBS_DO(do_jvmti_stub)                                     \
+  do_stub(complete_monitor_locking, 0, false, false)                   \
+  do_stub(monitor_notify, 0, false, false)                             \
+  do_stub(monitor_notifyAll, 0, false, false)                          \
+  do_stub(rethrow, 2, true, true)                                      \
+  do_stub(slow_arraycopy, 0, false, false)                             \
+  do_stub(register_finalizer, 0, false, false)                         \
+
+#else
+#define C2_STUBS_DO(do_blob, do_stub, do_jvmti_stub)
+#endif
+
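How the three per-row flags get used is visible in generate_stub's signature shown earlier (is_fancy_jump, pass_tls, return_pc); a hypothetical glue macro would line them up like this:

```cpp
// Hypothetical glue: each do_stub row feeds the trailing parameters of
// a generate_stub-like call (reduced signature, invented macro name).
#include <cstdio>

static void mini_generate_stub(const char* name, int fancy_jump,
                               bool pass_tls, bool return_pc) {
  std::printf("%s: jump=%d tls=%d ret_pc=%d\n",
              name, fancy_jump, (int)pass_tls, (int)return_pc);
}

#define MINI_GEN_STUB(name, fancy_jump, pass_tls, return_pc) \
  mini_generate_stub(#name, fancy_jump, pass_tls, return_pc);

int main() {
  // mirrors two rows from C2_STUBS_DO above
  MINI_GEN_STUB(new_instance, 0, true, false)
  MINI_GEN_STUB(rethrow, 2, true, true)
  return 0;
}
```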
+// generate a stub or blob id enum tag from a name
 
 #define STUB_ID_NAME(base) base##_id
 
-// generate a blob id enum tag from a name
+// generate a stub field name
 
-#define BLOB_ID_NAME(base) base##_id
+#define STUB_FIELD_NAME(base) _##base
 
 // generate a blob field name
 
diff --git a/test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java b/test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java
index 162f384dc07b2..24fbe7e64afa0 100644
--- a/test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java
+++ b/test/hotspot/jtreg/compiler/lib/ir_framework/IRNode.java
@@ -276,25 +276,25 @@ public class IRNode {
 
     public static final String ALLOC = PREFIX + "ALLOC" + POSTFIX;
     static {
-        String optoRegex = "(.*precise .*\\R((.*(?i:mov|mv|xorl|nop|spill).*|\\s*)\\R)*.*(?i:call,static).*wrapper for: _new_instance_Java" + END;
+        String optoRegex = "(.*precise .*\\R((.*(?i:mov|mv|xorl|nop|spill).*|\\s*)\\R)*.*(?i:call,static).*wrapper for: C2 Runtime new_instance" + END;
         allocNodes(ALLOC, "Allocate", optoRegex);
     }
 
     public static final String ALLOC_OF = COMPOSITE_PREFIX + "ALLOC_OF" + POSTFIX;
     static {
-        String regex = "(.*precise .*" + IS_REPLACED + ":.*\\R((.*(?i:mov|mv|xorl|nop|spill).*|\\s*)\\R)*.*(?i:call,static).*wrapper for: _new_instance_Java" + END;
+        String regex = "(.*precise .*" + IS_REPLACED + ":.*\\R((.*(?i:mov|mv|xorl|nop|spill).*|\\s*)\\R)*.*(?i:call,static).*wrapper for: C2 Runtime new_instance" + END;
         optoOnly(ALLOC_OF, regex);
     }
 
     public static final String ALLOC_ARRAY = PREFIX + "ALLOC_ARRAY" + POSTFIX;
     static {
-        String optoRegex = "(.*precise \\[.*\\R((.*(?i:mov|mv|xor|nop|spill).*|\\s*|.*(LGHI|LI).*)\\R)*.*(?i:call,static).*wrapper for: _new_array_Java" + END;
+        String optoRegex = "(.*precise \\[.*\\R((.*(?i:mov|mv|xor|nop|spill).*|\\s*|.*(LGHI|LI).*)\\R)*.*(?i:call,static).*wrapper for: C2 Runtime new_array" + END;
         allocNodes(ALLOC_ARRAY, "AllocateArray", optoRegex);
     }
 
     public static final String ALLOC_ARRAY_OF = COMPOSITE_PREFIX + "ALLOC_ARRAY_OF" + POSTFIX;
     static {
-        String regex = "(.*precise \\[.*" + IS_REPLACED + ":.*\\R((.*(?i:mov|mv|xorl|nop|spill).*|\\s*|.*(LGHI|LI).*)\\R)*.*(?i:call,static).*wrapper for: _new_array_Java" + END;
+        String regex = "(.*precise \\[.*" + IS_REPLACED + ":.*\\R((.*(?i:mov|mv|xorl|nop|spill).*|\\s*|.*(LGHI|LI).*)\\R)*.*(?i:call,static).*wrapper for: C2 Runtime new_array" + END;
         optoOnly(ALLOC_ARRAY_OF, regex);
     }
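The only behavioural change these test regexes track is the printed wrapper name, from _new_instance_Java/_new_array_Java to the "C2 Runtime <name>" form. A quick way to sanity-check the updated fragment in isolation (the sample output line is invented to match the pattern's shape, and the Java-only (?i:...) inline flag is dropped):

```cpp
// Minimal check that the new name fragment matches a plausible
// call line; the sample string is illustrative, not captured output.
#include <iostream>
#include <regex>
#include <string>

int main() {
  std::string line = "call,static  wrapper for: C2 Runtime new_instance";
  std::regex  frag("(?:call,static).*wrapper for: C2 Runtime new_instance");
  std::cout << std::regex_search(line, frag) << '\n'; // 1
  return 0;
}
```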
 
diff --git a/test/hotspot/jtreg/testlibrary_tests/ir_framework/tests/TestIRMatching.java b/test/hotspot/jtreg/testlibrary_tests/ir_framework/tests/TestIRMatching.java
index f77be989421b2..f45f708070ab0 100644
--- a/test/hotspot/jtreg/testlibrary_tests/ir_framework/tests/TestIRMatching.java
+++ b/test/hotspot/jtreg/testlibrary_tests/ir_framework/tests/TestIRMatching.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,7 @@ public static void main(String[] args) {
         runCheck(new String[] {"-XX:TLABRefillWasteFraction=50", "-XX:+UsePerfData", "-XX:+UseTLAB"}, BadFailOnConstraint.create(AndOr1.class, "test1(int)", 1, "CallStaticJava"));
         runCheck(new String[] {"-XX:TLABRefillWasteFraction=50", "-XX:-UsePerfData", "-XX:+UseTLAB"}, BadFailOnConstraint.create(AndOr1.class, "test2()", 1, "CallStaticJava"));
 
-        String[] allocMatches = { "MyClass", "wrapper for: _new_instance_Java" };
+        String[] allocMatches = { "MyClass", "wrapper for: C2 Runtime new_instance" };
         runCheck(BadFailOnConstraint.create(MultipleFailOnBad.class, "fail1()", 1, 1, "Store"),
                  BadFailOnConstraint.create(MultipleFailOnBad.class, "fail1()", 1,  3, "Store"),
                  GoodFailOnRegexConstraint.create(MultipleFailOnBad.class, "fail1()", 1,  2, 4),
@@ -114,7 +114,7 @@ public static void main(String[] args) {
                  GoodRuleConstraint.create(Calls.class, "calls()", 3)
         );
 
-        String[] allocArrayMatches = { "MyClass", "wrapper for: _new_array_Java"};
+        String[] allocArrayMatches = { "MyClass", "wrapper for: C2 Runtime new_array"};
         runCheck(BadFailOnConstraint.create(AllocArray.class, "allocArray()", 1, allocArrayMatches),
                  BadFailOnConstraint.create(AllocArray.class, "allocArray()", 2,  allocArrayMatches),
                  GoodFailOnConstraint.create(AllocArray.class, "allocArray()", 3),