@@ -1634,16 +1634,17 @@ void InstanceKlass::call_class_initializer(TRAPS) {

 void InstanceKlass::mask_for(const methodHandle& method, int bci,
   InterpreterOopMap* entry_for) {
-  // Lazily create the _oop_map_cache at first request
-  // Lock-free access requires load_acquire.
+  // Lazily create the _oop_map_cache at first request.
+  // Load_acquire is needed to safely get the instance published with CAS by another thread.
   OopMapCache* oop_map_cache = Atomic::load_acquire(&_oop_map_cache);
   if (oop_map_cache == nullptr) {
-    MutexLocker x(OopMapCacheAlloc_lock);
-    // Check if _oop_map_cache was allocated while we were waiting for this lock
-    if ((oop_map_cache = _oop_map_cache) == nullptr) {
-      oop_map_cache = new OopMapCache();
-      // Ensure _oop_map_cache is stable, since it is examined without a lock
-      Atomic::release_store(&_oop_map_cache, oop_map_cache);
+    // Try to install a new instance atomically.
+    oop_map_cache = new OopMapCache();
+    OopMapCache* other = Atomic::cmpxchg(&_oop_map_cache, (OopMapCache*)nullptr, oop_map_cache);
+    if (other != nullptr) {
+      // Someone else managed to install before us; ditch the local copy and use the existing one.
+      delete oop_map_cache;
+      oop_map_cache = other;
     }
   }
   // _oop_map_cache is constant after init; lookup below does its own locking.
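
For context, the pattern the patch switches to is one-shot lazy initialization published with a compare-and-swap: a racing thread may build its own instance, but only the first CAS wins; losers delete their copy and adopt the winner's, and later readers see the published object through an acquire load. Below is a minimal standalone sketch of that pattern, not HotSpot code, using std::atomic in place of HotSpot's Atomic:: wrappers; the names Cache, g_cache, and cache_for are hypothetical.

// Minimal standalone sketch: lazy initialization published with CAS.
#include <atomic>

struct Cache {
  // ... expensive-to-build lookup state would live here ...
};

static std::atomic<Cache*> g_cache{nullptr};

Cache* cache_for() {
  // The acquire load pairs with the release CAS below, so a reader that
  // observes a non-null pointer also observes the fully constructed Cache.
  Cache* cache = g_cache.load(std::memory_order_acquire);
  if (cache == nullptr) {
    Cache* fresh = new Cache();
    Cache* expected = nullptr;
    // Try to publish our instance; on failure, 'expected' is set to the winner's pointer.
    if (g_cache.compare_exchange_strong(expected, fresh,
                                        std::memory_order_release,
                                        std::memory_order_acquire)) {
      cache = fresh;                     // we won the race and published our copy
    } else {
      delete fresh;                      // someone else won: discard our copy
      cache = expected;                  // and use the instance they installed
    }
  }
  return cache;
}

The trade-off mirrors the diff above: the mutex goes away, at the cost of occasionally constructing and immediately deleting a redundant instance when two threads race through the nullptr check at the same time.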