@@ -149,6 +149,10 @@ size_t MetaspaceShared::core_region_alignment() {
   return os::cds_core_region_alignment();
 }
 
+size_t MetaspaceShared::protection_zone_size() {
+  return os::cds_core_region_alignment();
+}
+
 static bool shared_base_valid(char* shared_base) {
   // We check user input for SharedBaseAddress at dump time.
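A note on this first hunk: sizing `protection_zone_size()` to `os::cds_core_region_alignment()` keeps the regions that follow the zone correctly aligned, and it means a narrow Klass ID of 0 decodes to an address inside the zone. A standalone sketch of that arithmetic follows; the base, shift, and zone size are assumed illustration values, not HotSpot's actual configuration:

```c++
// Illustration only (not HotSpot code): why a protection zone at the encoding
// base catches decodes of narrow Klass ID 0. All constants are assumptions.
#include <cassert>
#include <cstdint>
#include <cstdio>

using narrowKlass = uint32_t;

int main() {
  const uintptr_t encoding_base  = 0x800000000ULL; // assumed mapping start
  const uintptr_t prot_zone_size = 64 * 1024;      // assumed cds_core_region_alignment()
  const int       shift          = 0;              // assumed dump-time shift

  // Decoding is base + (id << shift); ID 0 therefore decodes to the base itself,
  // i.e. into the protection zone, where any dereference faults immediately.
  auto decode = [&](narrowKlass nk) { return encoding_base + ((uintptr_t)nk << shift); };
  assert(decode(0) >= encoding_base && decode(0) < encoding_base + prot_zone_size);

  // Real Klass structures live in the range handed to CompressedKlassPointers,
  // which starts only after the zone (klass_range_start = base + prot_zone_size).
  printf("smallest decodable address outside the zone: 0x%lx\n",
         (unsigned long)(encoding_base + prot_zone_size));
  return 0;
}
```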
@@ -1253,6 +1257,7 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
   ReservedSpace total_space_rs, archive_space_rs, class_space_rs;
   MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;
+  size_t prot_zone_size = 0;
   char* mapped_base_address = reserve_address_space_for_archives(static_mapinfo,
                                                                  dynamic_mapinfo,
                                                                  use_requested_addr,
@@ -1264,14 +1269,21 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
     log_debug(cds)("Failed to reserve spaces (use_requested_addr=%u)", (unsigned)use_requested_addr);
   } else {
 
+    if (Metaspace::using_class_space()) {
+      prot_zone_size = protection_zone_size();
+    }
+
 #ifdef ASSERT
     // Some sanity checks after reserving address spaces for archives
     // and class space.
     assert(archive_space_rs.is_reserved(), "Sanity");
     if (Metaspace::using_class_space()) {
+      assert(archive_space_rs.base() == mapped_base_address &&
+             archive_space_rs.size() > protection_zone_size(),
+             "Archive space must lead and include the protection zone");
       // Class space must closely follow the archive space. Both spaces
       // must be aligned correctly.
-      assert(class_space_rs.is_reserved(),
+      assert(class_space_rs.is_reserved() && class_space_rs.size() > 0,
              "A class space should have been reserved");
       assert(class_space_rs.base() >= archive_space_rs.end(),
              "class space should follow the cds archive space");
@@ -1284,8 +1296,9 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
     }
 #endif // ASSERT
 
-    log_info(cds)("Reserved archive_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (%zu) bytes",
-                  p2i(archive_space_rs.base()), p2i(archive_space_rs.end()), archive_space_rs.size());
+    log_info(cds)("Reserved archive_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (%zu) bytes%s",
+                  p2i(archive_space_rs.base()), p2i(archive_space_rs.end()), archive_space_rs.size(),
+                  (prot_zone_size > 0 ? " (includes protection zone)" : ""));
     log_info(cds)("Reserved class_space_rs   [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (%zu) bytes",
                   p2i(class_space_rs.base()), p2i(class_space_rs.end()), class_space_rs.size());
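For orientation, an illustrative sketch (not part of the patch) of the single contiguous reservation that the assertions and log lines above describe, using the names a later hunk introduces:

```
mapped_base_address (= encoding_base)
v
+------------------+----------------------------+----------------------+
| protection zone  |      CDS core regions      |      class space     |
+------------------+----------------------------+----------------------+
|<-------------- archive_space_rs ------------->|<-- class_space_rs -->|
                   ^
                   klass_range_start = encoding_base + prot_zone_size
|<------- narrow Klass encoding range (base .. class_space_rs.end()) ->|
```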
@@ -1312,8 +1325,35 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
       MemoryReserver::release(archive_space_rs);
       // Mark as not reserved
       archive_space_rs = {};
+      // The protection zone is part of the archive:
+      // See comment above, the Windows way of loading CDS is to mmap the individual
+      // parts of the archive into the address region we just vacated. The protection
+      // zone will not be mapped (and, in fact, does not exist as physical region in
+      // the archive). Therefore, after removing the archive space above, we must
+      // re-reserve the protection zone part lest something else gets mapped into that
+      // area later.
+      if (prot_zone_size > 0) {
+        assert(prot_zone_size >= os::vm_allocation_granularity(), "must be"); // not just page size!
+        char* p = os::attempt_reserve_memory_at(mapped_base_address, prot_zone_size,
+                                                false, MemTag::mtClassShared);
+        assert(p == mapped_base_address || p == nullptr, "must be");
+        if (p == nullptr) {
+          log_debug(cds)("Failed to re-reserve protection zone");
+          return MAP_ARCHIVE_MMAP_FAILURE;
+        }
+      }
     }
   }
+
+  if (prot_zone_size > 0) {
+    os::commit_memory(mapped_base_address, prot_zone_size, false); // will later be protected
+    // Before mapping the core regions into the newly established address space, we mark
+    // start and the end of the future protection zone with canaries. That way we easily
+    // catch mapping errors (accidentally mapping data into the future protection zone).
+    *(mapped_base_address) = 'P';
+    *(mapped_base_address + prot_zone_size - 1) = 'P';
+  }
+
   MapArchiveResult static_result = map_archive(static_mapinfo, mapped_base_address, archive_space_rs);
   MapArchiveResult dynamic_result = (static_result == MAP_ARCHIVE_SUCCESS) ?
                                     map_archive(dynamic_mapinfo, mapped_base_address, archive_space_rs) : MAP_ARCHIVE_OTHER_FAILURE;
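The reserve, commit, canary, verify, protect sequence is easier to see in isolation. Below is a self-contained POSIX approximation: raw `mmap`/`mprotect` stand in for HotSpot's `os::` wrappers, and the 64 KiB zone size is an assumption.

```c++
// Minimal sketch, assuming POSIX; this mirrors the technique, not HotSpot's os:: layer.
#include <sys/mman.h>
#include <cassert>
#include <cstdio>
#include <cstdlib>

int main() {
  const size_t zone = 64 * 1024; // assumed protection zone size

  // "Reserve": take the address range with no access rights.
  void* p = mmap(nullptr, zone, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) { perror("mmap"); return EXIT_FAILURE; }
  char* base = (char*)p;

  // "Commit": make the range writable so the canaries can be placed.
  if (mprotect(base, zone, PROT_READ | PROT_WRITE) != 0) { perror("mprotect"); return EXIT_FAILURE; }
  base[0] = 'P';
  base[zone - 1] = 'P';

  // ... archive regions would be mapped around (never over) the zone here ...

  // Verify the canaries before protecting: a mapping error would have clobbered them.
  assert(base[0] == 'P' && base[zone - 1] == 'P' && "protection zone was overwritten?");

  // Establish the zone: from now on any touch of [base, base+zone) faults.
  if (mprotect(base, zone, PROT_NONE) != 0) { perror("mprotect"); return EXIT_FAILURE; }
  printf("protection zone at %p, %zu bytes\n", (void*)base, zone);
  return 0;
}
```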
@@ -1357,38 +1397,40 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
   if (result == MAP_ARCHIVE_SUCCESS) {
     SharedBaseAddress = (size_t)mapped_base_address;
 #ifdef _LP64
-    if (Metaspace::using_class_space()) {
-      // Set up ccs in metaspace.
-      Metaspace::initialize_class_space(class_space_rs);
-
-      // Set up compressed Klass pointer encoding: the encoding range must
-      // cover both archive and class space.
-      address cds_base = (address)static_mapinfo->mapped_base();
-      address ccs_end = (address)class_space_rs.end();
-      assert(ccs_end > cds_base, "Sanity check");
-      if (INCLUDE_CDS_JAVA_HEAP || UseCompactObjectHeaders) {
-        // The CDS archive may contain narrow Klass IDs that were precomputed at archive generation time:
-        // - every archived java object header (only if INCLUDE_CDS_JAVA_HEAP)
-        // - every archived Klass' prototype (only if +UseCompactObjectHeaders)
-        //
-        // In order for those IDs to still be valid, we need to dictate base and shift: base should be the
-        // mapping start, shift the shift used at archive generation time.
-        address precomputed_narrow_klass_base = cds_base;
-        const int precomputed_narrow_klass_shift = ArchiveBuilder::precomputed_narrow_klass_shift();
-        CompressedKlassPointers::initialize_for_given_encoding(
-          cds_base, ccs_end - cds_base, // Klass range
-          precomputed_narrow_klass_base, precomputed_narrow_klass_shift // precomputed encoding, see ArchiveBuilder
-        );
-      } else {
-        // Let JVM freely chose encoding base and shift
-        CompressedKlassPointers::initialize(
-          cds_base, ccs_end - cds_base // Klass range
-        );
-      }
-      // map_or_load_heap_region() compares the current narrow oop and klass encodings
-      // with the archived ones, so it must be done after all encodings are determined.
-      static_mapinfo->map_or_load_heap_region();
-    }
+    if (Metaspace::using_class_space()) {
+      assert(prot_zone_size > 0 &&
+             *(mapped_base_address) == 'P' &&
+             *(mapped_base_address + prot_zone_size - 1) == 'P',
+             "Protection zone was overwritten?");
+      // Set up ccs in metaspace.
+      Metaspace::initialize_class_space(class_space_rs);
+
+      // Set up compressed Klass pointer encoding: the encoding range must
+      // cover both archive and class space.
+      const address encoding_base = (address)mapped_base_address;
+      const address klass_range_start = encoding_base + prot_zone_size;
+      const size_t klass_range_size = (address)class_space_rs.end() - klass_range_start;
+      if (INCLUDE_CDS_JAVA_HEAP || UseCompactObjectHeaders) {
+        // The CDS archive may contain narrow Klass IDs that were precomputed at archive generation time:
+        // - every archived java object header (only if INCLUDE_CDS_JAVA_HEAP)
+        // - every archived Klass' prototype (only if +UseCompactObjectHeaders)
+        //
+        // In order for those IDs to still be valid, we need to dictate base and shift: base should be the
+        // mapping start (including protection zone), shift should be the shift used at archive generation time.
+        CompressedKlassPointers::initialize_for_given_encoding(
+          klass_range_start, klass_range_size,
+          encoding_base, ArchiveBuilder::precomputed_narrow_klass_shift() // precomputed encoding, see ArchiveBuilder
+        );
+      } else {
+        // Let JVM freely choose encoding base and shift
+        CompressedKlassPointers::initialize(klass_range_start, klass_range_size);
+      }
+      CompressedKlassPointers::establish_protection_zone(encoding_base, prot_zone_size);
+
+      // map_or_load_heap_region() compares the current narrow oop and klass encodings
+      // with the archived ones, so it must be done after all encodings are determined.
+      static_mapinfo->map_or_load_heap_region();
+    }
 #endif // _LP64
     log_info(cds)("initial optimized module handling: %s", CDSConfig::is_using_optimized_module_handling() ? "enabled" : "disabled");
     log_info(cds)("initial full module graph: %s", CDSConfig::is_using_full_module_graph() ? "enabled" : "disabled");
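The key constraint in the replacement block above: archived narrow Klass IDs are offsets from the mapping start under a fixed shift, so at runtime the encoding base must be the mapping start (protection zone included) and the shift must be the dump-time shift. A small standalone check of that invariant; all addresses and the shift are made-up illustration values:

```c++
// Standalone check (illustration values, not real HotSpot addresses or shift):
// precomputed narrow Klass IDs survive relocation only if the runtime encoding
// base is the actual mapping start and the shift matches dump time.
#include <cassert>
#include <cstdint>

int main() {
  const int       shift     = 10;             // assumed dump-time shift
  const uintptr_t dump_base = 0x800000000ULL; // assumed base at archive generation
  const uintptr_t klass_off = 0x40000;        // a Klass' offset from the mapping start

  // ID as stored in the archive, encoded against the dump-time base:
  uint32_t nk = (uint32_t)(((dump_base + klass_off) - dump_base) >> shift);

  // At runtime the archive may map elsewhere. Decoding against the actual
  // mapping start reproduces the same offset, hence the same Klass:
  const uintptr_t run_base = 0x900000000ULL;
  assert(run_base + ((uintptr_t)nk << shift) == run_base + klass_off);

  // Decoding against a base that skips the protection zone would displace
  // every precomputed ID by prot_zone_size, which is why encoding_base
  // includes the zone while klass_range_start begins after it.
  return 0;
}
```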
@@ -1470,7 +1512,6 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
   const size_t archive_space_alignment = core_region_alignment();
 
   // Size and requested location of the archive_space_rs (for both static and dynamic archives)
-  assert(static_mapinfo->mapping_base_offset() == 0, "Must be");
   size_t archive_end_offset = (dynamic_mapinfo == nullptr) ? static_mapinfo->mapping_end_offset() : dynamic_mapinfo->mapping_end_offset();
   size_t archive_space_size = align_up(archive_end_offset, archive_space_alignment);