Commit 59629f8, committed Mar 30, 2025

8351040: [REDO] Protection zone for easier detection of accidental zero-nKlass use

Reviewed-by: mbaesken, iklam
Parent: 8cbadf7
16 files changed: +433 -94 lines

src/hotspot/share/cds/archiveBuilder.cpp (+9 -5)

@@ -153,14 +153,14 @@ void ArchiveBuilder::SourceObjList::relocate(int i, ArchiveBuilder* builder) {
 ArchiveBuilder::ArchiveBuilder() :
   _current_dump_region(nullptr),
   _buffer_bottom(nullptr),
-  _num_dump_regions_used(0),
   _requested_static_archive_bottom(nullptr),
   _requested_static_archive_top(nullptr),
   _requested_dynamic_archive_bottom(nullptr),
   _requested_dynamic_archive_top(nullptr),
   _mapped_static_archive_bottom(nullptr),
   _mapped_static_archive_top(nullptr),
   _buffer_to_requested_delta(0),
+  _pz_region("pz", MAX_SHARED_DELTA), // protection zone -- used only during dumping; does NOT exist in cds archive.
   _rw_region("rw", MAX_SHARED_DELTA),
   _ro_region("ro", MAX_SHARED_DELTA),
   _ptrmap(mtClassShared),

@@ -323,8 +323,12 @@ address ArchiveBuilder::reserve_buffer() {
   _shared_rs = rs;

   _buffer_bottom = buffer_bottom;
-  _current_dump_region = &_rw_region;
-  _num_dump_regions_used = 1;
+
+  if (CDSConfig::is_dumping_static_archive()) {
+    _current_dump_region = &_pz_region;
+  } else {
+    _current_dump_region = &_rw_region;
+  }
   _current_dump_region->init(&_shared_rs, &_shared_vs);

   ArchivePtrMarker::initialize(&_ptrmap, &_shared_vs);

@@ -366,7 +370,8 @@ address ArchiveBuilder::reserve_buffer() {
   if (CDSConfig::is_dumping_static_archive()) {
     // We don't want any valid object to be at the very bottom of the archive.
     // See ArchivePtrMarker::mark_pointer().
-    rw_region()->allocate(16);
+    _pz_region.allocate(MetaspaceShared::protection_zone_size());
+    start_dump_region(&_rw_region);
   }

   return buffer_bottom;

@@ -544,7 +549,6 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref
 void ArchiveBuilder::start_dump_region(DumpRegion* next) {
   current_dump_region()->pack(next);
   _current_dump_region = next;
-  _num_dump_regions_used ++;
 }

 char* ArchiveBuilder::ro_strdup(const char* s) {
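
For illustration, here is a minimal, self-contained sketch of the dump-side idea above. This is not HotSpot code: DumpBuffer and the sizes are invented, and the real builder works with DumpRegion over a ReservedSpace, with protection_zone_size() equal to the CDS core region alignment. The point it demonstrates is that the "pz" region burns the first protection_zone_size() bytes of the dump buffer, so no archived object can ever sit at offset 0 from the future encoding base.

// Minimal sketch, not HotSpot code. "DumpBuffer" and the sizes are invented for illustration.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

struct DumpBuffer {
  std::vector<uint8_t> bytes;
  size_t top = 0;                              // bump pointer
  explicit DumpBuffer(size_t capacity) : bytes(capacity) {}
  size_t allocate(size_t size) {               // returns the offset of the new allocation
    size_t offset = top;
    top += size;
    assert(top <= bytes.size() && "dump buffer overflow");
    return offset;
  }
};

int main() {
  const size_t protection_zone_size = 64 * 1024;   // assumed; the real value is the core region alignment
  DumpBuffer buf(1024 * 1024);
  buf.allocate(protection_zone_size);              // "pz": claim the lowest offsets up front
  size_t first_rw_object = buf.allocate(128);      // first real rw-region allocation
  assert(first_rw_object >= protection_zone_size); // no archived object can ever map to offset 0
  return 0;
}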

src/hotspot/share/cds/archiveBuilder.hpp (+8 -5)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -96,7 +96,6 @@ class ArchiveBuilder : public StackObj {
 protected:
   DumpRegion* _current_dump_region;
   address _buffer_bottom; // for writing the contents of rw/ro regions
-  int _num_dump_regions_used;

   // These are the addresses where we will request the static and dynamic archives to be
   // mapped at run time. If the request fails (due to ASLR), we will map the archives at

@@ -210,6 +209,12 @@ class ArchiveBuilder : public StackObj {
   ReservedSpace _shared_rs;
   VirtualSpace _shared_vs;

+  // The "pz" region is used only during static dumps to reserve an unused space between SharedBaseAddress and
+  // the bottom of the rw region. During runtime, this space will be filled with a reserved area that disallows
+  // read/write/exec, so we can track for bad CompressedKlassPointers encoding.
+  // Note: this region does NOT exist in the cds archive.
+  DumpRegion _pz_region;
+
   DumpRegion _rw_region;
   DumpRegion _ro_region;

@@ -270,9 +275,6 @@ class ArchiveBuilder : public StackObj {

 protected:
   virtual void iterate_roots(MetaspaceClosure* it) = 0;
-
-  static const int _total_dump_regions = 2;
-
   void start_dump_region(DumpRegion* next);

 public:

@@ -367,6 +369,7 @@ class ArchiveBuilder : public StackObj {
   void remember_embedded_pointer_in_enclosing_obj(MetaspaceClosure::Ref* ref);
   static void serialize_dynamic_archivable_items(SerializeClosure* soc);

+  DumpRegion* pz_region() { return &_pz_region; }
   DumpRegion* rw_region() { return &_rw_region; }
   DumpRegion* ro_region() { return &_ro_region; }
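
The effect described in the new _pz_region comment comes down to a bit of decoding arithmetic. A rough sketch, with constants and a decode() helper that are illustrative and do not mirror the CompressedKlassPointers API: a narrow Klass value nk decodes to base + (nk << shift), so the invalid value 0 always decodes to the encoding base itself, which with this change is the start of the inaccessible protection zone rather than the first archived Klass.

// Illustrative only; the constants and decode() do not mirror HotSpot's exact API.
#include <cassert>
#include <cstdint>

static const uint64_t encoding_base = 0x800000000;  // == runtime mapping base (example value)
static const int      shift         = 10;           // example shift
static const uint64_t zone_size     = 64 * 1024;    // assumed protection zone size

static uint64_t decode(uint32_t narrow_klass) {
  return encoding_base + ((uint64_t)narrow_klass << shift);
}

int main() {
  // The invalid narrow Klass value 0 decodes to the encoding base itself,
  // i.e. into the no-access protection zone...
  assert(decode(0) == encoding_base);
  // ...while any legal value decodes at or above the first core region.
  assert(decode((uint32_t)(zone_size >> shift)) >= encoding_base + zone_size);
  return 0;
}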

src/hotspot/share/cds/archiveUtils.cpp (+26 -21)

@@ -74,34 +74,39 @@ void ArchivePtrMarker::initialize(CHeapBitMap* ptrmap, VirtualSpace* vs) {
 }

 void ArchivePtrMarker::initialize_rw_ro_maps(CHeapBitMap* rw_ptrmap, CHeapBitMap* ro_ptrmap) {
-  address* rw_bottom = (address*)ArchiveBuilder::current()->rw_region()->base();
-  address* ro_bottom = (address*)ArchiveBuilder::current()->ro_region()->base();
+  address* buff_bottom = (address*)ArchiveBuilder::current()->buffer_bottom();
+  address* rw_bottom = (address*)ArchiveBuilder::current()->rw_region()->base();
+  address* ro_bottom = (address*)ArchiveBuilder::current()->ro_region()->base();

-  _rw_ptrmap = rw_ptrmap;
-  _ro_ptrmap = ro_ptrmap;
+  // The bit in _ptrmap that covers the very first word in the rw/ro regions.
+  size_t rw_start = rw_bottom - buff_bottom;
+  size_t ro_start = ro_bottom - buff_bottom;

+  // The number of bits used by the rw/ro ptrmaps. We might have lots of zero
+  // bits at the bottom and top of rw/ro ptrmaps, but these zeros will be
+  // removed by FileMapInfo::write_bitmap_region().
   size_t rw_size = ArchiveBuilder::current()->rw_region()->used() / sizeof(address);
   size_t ro_size = ArchiveBuilder::current()->ro_region()->used() / sizeof(address);
-  // ro_start is the first bit in _ptrmap that covers the pointer that would sit at ro_bottom.
-  // E.g., if rw_bottom = (address*)100
-  //       ro_bottom = (address*)116
-  //       then for 64-bit platform:
-  //       ro_start = ro_bottom - rw_bottom = (116 - 100) / sizeof(address) = 2;
-  size_t ro_start = ro_bottom - rw_bottom;
-
-  // Note: ptrmap is big enough only to cover the last pointer in ro_region.
-  // See ArchivePtrMarker::compact()
-  _rw_ptrmap->initialize(rw_size);
-  _ro_ptrmap->initialize(_ptrmap->size() - ro_start);
-
-  for (size_t rw_bit = 0; rw_bit < _rw_ptrmap->size(); rw_bit++) {
-    _rw_ptrmap->at_put(rw_bit, _ptrmap->at(rw_bit));
+
+  // The last (exclusive) bit in _ptrmap that covers the rw/ro regions.
+  // Note: _ptrmap is dynamically expanded only when an actual pointer is written, so
+  // it may not be as large as we want.
+  size_t rw_end = MIN2<size_t>(rw_start + rw_size, _ptrmap->size());
+  size_t ro_end = MIN2<size_t>(ro_start + ro_size, _ptrmap->size());
+
+  rw_ptrmap->initialize(rw_size);
+  ro_ptrmap->initialize(ro_size);
+
+  for (size_t rw_bit = rw_start; rw_bit < rw_end; rw_bit++) {
+    rw_ptrmap->at_put(rw_bit - rw_start, _ptrmap->at(rw_bit));
   }

-  for(size_t ro_bit = ro_start; ro_bit < _ptrmap->size(); ro_bit++) {
-    _ro_ptrmap->at_put(ro_bit-ro_start, _ptrmap->at(ro_bit));
+  for(size_t ro_bit = ro_start; ro_bit < ro_end; ro_bit++) {
+    ro_ptrmap->at_put(ro_bit - ro_start, _ptrmap->at(ro_bit));
   }
-  assert(_ptrmap->size() - ro_start == _ro_ptrmap->size(), "must be");
+
+  _rw_ptrmap = rw_ptrmap;
+  _ro_ptrmap = ro_ptrmap;
 }

 void ArchivePtrMarker::mark_pointer(address* ptr_loc) {
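
The rewritten initialize_rw_ro_maps() is essentially "copy a window out of one big bitmap", now anchored at the buffer bottom rather than the rw base because the pz region sits below rw. A rough equivalent of that slicing, with std::vector<bool> standing in for CHeapBitMap and an invented helper name:

// Sketch only: std::vector<bool> stands in for HotSpot's CHeapBitMap.
#include <algorithm>
#include <cstddef>
#include <vector>

// Copy the window [region_start_bit, region_start_bit + region_bits) out of the
// full one-bit-per-word pointer bitmap. The full map only grows when a pointer
// is actually marked, so the window end is clamped to full_map.size(), mirroring
// the MIN2<size_t>(start + size, _ptrmap->size()) computation in the patch.
std::vector<bool> slice_ptrmap(const std::vector<bool>& full_map,
                               size_t region_start_bit,
                               size_t region_bits) {
  std::vector<bool> region_map(region_bits, false);
  size_t end = std::min(region_start_bit + region_bits, full_map.size());
  for (size_t bit = region_start_bit; bit < end; bit++) {
    region_map[bit - region_start_bit] = full_map[bit];
  }
  return region_map;
}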

src/hotspot/share/cds/dynamicArchive.cpp (-1)

@@ -173,7 +173,6 @@ class DynamicArchiveBuilder : public ArchiveBuilder {

     post_dump();

-    assert(_num_dump_regions_used == _total_dump_regions, "must be");
     verify_universe("After CDS dynamic dump");
   }

src/hotspot/share/cds/filemap.hpp (+1 -1)

@@ -399,7 +399,7 @@ class FileMapInfo : public CHeapObj<mtInternal> {
   // The offset of the (exclusive) end of the last core region in this archive, relative to SharedBaseAddress
   size_t mapping_end_offset() const { return last_core_region()->mapping_end_offset(); }

-  char* mapped_base() const { return first_core_region()->mapped_base(); }
+  char* mapped_base() const { return header()->mapped_base_address(); }
   char* mapped_end() const { return last_core_region()->mapped_end(); }

   // Non-zero if the archive needs to be mapped a non-default location due to ASLR.

src/hotspot/share/cds/metaspaceShared.cpp (+77 -36)

@@ -149,6 +149,10 @@ size_t MetaspaceShared::core_region_alignment() {
   return os::cds_core_region_alignment();
 }

+size_t MetaspaceShared::protection_zone_size() {
+  return os::cds_core_region_alignment();
+}
+
 static bool shared_base_valid(char* shared_base) {
   // We check user input for SharedBaseAddress at dump time.

@@ -1253,6 +1257,7 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File

   ReservedSpace total_space_rs, archive_space_rs, class_space_rs;
   MapArchiveResult result = MAP_ARCHIVE_OTHER_FAILURE;
+  size_t prot_zone_size = 0;
   char* mapped_base_address = reserve_address_space_for_archives(static_mapinfo,
                                                                  dynamic_mapinfo,
                                                                  use_requested_addr,

@@ -1264,14 +1269,21 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
     log_debug(cds)("Failed to reserve spaces (use_requested_addr=%u)", (unsigned)use_requested_addr);
   } else {

+    if (Metaspace::using_class_space()) {
+      prot_zone_size = protection_zone_size();
+    }
+
 #ifdef ASSERT
     // Some sanity checks after reserving address spaces for archives
     // and class space.
     assert(archive_space_rs.is_reserved(), "Sanity");
     if (Metaspace::using_class_space()) {
+      assert(archive_space_rs.base() == mapped_base_address &&
+             archive_space_rs.size() > protection_zone_size(),
+             "Archive space must lead and include the protection zone");
       // Class space must closely follow the archive space. Both spaces
       // must be aligned correctly.
-      assert(class_space_rs.is_reserved(),
+      assert(class_space_rs.is_reserved() && class_space_rs.size() > 0,
             "A class space should have been reserved");
       assert(class_space_rs.base() >= archive_space_rs.end(),
             "class space should follow the cds archive space");

@@ -1284,8 +1296,9 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
     }
 #endif // ASSERT

-    log_info(cds)("Reserved archive_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (%zu) bytes",
-                  p2i(archive_space_rs.base()), p2i(archive_space_rs.end()), archive_space_rs.size());
+    log_info(cds)("Reserved archive_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (%zu) bytes%s",
+                  p2i(archive_space_rs.base()), p2i(archive_space_rs.end()), archive_space_rs.size(),
+                  (prot_zone_size > 0 ? " (includes protection zone)" : ""));
     log_info(cds)("Reserved class_space_rs [" INTPTR_FORMAT " - " INTPTR_FORMAT "] (%zu) bytes",
                   p2i(class_space_rs.base()), p2i(class_space_rs.end()), class_space_rs.size());

@@ -1312,8 +1325,35 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
       MemoryReserver::release(archive_space_rs);
       // Mark as not reserved
       archive_space_rs = {};
+      // The protection zone is part of the archive:
+      // See comment above, the Windows way of loading CDS is to mmap the individual
+      // parts of the archive into the address region we just vacated. The protection
+      // zone will not be mapped (and, in fact, does not exist as physical region in
+      // the archive). Therefore, after removing the archive space above, we must
+      // re-reserve the protection zone part lest something else gets mapped into that
+      // area later.
+      if (prot_zone_size > 0) {
+        assert(prot_zone_size >= os::vm_allocation_granularity(), "must be"); // not just page size!
+        char* p = os::attempt_reserve_memory_at(mapped_base_address, prot_zone_size,
+                                                false, MemTag::mtClassShared);
+        assert(p == mapped_base_address || p == nullptr, "must be");
+        if (p == nullptr) {
+          log_debug(cds)("Failed to re-reserve protection zone");
+          return MAP_ARCHIVE_MMAP_FAILURE;
+        }
+      }
     }
   }
+
+  if (prot_zone_size > 0) {
+    os::commit_memory(mapped_base_address, prot_zone_size, false); // will later be protected
+    // Before mapping the core regions into the newly established address space, we mark
+    // start and the end of the future protection zone with canaries. That way we easily
+    // catch mapping errors (accidentally mapping data into the future protection zone).
+    *(mapped_base_address) = 'P';
+    *(mapped_base_address + prot_zone_size - 1) = 'P';
+  }
+
   MapArchiveResult static_result = map_archive(static_mapinfo, mapped_base_address, archive_space_rs);
   MapArchiveResult dynamic_result = (static_result == MAP_ARCHIVE_SUCCESS) ?
                                     map_archive(dynamic_mapinfo, mapped_base_address, archive_space_rs) : MAP_ARCHIVE_OTHER_FAILURE;

@@ -1357,38 +1397,40 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
   if (result == MAP_ARCHIVE_SUCCESS) {
     SharedBaseAddress = (size_t)mapped_base_address;
 #ifdef _LP64
-    if (Metaspace::using_class_space()) {
-      // Set up ccs in metaspace.
-      Metaspace::initialize_class_space(class_space_rs);
-
-      // Set up compressed Klass pointer encoding: the encoding range must
-      // cover both archive and class space.
-      address cds_base = (address)static_mapinfo->mapped_base();
-      address ccs_end = (address)class_space_rs.end();
-      assert(ccs_end > cds_base, "Sanity check");
-      if (INCLUDE_CDS_JAVA_HEAP || UseCompactObjectHeaders) {
-        // The CDS archive may contain narrow Klass IDs that were precomputed at archive generation time:
-        // - every archived java object header (only if INCLUDE_CDS_JAVA_HEAP)
-        // - every archived Klass' prototype (only if +UseCompactObjectHeaders)
-        //
-        // In order for those IDs to still be valid, we need to dictate base and shift: base should be the
-        // mapping start, shift the shift used at archive generation time.
-        address precomputed_narrow_klass_base = cds_base;
-        const int precomputed_narrow_klass_shift = ArchiveBuilder::precomputed_narrow_klass_shift();
-        CompressedKlassPointers::initialize_for_given_encoding(
-          cds_base, ccs_end - cds_base, // Klass range
-          precomputed_narrow_klass_base, precomputed_narrow_klass_shift // precomputed encoding, see ArchiveBuilder
-        );
-      } else {
-        // Let JVM freely chose encoding base and shift
-        CompressedKlassPointers::initialize (
-          cds_base, ccs_end - cds_base // Klass range
-        );
-      }
-      // map_or_load_heap_region() compares the current narrow oop and klass encodings
-      // with the archived ones, so it must be done after all encodings are determined.
-      static_mapinfo->map_or_load_heap_region();
-    }
+    if (Metaspace::using_class_space()) {
+      assert(prot_zone_size > 0 &&
+             *(mapped_base_address) == 'P' &&
+             *(mapped_base_address + prot_zone_size - 1) == 'P',
+             "Protection zone was overwritten?");
+      // Set up ccs in metaspace.
+      Metaspace::initialize_class_space(class_space_rs);
+
+      // Set up compressed Klass pointer encoding: the encoding range must
+      // cover both archive and class space.
+      const address encoding_base = (address)mapped_base_address;
+      const address klass_range_start = encoding_base + prot_zone_size;
+      const size_t klass_range_size = (address)class_space_rs.end() - klass_range_start;
+      if (INCLUDE_CDS_JAVA_HEAP || UseCompactObjectHeaders) {
+        // The CDS archive may contain narrow Klass IDs that were precomputed at archive generation time:
+        // - every archived java object header (only if INCLUDE_CDS_JAVA_HEAP)
+        // - every archived Klass' prototype (only if +UseCompactObjectHeaders)
+        //
+        // In order for those IDs to still be valid, we need to dictate base and shift: base should be the
+        // mapping start (including protection zone), shift should be the shift used at archive generation time.
+        CompressedKlassPointers::initialize_for_given_encoding(
+          klass_range_start, klass_range_size,
+          encoding_base, ArchiveBuilder::precomputed_narrow_klass_shift() // precomputed encoding, see ArchiveBuilder
+        );
+      } else {
+        // Let JVM freely choose encoding base and shift
+        CompressedKlassPointers::initialize(klass_range_start, klass_range_size);
+      }
+      CompressedKlassPointers::establish_protection_zone(encoding_base, prot_zone_size);
+
+      // map_or_load_heap_region() compares the current narrow oop and klass encodings
+      // with the archived ones, so it must be done after all encodings are determined.
+      static_mapinfo->map_or_load_heap_region();
+    }
 #endif // _LP64
     log_info(cds)("initial optimized module handling: %s", CDSConfig::is_using_optimized_module_handling() ? "enabled" : "disabled");
     log_info(cds)("initial full module graph: %s", CDSConfig::is_using_full_module_graph() ? "enabled" : "disabled");

@@ -1470,7 +1512,6 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
   const size_t archive_space_alignment = core_region_alignment();

   // Size and requested location of the archive_space_rs (for both static and dynamic archives)
-  assert(static_mapinfo->mapping_base_offset() == 0, "Must be");
   size_t archive_end_offset = (dynamic_mapinfo == nullptr) ? static_mapinfo->mapping_end_offset() : dynamic_mapinfo->mapping_end_offset();
   size_t archive_space_size = align_up(archive_end_offset, archive_space_alignment);
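
To see what the runtime side of the zone buys, here is a stand-alone, POSIX-only sketch of the same sequence in miniature. It is not the HotSpot code path, which goes through os::commit_memory and CompressedKlassPointers::establish_protection_zone: commit the page at the mapping base, plant 'P' canaries at both ends, check them after the core regions are mapped, then revoke all access so that touching a Klass decoded from the invalid narrow value 0 faults immediately.

// POSIX-only conceptual sketch; not the HotSpot implementation.
#include <cassert>
#include <cstddef>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  size_t zone_size = (size_t)sysconf(_SC_PAGESIZE);
  // Reserve and commit the zone (HotSpot does this as part of the archive mapping).
  char* base = (char*)mmap(nullptr, zone_size, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(base != (char*)MAP_FAILED);
  // Canaries at both ends; if a core region were accidentally mapped over the
  // zone, these bytes would be overwritten and the check below would fire.
  base[0] = 'P';
  base[zone_size - 1] = 'P';
  assert(base[0] == 'P' && base[zone_size - 1] == 'P');
  // Finally revoke all access: from now on any read/write/execute in the zone,
  // e.g. dereferencing a Klass decoded from the invalid narrow value 0, traps
  // instead of silently reading whatever happens to be mapped there.
  int rc = mprotect(base, zone_size, PROT_NONE);
  assert(rc == 0);
  // char c = base[0];   // would raise SIGSEGV here, which is exactly the point
  munmap(base, zone_size);
  return 0;
}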

src/hotspot/share/cds/metaspaceShared.hpp (+1)

@@ -139,6 +139,7 @@ class MetaspaceShared : AllStatic {
   // Alignment for the 2 core CDS regions (RW/RO) only.
   // (Heap region alignments are decided by GC).
   static size_t core_region_alignment();
+  static size_t protection_zone_size();
   static void rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread* thread, InstanceKlass* ik);
   // print loaded classes names to file.
   static void dump_loaded_classes(const char* file_name, TRAPS);

src/hotspot/share/include/cds.h (+4 -1)

@@ -70,7 +70,10 @@ typedef struct CDSFileMapRegion {
   size_t _ptrmap_offset;     // Bitmap for relocating native pointer fields in archived heap objects.
                              // (The base address is the bottom of the BM region).
   size_t _ptrmap_size_in_bits;
-  char*  _mapped_base;       // Actually mapped address (null if this region is not mapped).
+  char*  _mapped_base;       // Actually mapped address used for mapping the core regions. At that address the
+                             // zero nklass protection zone is established; following that (at offset
+                             // MetaspaceShared::protection_zone_size()) the lowest core region (rw for the
+                             // static archive) is mapped.
   bool   _in_reserved_space; // Is this region in a ReservedSpace
 } CDSFileMapRegion;
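
The layout described by the new _mapped_base comment reduces to a few address relations. In the sketch below the numbers are examples only and protection_zone_size is assumed to equal the core region alignment: the mapping base, the narrow-Klass encoding base and SharedBaseAddress coincide, while the lowest core region, and with it the Klass range handed to the encoder, starts one protection zone higher.

// Example values only; the relations between them are the point.
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t protection_zone_size = 64 * 1024;   // assumed == core region alignment
  const uint64_t mapped_base          = 0x800000000; // SharedBaseAddress == encoding base (example)
  const uint64_t lowest_core_region   = mapped_base + protection_zone_size; // "rw" for a static archive
  const uint64_t klass_range_start    = lowest_core_region;                 // what the encoder is given
  assert(klass_range_start - mapped_base == protection_zone_size);
  assert(lowest_core_region > mapped_base);  // so no valid Klass decodes from narrow value 0
  return 0;
}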

Comments (1)

openjdk-notifier[bot] commented on Mar 30, 2025