@@ -176,143 +176,122 @@ GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap
 GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
 
-void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) {
-  size_t total_size = non_nmethod_size + profiled_size + non_profiled_size;
-  // Prepare error message
-  const char* error = "Invalid code heap sizes";
-  err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)"
-                  " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K",
-                  non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K);
-
-  if (total_size > cache_size) {
-    // Some code heap sizes were explicitly set: total_size must be <= cache_size
-    message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
-    vm_exit_during_initialization(error, message);
-  } else if (all_set && total_size != cache_size) {
-    // All code heap sizes were explicitly set: total_size must equal cache_size
-    message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
-    vm_exit_during_initialization(error, message);
+static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
+  if (size < required_size) {
+    log_debug(codecache)("Code heap (%s) size " SIZE_FORMAT "K below required minimal size " SIZE_FORMAT "K",
+                         codeheap, size/K, required_size/K);
+    err_msg title("Not enough space in %s to run VM", codeheap);
+    err_msg message(SIZE_FORMAT "K < " SIZE_FORMAT "K", size/K, required_size/K);
+    vm_exit_during_initialization(title, message);
   }
 }
 
+struct CodeHeapInfo {
+  size_t size;
+  bool set;
+  bool enabled;
+};
+
+static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
+  assert(!heap->set, "sanity");
+  heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
+}
+
 void CodeCache::initialize_heaps() {
-  bool non_nmethod_set = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
-  bool profiled_set = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
-  bool non_profiled_set = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
-  const size_t ps = page_size(false, 8);
-  const size_t min_size = MAX2(os::vm_allocation_granularity(), ps);
-  const size_t cache_size = ReservedCodeCacheSize;
-  size_t non_nmethod_size = NonNMethodCodeHeapSize;
-  size_t profiled_size = ProfiledCodeHeapSize;
-  size_t non_profiled_size = NonProfiledCodeHeapSize;
-  // Check if total size set via command line flags exceeds the reserved size
-  check_heap_sizes((non_nmethod_set ? non_nmethod_size : min_size),
-                   (profiled_set ? profiled_size : min_size),
-                   (non_profiled_set ? non_profiled_size : min_size),
-                   cache_size,
-                   non_nmethod_set && profiled_set && non_profiled_set);
-
-  // Determine size of compiler buffers
-  size_t code_buffers_size = 0;
-#ifdef COMPILER1
-  // C1 temporary code buffers (see Compiler::init_buffer_blob())
-  const int c1_count = CompilationPolicy::c1_count();
-  code_buffers_size += c1_count * Compiler::code_buffer_size();
-#endif
-#ifdef COMPILER2
-  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
-  const int c2_count = CompilationPolicy::c2_count();
-  // Initial size of constant table (this may be increased if a compiled method needs more space)
-  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
-#endif
 
-  // Increase default non_nmethod_size to account for compiler buffers
-  if (!non_nmethod_set) {
-    non_nmethod_size += code_buffers_size;
-  }
-  // Calculate default CodeHeap sizes if not set by user
-  if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
-    // Leave room for the other two parts of the code cache
-    const size_t max_non_nmethod_size = cache_size - 2 * min_size;
-    // Check if we have enough space for the non-nmethod code heap
-    if (max_non_nmethod_size >= non_nmethod_size) {
-      // Use the default value for non_nmethod_size and one half of the
-      // remaining size for non-profiled and one half for profiled methods
-      size_t remaining_size = cache_size - non_nmethod_size;
-      profiled_size = remaining_size / 2;
-      non_profiled_size = remaining_size - profiled_size;
-    } else {
-      // Use all space for the non-nmethod heap and set other heaps to minimal size
-      non_nmethod_size = max_non_nmethod_size;
-      profiled_size = min_size;
-      non_profiled_size = min_size;
-    }
-  } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
-    // The user explicitly set some code heap sizes. Increase or decrease the (default)
-    // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
-    // code heap sizes and then only change non-nmethod code heap size if still necessary.
-    intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
-    if (non_profiled_set) {
-      if (!profiled_set) {
-        // Adapt size of profiled code heap
-        if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
-          // Not enough space available, set to minimum size
-          diff_size += profiled_size - min_size;
-          profiled_size = min_size;
-        } else {
-          profiled_size += diff_size;
-          diff_size = 0;
-        }
-      }
-    } else if (profiled_set) {
-      // Adapt size of non-profiled code heap
-      if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
-        // Not enough space available, set to minimum size
-        diff_size += non_profiled_size - min_size;
-        non_profiled_size = min_size;
-      } else {
-        non_profiled_size += diff_size;
-        diff_size = 0;
-      }
-    } else if (non_nmethod_set) {
-      // Distribute remaining size between profiled and non-profiled code heaps
-      diff_size = cache_size - non_nmethod_size;
-      profiled_size = diff_size / 2;
-      non_profiled_size = diff_size - profiled_size;
-      diff_size = 0;
-    }
-    if (diff_size != 0) {
-      // Use non-nmethod code heap for remaining space requirements
-      assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
-      non_nmethod_size += diff_size;
-    }
-  }
+  CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
+  CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
+  CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};
 
-  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
+  const bool cache_size_set = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
+  const size_t ps = page_size(false, 8);
+  const size_t min_size = MAX2(os::vm_allocation_granularity(), ps);
+  const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
+  size_t cache_size = align_up(ReservedCodeCacheSize, min_size);
+
+  // Prerequisites
   if (!heap_available(CodeBlobType::MethodProfiled)) {
-    non_profiled_size += profiled_size;
-    profiled_size = 0;
+    // For compatibility reasons, disabled tiered compilation overrides
+    // segment size even if it is set explicitly.
+    non_profiled.size += profiled.size;
+    // Profiled code heap is not available, forcibly set size to 0
+    profiled.size = 0;
+    profiled.set = true;
+    profiled.enabled = false;
+  }
+
+  assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap");
+
+  size_t compiler_buffer_size = 0;
+  COMPILER1_PRESENT(compiler_buffer_size += CompilationPolicy::c1_count() * Compiler::code_buffer_size());
+  COMPILER2_PRESENT(compiler_buffer_size += CompilationPolicy::c2_count() * C2Compiler::initial_code_buffer_size());
+
+  if (!non_nmethod.set) {
+    non_nmethod.size += compiler_buffer_size;
   }
-  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
-  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
-    non_nmethod_size += non_profiled_size;
-    non_profiled_size = 0;
+
+  if (!profiled.set && !non_profiled.set) {
+    non_profiled.size = profiled.size = (cache_size > non_nmethod.size + 2 * min_size) ?
+                                        (cache_size - non_nmethod.size) / 2 : min_size;
+  }
+
+  if (profiled.set && !non_profiled.set) {
+    set_size_of_unset_code_heap(&non_profiled, cache_size, non_nmethod.size + profiled.size, min_size);
+  }
+
+  if (!profiled.set && non_profiled.set) {
+    set_size_of_unset_code_heap(&profiled, cache_size, non_nmethod.size + non_profiled.size, min_size);
+  }
+
+  // Compatibility.
+  size_t non_nmethod_min_size = min_cache_size + compiler_buffer_size;
+  if (!non_nmethod.set && profiled.set && non_profiled.set) {
+    set_size_of_unset_code_heap(&non_nmethod, cache_size, profiled.size + non_profiled.size, non_nmethod_min_size);
   }
-  // Make sure we have enough space for VM internal code
-  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
-  if (non_nmethod_size < min_code_cache_size) {
-    vm_exit_during_initialization(err_msg(
-        "Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K",
-        non_nmethod_size/K, min_code_cache_size/K));
+
+  size_t total = non_nmethod.size + profiled.size + non_profiled.size;
+  if (total != cache_size && !cache_size_set) {
+    log_info(codecache)("ReservedCodeCache size " SIZE_FORMAT "K changed to total segments size NonNMethod "
+                        SIZE_FORMAT "K NonProfiled " SIZE_FORMAT "K Profiled " SIZE_FORMAT "K = " SIZE_FORMAT "K",
+                        cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K, total/K);
+    // Adjust ReservedCodeCacheSize as necessary because it was not set explicitly
+    cache_size = total;
   }
 
-  // Verify sizes and update flag values
-  assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
-  FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod_size);
-  FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled_size);
-  FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled_size);
+  log_debug(codecache)("Initializing code heaps ReservedCodeCache " SIZE_FORMAT "K NonNMethod " SIZE_FORMAT "K"
+                       " NonProfiled " SIZE_FORMAT "K Profiled " SIZE_FORMAT "K",
+                       cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K);
+
+  // Validation
+  // Check minimal required sizes
+  check_min_size("non-nmethod code heap", non_nmethod.size, non_nmethod_min_size);
+  if (profiled.enabled) {
+    check_min_size("profiled code heap", profiled.size, min_size);
+  }
+  if (non_profiled.enabled) { // non_profiled.enabled is always ON for segmented code heap, leave it checked for clarity
+    check_min_size("non-profiled code heap", non_profiled.size, min_size);
+  }
+  if (cache_size_set) {
+    check_min_size("reserved code cache", cache_size, min_cache_size);
+  }
 
-  // Print warning if using large pages but not able to use the size given
+  // ReservedCodeCacheSize was set explicitly, so report an error and abort if it doesn't match the segment sizes
+  if (total != cache_size && cache_size_set) {
+    err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K)", non_nmethod.size/K);
+    if (profiled.enabled) {
+      message.append(" + ProfiledCodeHeapSize (" SIZE_FORMAT "K)", profiled.size/K);
+    }
+    if (non_profiled.enabled) {
+      message.append(" + NonProfiledCodeHeapSize (" SIZE_FORMAT "K)", non_profiled.size/K);
+    }
+    message.append(" = " SIZE_FORMAT "K", total/K);
+    message.append((total > cache_size) ? " is greater than " : " is less than ");
+    message.append("ReservedCodeCacheSize (" SIZE_FORMAT "K).", cache_size/K);
+
+    vm_exit_during_initialization("Invalid code heap sizes", message);
+  }
+
+  // Compatibility. Print warning if using large pages but not able to use the size given
   if (UseLargePages) {
     const size_t lg_ps = page_size(false, 1);
     if (ps < lg_ps) {
@@ -324,32 +303,40 @@ void CodeCache::initialize_heaps() {
 
   // Note: if large page support is enabled, min_size is at least the large
   // page size. This ensures that the code cache is covered by large pages.
-  non_nmethod_size = align_up(non_nmethod_size, min_size);
-  profiled_size = align_down(profiled_size, min_size);
-  non_profiled_size = align_down(non_profiled_size, min_size);
-
-  // Reserve one continuous chunk of memory for CodeHeaps and split it into
-  // parts for the individual heaps. The memory layout looks like this:
-  // ---------- high -----------
-  //    Non-profiled nmethods
-  //         Non-nmethods
-  //    Profiled nmethods
-  // ---------- low ------------
+  non_profiled.size += non_nmethod.size & alignment_mask(min_size);
+  non_profiled.size += profiled.size & alignment_mask(min_size);
+  non_nmethod.size = align_down(non_nmethod.size, min_size);
+  profiled.size = align_down(profiled.size, min_size);
+  non_profiled.size = align_down(non_profiled.size, min_size);
+
+  FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
+  FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
+  FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
+  FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);
+
   ReservedCodeSpace rs = reserve_heap_memory(cache_size, ps);
-  ReservedSpace profiled_space = rs.first_part(profiled_size);
-  ReservedSpace rest = rs.last_part(profiled_size);
-  ReservedSpace non_method_space = rest.first_part(non_nmethod_size);
-  ReservedSpace non_profiled_space = rest.last_part(non_nmethod_size);
 
   // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
   LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());
 
+  size_t offset = 0;
+  if (profiled.enabled) {
+    ReservedSpace profiled_space = rs.partition(offset, profiled.size);
+    offset += profiled.size;
+    // Tier 2 and tier 3 (profiled) methods
+    add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
+  }
+
+  ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
+  offset += non_nmethod.size;
   // Non-nmethods (stubs, adapters, ...)
   add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
-  // Tier 2 and tier 3 (profiled) methods
-  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
-  // Tier 1 and tier 4 (non-profiled) methods and native methods
-  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
+
+  if (non_profiled.enabled) {
+    ReservedSpace non_profiled_space = rs.partition(offset, non_profiled.size);
+    // Tier 1 and tier 4 (non-profiled) methods and native methods
+    add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
+  }
 }
 
 size_t CodeCache::page_size(bool aligned, size_t min_pages) {
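
A minimal standalone sketch of the defaulting logic introduced above (this is not the HotSpot sources; the flag values, sizes, and the helper name set_size_of_unset are invented for illustration). It shows how an unset segment absorbs whatever is left of the code cache after the explicitly set segments, but never drops below the minimum size:

// Illustrative only: simplified model of the patch's size-distribution step.
#include <cstddef>
#include <cstdio>

struct CodeHeapInfo { size_t size; bool set; bool enabled; };

// Mirrors the intent of set_size_of_unset_code_heap(): an unset heap gets the
// remainder of the cache after the other heaps, clamped to min_size.
static void set_size_of_unset(CodeHeapInfo* heap, size_t available, size_t used, size_t min_size) {
  heap->size = (available > used + min_size) ? (available - used) : min_size;
}

int main() {
  const size_t M = 1024 * 1024;
  const size_t min_size = 2 * M;                      // assumed allocation granularity
  const size_t cache_size = 240 * M;                  // assumed ReservedCodeCacheSize
  CodeHeapInfo non_nmethod  = {8 * M, false, true};   // default plus compiler buffers (made up)
  CodeHeapInfo profiled     = {0, false, true};       // not set on the command line
  CodeHeapInfo non_profiled = {64 * M, true, true};   // e.g. -XX:NonProfiledCodeHeapSize=64m

  // Only the non-profiled segment was set, so the profiled segment absorbs the rest.
  if (!profiled.set && non_profiled.set) {
    set_size_of_unset(&profiled, cache_size, non_nmethod.size + non_profiled.size, min_size);
  }

  // Prints: non-nmethod 8M profiled 168M non-profiled 64M
  printf("non-nmethod %zuM profiled %zuM non-profiled %zuM\n",
         non_nmethod.size / M, profiled.size / M, non_profiled.size / M);
  return 0;
}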