@@ -102,9 +102,9 @@ void CodeBuffer::initialize(csize_t code_size, csize_t locs_size) {
   assert(SECT_LIMIT == 3, "total_size explicitly lists all section alignments");
   int total_size = code_size + _consts.alignment() + _insts.alignment() + _stubs.alignment() + SECT_LIMIT * slop;
 
-  assert(blob() == NULL, "only once");
+  assert(blob() == nullptr, "only once");
   set_blob(BufferBlob::create(_name, total_size));
-  if (blob() == NULL) {
+  if (blob() == nullptr) {
     // The assembler constructor will throw a fatal on an empty CodeBuffer.
     return;  // caller must test this
   }
@@ -130,7 +130,7 @@ CodeBuffer::~CodeBuffer() {
   // If we allocated our code buffer from the CodeCache via a BufferBlob, and
   // it's not permanent, then free the BufferBlob.  The rest of the memory
   // will be freed when the ResourceObj is released.
-  for (CodeBuffer* cb = this; cb != NULL; cb = cb->before_expand()) {
+  for (CodeBuffer* cb = this; cb != nullptr; cb = cb->before_expand()) {
     // Previous incarnations of this buffer are held live, so that internal
     // addresses constructed before expansions will not be confused.
     cb->free_blob();
@@ -171,7 +171,7 @@ void CodeBuffer::initialize_section_size(CodeSection* cs, csize_t size) {
 
 void CodeBuffer::set_blob(BufferBlob* blob) {
   _blob = blob;
-  if (blob != NULL) {
+  if (blob != nullptr) {
     address start = blob->content_begin();
     address end   = blob->content_end();
     // Round up the starting address.
@@ -191,21 +191,21 @@ void CodeBuffer::set_blob(BufferBlob* blob) {
 }
 
 void CodeBuffer::free_blob() {
-  if (_blob != NULL) {
+  if (_blob != nullptr) {
     BufferBlob::free(_blob);
-    set_blob(NULL);
+    set_blob(nullptr);
   }
 }
 
 const char* CodeBuffer::code_section_name(int n) {
 #ifdef PRODUCT
-  return NULL;
+  return nullptr;
 #else // PRODUCT
   switch (n) {
   case SECT_CONSTS:            return "consts";
   case SECT_INSTS:             return "insts";
   case SECT_STUBS:             return "stubs";
-  default:                     return NULL;
+  default:                     return nullptr;
   }
 #endif // PRODUCT
 }
@@ -236,14 +236,14 @@ bool CodeBuffer::is_backward_branch(Label& L) {
 #ifndef PRODUCT
 address CodeBuffer::decode_begin() {
   address begin = _insts.start();
-  if (_decode_begin != NULL && _decode_begin > begin)
+  if (_decode_begin != nullptr && _decode_begin > begin)
     begin = _decode_begin;
   return begin;
 }
 #endif // !PRODUCT
 
 GrowableArray<int>* CodeBuffer::create_patch_overflow() {
-  if (_overflow_arena == NULL) {
+  if (_overflow_arena == nullptr) {
     _overflow_arena = new (mtCode) Arena(mtCode);
   }
   return new (_overflow_arena) GrowableArray<int>(_overflow_arena, 8, 0, 0);
@@ -269,7 +269,7 @@ address CodeSection::target(Label& L, address branch_pc) {
 
     // Need to return a pc, doesn't matter what it is since it will be
     // replaced during resolution later.
-    // Don't return NULL or badAddress, since branches shouldn't overflow.
+    // Don't return null or badAddress, since branches shouldn't overflow.
     // Don't return base either because that could overflow displacements
     // for shorter branches.  It will get checked when bound.
     return branch_pc;
@@ -365,7 +365,7 @@ void CodeSection::relocate(address at, RelocationHolder const& spec, int format)
 }
 
 void CodeSection::initialize_locs(int locs_capacity) {
-  assert(_locs_start == NULL, "only one locs init step, please");
+  assert(_locs_start == nullptr, "only one locs init step, please");
   // Apply a priori lower limits to relocation size:
   csize_t min_locs = MAX2(size() / 16, (csize_t)4);
   if (locs_capacity < min_locs)  locs_capacity = min_locs;
@@ -377,7 +377,7 @@ void CodeSection::initialize_locs(int locs_capacity) {
 }
 
 void CodeSection::initialize_shared_locs(relocInfo* buf, int length) {
-  assert(_locs_start == NULL, "do this before locs are allocated");
+  assert(_locs_start == nullptr, "do this before locs are allocated");
   // Internal invariant: locs buf must be fully aligned.
   // See copy_relocations_to() below.
   while ((uintptr_t)buf % HeapWordSize != 0 && length > 0) {
@@ -403,7 +403,7 @@ void CodeSection::initialize_locs_from(const CodeSection* source_cs) {
 }
 
 void CodeSection::expand_locs(int new_capacity) {
-  if (_locs_start == NULL) {
+  if (_locs_start == nullptr) {
     initialize_locs(new_capacity);
     return;
   } else {
@@ -468,8 +468,8 @@ void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
     assert((dest->_total_start - _insts.start()) % alignSize == 0, "copy must preserve alignment");
   }
 
-  const CodeSection* prev_cs      = NULL;
-  CodeSection*       prev_dest_cs = NULL;
+  const CodeSection* prev_cs      = nullptr;
+  CodeSection*       prev_dest_cs = nullptr;
 
   for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
     // figure compact layout of each section
@@ -481,7 +481,7 @@ void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
     // Compute initial padding; assign it to the previous non-empty guy.
     // Cf. figure_expanded_capacities.
     csize_t padding = cs->align_at_start(buf_offset) - buf_offset;
-    if (prev_dest_cs != NULL) {
+    if (prev_dest_cs != nullptr) {
       if (padding != 0) {
         buf_offset += padding;
         prev_dest_cs->_limit += padding;
@@ -493,7 +493,7 @@ void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
       prev_cs      = cs;
     }
 
-    debug_only(dest_cs->_start = NULL);  // defeat double-initialization assert
+    debug_only(dest_cs->_start = nullptr);  // defeat double-initialization assert
     dest_cs->initialize(buf+buf_offset, csize);
     dest_cs->set_end(buf+buf_offset+csize);
     assert(dest_cs->is_allocated(), "must always be allocated");
@@ -510,7 +510,7 @@ void CodeBuffer::compute_final_layout(CodeBuffer* dest) const {
 // Append an oop reference that keeps the class alive.
 static void append_oop_references(GrowableArray<oop>* oops, Klass* k) {
   oop cl = k->klass_holder();
-  if (cl != NULL && !oops->contains(cl)) {
+  if (cl != nullptr && !oops->contains(cl)) {
     oops->append(cl);
   }
 }
@@ -613,7 +613,7 @@ int CodeBuffer::total_skipped_instructions_size() const {
 }
 
 csize_t CodeBuffer::total_relocation_size() const {
-  csize_t total = copy_relocations_to(NULL);  // dry run only
+  csize_t total = copy_relocations_to(nullptr);  // dry run only
   return (csize_t) align_up(total, HeapWordSize);
 }
 
@@ -656,7 +656,7 @@ csize_t CodeBuffer::copy_relocations_to(address buf, csize_t buf_limit, bool onl
       } else {            // else shrink the filler to fit
         filler = relocInfo(relocInfo::none, jump);
       }
-      if (buf != NULL) {
+      if (buf != nullptr) {
         assert(buf_offset + (csize_t)sizeof(filler) <= buf_limit, "filler in bounds");
         *(relocInfo*)(buf+buf_offset) = filler;
       }
@@ -671,7 +671,7 @@ csize_t CodeBuffer::copy_relocations_to(address buf, csize_t buf_limit, bool onl
     code_end_so_far += csize;  // advance past this guy's instructions too
 
     // Done with filler; emit the real relocations:
-    if (buf != NULL && lsize != 0) {
+    if (buf != nullptr && lsize != 0) {
       assert(buf_offset + lsize <= buf_limit, "target in bounds");
       assert((uintptr_t)lstart % HeapWordSize == 0, "sane start");
       if (buf_offset % HeapWordSize == 0) {
@@ -688,7 +688,7 @@ csize_t CodeBuffer::copy_relocations_to(address buf, csize_t buf_limit, bool onl
 
   // Align end of relocation info in target.
   while (buf_offset % HeapWordSize != 0) {
-    if (buf != NULL) {
+    if (buf != nullptr) {
       relocInfo padding = relocInfo(relocInfo::none, 0);
       assert(buf_offset + (csize_t)sizeof(padding) <= buf_limit, "padding in bounds");
       *(relocInfo*)(buf+buf_offset) = padding;
@@ -702,15 +702,15 @@ csize_t CodeBuffer::copy_relocations_to(address buf, csize_t buf_limit, bool onl
 }
 
 csize_t CodeBuffer::copy_relocations_to(CodeBlob* dest) const {
-  address buf = NULL;
+  address buf = nullptr;
   csize_t buf_offset = 0;
   csize_t buf_limit = 0;
 
-  if (dest != NULL) {
+  if (dest != nullptr) {
     buf = (address)dest->relocation_begin();
     buf_limit = (address)dest->relocation_end() - buf;
   }
-  // if dest == NULL, this is just the sizing pass
+  // if dest is null, this is just the sizing pass
   //
   buf_offset = copy_relocations_to(buf, buf_limit, false);
 
@@ -752,7 +752,7 @@ void CodeBuffer::copy_code_to(CodeBlob* dest_blob) {
 // ascending address).
 void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
   address dest_end = dest->_total_start + dest->_total_size;
-  address dest_filled = NULL;
+  address dest_filled = nullptr;
   for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
     // pull code out of each section
     const CodeSection* cs = code_section(n);
@@ -768,7 +768,7 @@ void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
                         (HeapWord*)dest_cs->start(),
                         wsize / HeapWordSize);
 
-      if (dest->blob() == NULL) {
+      if (dest->blob() == nullptr) {
         // Destination is a final resting place, not just another buffer.
         // Normalize uninitialized bytes in the final padding.
         Copy::fill_to_bytes(dest_cs->end(), dest_cs->remaining(),
@@ -802,7 +802,7 @@ void CodeBuffer::relocate_code_to(CodeBuffer* dest) const {
     }
   }
 
-  if (dest->blob() == NULL && dest_filled != NULL) {
+  if (dest->blob() == nullptr && dest_filled != nullptr) {
     // Destination is a final resting place, not just another buffer.
     // Normalize uninitialized bytes in the final padding.
     Copy::fill_to_bytes(dest_filled, dest_end - dest_filled,
@@ -865,7 +865,7 @@ void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
     this->print();
   }
 
-  if (StressCodeBuffers && blob() != NULL) {
+  if (StressCodeBuffers && blob() != nullptr) {
     static int expand_count = 0;
     if (expand_count >= 0)  expand_count += 1;
     if (expand_count > 100 && is_power_of_2(expand_count)) {
@@ -878,7 +878,7 @@ void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
 
   // Resizing must be allowed
   {
-    if (blob() == NULL)  return;  // caller must check for blob == NULL
+    if (blob() == nullptr)  return;  // caller must check if blob is null
   }
 
   // Figure new capacity for each section.
@@ -889,7 +889,7 @@ void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
 
   // Create a new (temporary) code buffer to hold all the new data
   CodeBuffer cb(name(), new_total_cap, 0);
-  if (cb.blob() == NULL) {
+  if (cb.blob() == nullptr) {
     // Failed to allocate in code cache.
     free_blob();
     return;
@@ -901,7 +901,7 @@ void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
   // has been created at any time in this CodeBuffer's past.
   CodeBuffer* bxp = new CodeBuffer(_total_start, _total_size);
   bxp->take_over_code_from(this);  // remember the old undersized blob
-  DEBUG_ONLY(this->_blob = NULL);  // silence a later assert
+  DEBUG_ONLY(this->_blob = nullptr);  // silence a later assert
   bxp->_before_expand = this->_before_expand;
   this->_before_expand = bxp;
 
@@ -916,7 +916,7 @@ void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
     assert(cb_sect->capacity() >= new_capacity[n], "big enough");
     address cb_start = cb_sect->start();
     cb_sect->set_end(cb_start + this_sect->size());
-    if (this_sect->mark() == NULL) {
+    if (this_sect->mark() == nullptr) {
       cb_sect->clear_mark();
     } else {
       cb_sect->set_mark(cb_start + this_sect->mark_off());
@@ -932,7 +932,7 @@ void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
   // Copy the temporary code buffer into the current code buffer.
   // Basically, do {*this = cb}, except for some control information.
   this->take_over_code_from(&cb);
-  cb.set_blob(NULL);
+  cb.set_blob(nullptr);
 
   // Zap the old code buffer contents, to avoid mistakenly using them.
   debug_only(Copy::fill_to_bytes(bxp->_total_start, bxp->_total_size,
@@ -942,7 +942,7 @@ void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
   debug_only(verify_section_allocation();)
 
 #ifndef PRODUCT
-  _decode_begin = NULL;  // sanity
+  _decode_begin = nullptr;  // sanity
   if (PrintNMethods && (WizardMode || Verbose)) {
     tty->print("expanded CodeBuffer:");
     this->print();
@@ -952,7 +952,7 @@ void CodeBuffer::expand(CodeSection* which_cs, csize_t amount) {
 
 void CodeBuffer::take_over_code_from(CodeBuffer* cb) {
   // Must already have disposed of the old blob somehow.
-  assert(blob() == NULL, "must be empty");
+  assert(blob() == nullptr, "must be empty");
   // Take the new blob away from cb.
   set_blob(cb->blob());
   // Take over all the section pointers.
@@ -962,16 +962,16 @@ void CodeBuffer::take_over_code_from(CodeBuffer* cb) {
     this_sect->take_over_code_from(cb_sect);
   }
   _overflow_arena = cb->_overflow_arena;
-  cb->_overflow_arena = NULL;
+  cb->_overflow_arena = nullptr;
   // Make sure the old cb won't try to use it or free it.
   DEBUG_ONLY(cb->_blob = (BufferBlob*)badAddress);
 }
 
 void CodeBuffer::verify_section_allocation() {
   address tstart = _total_start;
-  if (tstart == badAddress)  return;  // smashed by set_blob(NULL)
+  if (tstart == badAddress)  return;  // smashed by set_blob(nullptr)
   address tend   = tstart + _total_size;
-  if (_blob != NULL) {
+  if (_blob != nullptr) {
     guarantee(tstart >= _blob->content_begin(), "sanity");
     guarantee(tend   <= _blob->content_end(),   "sanity");
   }
@@ -996,7 +996,7 @@ void CodeBuffer::verify_section_allocation() {
 }
 
 void CodeBuffer::log_section_sizes(const char* name) {
-  if (xtty != NULL) {
+  if (xtty != nullptr) {
     ttyLocker ttyl;
     // log info about buffer usage
     xtty->print_cr("<blob name='%s' total_size='%d'>", name, _total_size);
@@ -1020,7 +1020,7 @@ bool CodeBuffer::finalize_stubs() {
 }
 
 void CodeBuffer::shared_stub_to_interp_for(ciMethod* callee, csize_t call_offset) {
-  if (_shared_stub_to_interp_requests == NULL) {
+  if (_shared_stub_to_interp_requests == nullptr) {
     _shared_stub_to_interp_requests = new SharedStubToInterpRequests(8);
   }
   SharedStubToInterpRequest request(callee, call_offset);
@@ -1061,8 +1061,8 @@ void CodeSection::print(const char* name) {
 }
 
 void CodeBuffer::print() {
-  if (this == NULL) {
-    tty->print_cr("NULL CodeBuffer pointer");
+  if (this == nullptr) {
+    tty->print_cr("null CodeBuffer pointer");
     return;
   }
 
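Note: the conversion above is mechanical, but the motivation is type safety. `NULL` in C++ is an integer constant (typically `0` or `0L`), so it participates in integer overload resolution; `nullptr` has its own type, `std::nullptr_t`, which converts only to pointer types. The snippet below is a minimal standalone sketch of that hazard, not part of this patch or of HotSpot; the `describe` overloads are hypothetical names used purely for illustration.

```cpp
#include <cstddef>  // NULL, std::nullptr_t
#include <cstdio>

// Hypothetical overload pair: one takes an integer, one takes a pointer.
static void describe(int)         { std::puts("chose describe(int)"); }
static void describe(const char*) { std::puts("chose describe(const char*)"); }

int main() {
  // With NULL defined as 0, this silently picks describe(int); with NULL
  // defined as 0L, the call is ambiguous and fails to compile.
  describe(NULL);

  // nullptr always selects the pointer overload -- no ambiguity possible.
  describe(nullptr);
  return 0;
}
```

Beyond overload resolution, `nullptr` cannot be accidentally used in arithmetic and works cleanly with templates and type deduction, which is why it is the idiomatic null pointer constant in modern C++ codebases such as HotSpot.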