Mirror of https://github.com/JetBrains/JetBrainsRuntime.git
Synced 2026-01-15 21:11:43 +01:00

Compare commits: batrdmi/tr...jbr-21+10 (31 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 861e302011 | |
| | 50dcc2aec5 | |
| | 28f5250fa5 | |
| | 0c9658446d | |
| | 26b111d714 | |
| | 52388179e6 | |
| | 11194e8b82 | |
| | 33bec20710 | |
| | 46bcc4901e | |
| | a9a53f417d | |
| | 98a392c4fc | |
| | 9ccf8ad91f | |
| | bdcbafb219 | |
| | f1d76fa925 | |
| | 9c202a5a8f | |
| | ca73f7e80f | |
| | 5b2d430131 | |
| | f7dee77d73 | |
| | ec901f28c3 | |
| | 8933c2d06a | |
| | 77519e5f4f | |
| | 2ef001e097 | |
| | 8c2c8b3f7f | |
| | 6d4b02b6c9 | |
| | 7dfe75cf55 | |
| | 66742c83d4 | |
| | 7c50ab1612 | |
| | 92474f13f0 | |
| | ee5f6e156d | |
| | 7f71a1040d | |
| | d782125c8f | |
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -444,10 +444,9 @@ void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, Z
__ b(*stub->continuation());
}

#undef __

#endif // COMPILER2

#undef __
#define __ masm->

void ZBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) {

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -351,8 +351,6 @@ void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, Z
__ j(*stub->continuation());
}

#undef __

#endif // COMPILER2

#ifdef COMPILER1
@@ -442,9 +440,9 @@ void ZBarrierSetAssembler::generate_c1_load_barrier_runtime_stub(StubAssembler*
__ epilogue();
}

#undef __
#endif // COMPILER1

#undef __
#define __ masm->

void ZBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) {

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -698,10 +698,9 @@ void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, Z
__ jmp(*stub->continuation());
}

#undef __

#endif // COMPILER2

#undef __
#define __ masm->

void ZBarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) {
@@ -1642,7 +1642,6 @@ address StubGenerator::generate_base64_encodeBlock()
// calculate length from offsets
__ movl(length, end_offset);
__ subl(length, start_offset);
__ cmpl(length, 0);
__ jcc(Assembler::lessEqual, L_exit);

// Code for 512-bit VBMI encoding. Encodes 48 input bytes into 64
@@ -1685,8 +1684,7 @@ address StubGenerator::generate_base64_encodeBlock()
}

__ BIND(L_not512);
if (VM_Version::supports_avx2()
&& VM_Version::supports_avx512vlbw()) {
if (VM_Version::supports_avx2()) {
/*
** This AVX2 encoder is based off the paper at:
** https://dl.acm.org/doi/10.1145/3132709
@@ -1703,15 +1701,17 @@ address StubGenerator::generate_base64_encodeBlock()
__ vmovdqu(xmm9, ExternalAddress(StubRoutines::x86::base64_avx2_shuffle_addr()), rax);
// 6-bit mask for 2nd and 4th (and multiples) 6-bit values
__ movl(rax, 0x0fc0fc00);
__ movdl(xmm8, rax);
__ vmovdqu(xmm1, ExternalAddress(StubRoutines::x86::base64_avx2_input_mask_addr()), rax);
__ evpbroadcastd(xmm8, rax, Assembler::AVX_256bit);
__ vpbroadcastd(xmm8, xmm8, Assembler::AVX_256bit);

// Multiplication constant for "shifting" right by 6 and 10
// bits
__ movl(rax, 0x04000040);

__ subl(length, 24);
__ evpbroadcastd(xmm7, rax, Assembler::AVX_256bit);
__ movdl(xmm7, rax);
__ vpbroadcastd(xmm7, xmm7, Assembler::AVX_256bit);

// For the first load, we mask off reading of the first 4
// bytes into the register. This is so we can get 4 3-byte
@@ -1813,19 +1813,23 @@ address StubGenerator::generate_base64_encodeBlock()
// Load masking register for first and third (and multiples)
// 6-bit values.
__ movl(rax, 0x003f03f0);
__ evpbroadcastd(xmm6, rax, Assembler::AVX_256bit);
__ movdl(xmm6, rax);
__ vpbroadcastd(xmm6, xmm6, Assembler::AVX_256bit);
// Multiplication constant for "shifting" left by 4 and 8 bits
__ movl(rax, 0x01000010);
__ evpbroadcastd(xmm5, rax, Assembler::AVX_256bit);
__ movdl(xmm5, rax);
__ vpbroadcastd(xmm5, xmm5, Assembler::AVX_256bit);

// Isolate 6-bit chunks of interest
__ vpand(xmm0, xmm8, xmm1, Assembler::AVX_256bit);

// Load constants for encoding
__ movl(rax, 0x19191919);
__ evpbroadcastd(xmm3, rax, Assembler::AVX_256bit);
__ movdl(xmm3, rax);
__ vpbroadcastd(xmm3, xmm3, Assembler::AVX_256bit);
__ movl(rax, 0x33333333);
__ evpbroadcastd(xmm4, rax, Assembler::AVX_256bit);
__ movdl(xmm4, rax);
__ vpbroadcastd(xmm4, xmm4, Assembler::AVX_256bit);

// Shift output bytes 0 and 2 into proper lanes
__ vpmulhuw(xmm2, xmm0, xmm7, Assembler::AVX_256bit);
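For orientation, the AVX2 encoder above produces 32 output symbols from 24 input bytes per iteration; the vector masks and "shifting" multipliers implement, lane-wise, the same 3-bytes-to-4-symbols split that scalar Base64 encoding performs. A minimal scalar sketch of that step (illustrative only, not the HotSpot code) is:

```cpp
// Illustrative scalar model of one Base64 encode step: 3 input bytes -> 4 symbols.
// The AVX2 stub performs the equivalent of this on 24 bytes per loop iteration.
#include <cstdint>
#include <string>

static const char kAlphabet[] =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

std::string encode3(uint8_t b0, uint8_t b1, uint8_t b2) {
  uint32_t triple = (uint32_t(b0) << 16) | (uint32_t(b1) << 8) | b2;
  std::string out(4, '\0');
  out[0] = kAlphabet[(triple >> 18) & 0x3f];  // bits 23..18
  out[1] = kAlphabet[(triple >> 12) & 0x3f];  // bits 17..12
  out[2] = kAlphabet[(triple >>  6) & 0x3f];  // bits 11..6
  out[3] = kAlphabet[ triple        & 0x3f];  // bits 5..0
  return out;
}
```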
@@ -2133,6 +2137,80 @@ address StubGenerator::base64_vbmi_join_2_3_addr() {
return start;
}

address StubGenerator::base64_AVX2_decode_tables_addr() {
__ align64();
StubCodeMark mark(this, "StubRoutines", "AVX2_tables_base64");
address start = __ pc();

assert(((unsigned long long)start & 0x3f) == 0,
"Alignment problem (0x%08llx)", (unsigned long long)start);
__ emit_data(0x2f2f2f2f, relocInfo::none, 0);
__ emit_data(0x5f5f5f5f, relocInfo::none, 0); // for URL

__ emit_data(0xffffffff, relocInfo::none, 0);
__ emit_data(0xfcfcfcfc, relocInfo::none, 0); // for URL

// Permute table
__ emit_data64(0x0000000100000000, relocInfo::none);
__ emit_data64(0x0000000400000002, relocInfo::none);
__ emit_data64(0x0000000600000005, relocInfo::none);
__ emit_data64(0xffffffffffffffff, relocInfo::none);

// Shuffle table
__ emit_data64(0x090a040506000102, relocInfo::none);
__ emit_data64(0xffffffff0c0d0e08, relocInfo::none);
__ emit_data64(0x090a040506000102, relocInfo::none);
__ emit_data64(0xffffffff0c0d0e08, relocInfo::none);

// merge table
__ emit_data(0x01400140, relocInfo::none, 0);

// merge multiplier
__ emit_data(0x00011000, relocInfo::none, 0);

return start;
}

address StubGenerator::base64_AVX2_decode_LUT_tables_addr() {
__ align64();
StubCodeMark mark(this, "StubRoutines", "AVX2_tables_URL_base64");
address start = __ pc();

assert(((unsigned long long)start & 0x3f) == 0,
"Alignment problem (0x%08llx)", (unsigned long long)start);
// lut_lo
__ emit_data64(0x1111111111111115, relocInfo::none);
__ emit_data64(0x1a1b1b1b1a131111, relocInfo::none);
__ emit_data64(0x1111111111111115, relocInfo::none);
__ emit_data64(0x1a1b1b1b1a131111, relocInfo::none);

// lut_roll
__ emit_data64(0xb9b9bfbf04131000, relocInfo::none);
__ emit_data64(0x0000000000000000, relocInfo::none);
__ emit_data64(0xb9b9bfbf04131000, relocInfo::none);
__ emit_data64(0x0000000000000000, relocInfo::none);

// lut_lo URL
__ emit_data64(0x1111111111111115, relocInfo::none);
__ emit_data64(0x1b1b1a1b1b131111, relocInfo::none);
__ emit_data64(0x1111111111111115, relocInfo::none);
__ emit_data64(0x1b1b1a1b1b131111, relocInfo::none);

// lut_roll URL
__ emit_data64(0xb9b9bfbf0411e000, relocInfo::none);
__ emit_data64(0x0000000000000000, relocInfo::none);
__ emit_data64(0xb9b9bfbf0411e000, relocInfo::none);
__ emit_data64(0x0000000000000000, relocInfo::none);

// lut_hi
__ emit_data64(0x0804080402011010, relocInfo::none);
__ emit_data64(0x1010101010101010, relocInfo::none);
__ emit_data64(0x0804080402011010, relocInfo::none);
__ emit_data64(0x1010101010101010, relocInfo::none);

return start;
}

address StubGenerator::base64_decoding_table_addr() {
StubCodeMark mark(this, "StubRoutines", "decoding_table_base64");
address start = __ pc();
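The lut_lo/lut_hi/lut_roll tables emitted above drive the nibble-split decoding technique used by the AVX2 decoder: lut_lo (indexed by the low nibble) and lut_hi (indexed by the high nibble) are ANDed per byte to flag invalid characters, while lut_roll supplies the per-range value that is added to an ASCII code to obtain its 6-bit value, with '/' (or '_' for URL input) handled by the 0x2F/0x5F special case. A hedged scalar model of the roll step only (an illustration of the scheme, not the stub, and with no input validation) is:

```cpp
// Scalar model of the "roll" step: map an ASCII Base64 character to its 6-bit
// value by adding a per-range offset selected by the character's high nibble.
// Slot 1 is reached only for '/', via the special-case adjustment of -1.
#include <cstdint>

int decode_symbol(uint8_t c) {
  static const int8_t roll[8] = {0, 16, 19, 4, -65, -65, -71, -71};
  int idx = c >> 4;               // high nibble selects the character range
  if (c == '/') idx -= 1;         // special case: use slot 1, 47 + 16 = 63
  return uint8_t(c + roll[idx]);  // e.g. 'A'(65) - 65 = 0, '0'(48) + 4 = 52
}
```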
@@ -2289,7 +2367,7 @@ address StubGenerator::generate_base64_decodeBlock() {

Label L_process256, L_process64, L_process64Loop, L_exit, L_processdata, L_loadURL;
Label L_continue, L_finalBit, L_padding, L_donePadding, L_bruteForce;
Label L_forceLoop, L_bottomLoop, L_checkMIME, L_exit_no_vzero;
Label L_forceLoop, L_bottomLoop, L_checkMIME, L_exit_no_vzero, L_lastChunk;

// calculate length from offsets
__ movl(length, end_offset);
@@ -2299,11 +2377,11 @@ address StubGenerator::generate_base64_decodeBlock() {
// If AVX512 VBMI not supported, just compile non-AVX code
if(VM_Version::supports_avx512_vbmi() &&
VM_Version::supports_avx512bw()) {
__ cmpl(length, 128); // 128-bytes is break-even for AVX-512
__ jcc(Assembler::lessEqual, L_bruteForce);
__ cmpl(length, 31); // 32-bytes is break-even for AVX-512
__ jcc(Assembler::lessEqual, L_lastChunk);

__ cmpl(isMIME, 0);
__ jcc(Assembler::notEqual, L_bruteForce);
__ jcc(Assembler::notEqual, L_lastChunk);

// Load lookup tables based on isURL
__ cmpl(isURL, 0);
@@ -2554,6 +2632,89 @@ address StubGenerator::generate_base64_decodeBlock() {
__ BIND(L_bruteForce);
} // End of if(avx512_vbmi)

if (VM_Version::supports_avx2()) {
Label L_tailProc, L_topLoop, L_enterLoop;

__ cmpl(isMIME, 0);
__ jcc(Assembler::notEqual, L_lastChunk);

// Check for buffer too small (for algorithm)
__ subl(length, 0x2c);
__ jcc(Assembler::less, L_tailProc);

__ shll(isURL, 2);

// Algorithm adapted from https://arxiv.org/abs/1704.00605, "Faster Base64
// Encoding and Decoding using AVX2 Instructions". URL modifications added.

// Set up constants
__ lea(r13, ExternalAddress(StubRoutines::x86::base64_AVX2_decode_tables_addr()));
__ vpbroadcastd(xmm4, Address(r13, isURL, Address::times_1), Assembler::AVX_256bit); // 2F or 5F
__ vpbroadcastd(xmm10, Address(r13, isURL, Address::times_1, 0x08), Assembler::AVX_256bit); // -1 or -4
__ vmovdqu(xmm12, Address(r13, 0x10)); // permute
__ vmovdqu(xmm13, Address(r13, 0x30)); // shuffle
__ vpbroadcastd(xmm7, Address(r13, 0x50), Assembler::AVX_256bit); // merge
__ vpbroadcastd(xmm6, Address(r13, 0x54), Assembler::AVX_256bit); // merge mult

__ lea(r13, ExternalAddress(StubRoutines::x86::base64_AVX2_decode_LUT_tables_addr()));
__ shll(isURL, 4);
__ vmovdqu(xmm11, Address(r13, isURL, Address::times_1, 0x00)); // lut_lo
__ vmovdqu(xmm8, Address(r13, isURL, Address::times_1, 0x20)); // lut_roll
__ shrl(isURL, 6); // restore isURL
__ vmovdqu(xmm9, Address(r13, 0x80)); // lut_hi
__ jmp(L_enterLoop);

__ align32();
__ bind(L_topLoop);
// Add in the offset value (roll) to get 6-bit out values
__ vpaddb(xmm0, xmm0, xmm2, Assembler::AVX_256bit);
// Merge and permute the output bits into appropriate output byte lanes
__ vpmaddubsw(xmm0, xmm0, xmm7, Assembler::AVX_256bit);
__ vpmaddwd(xmm0, xmm0, xmm6, Assembler::AVX_256bit);
__ vpshufb(xmm0, xmm0, xmm13, Assembler::AVX_256bit);
__ vpermd(xmm0, xmm12, xmm0, Assembler::AVX_256bit);
// Store the output bytes
__ vmovdqu(Address(dest, dp, Address::times_1, 0), xmm0);
__ addptr(source, 0x20);
__ addptr(dest, 0x18);
__ subl(length, 0x20);
__ jcc(Assembler::less, L_tailProc);

__ bind(L_enterLoop);

// Load in encoded string (32 bytes)
__ vmovdqu(xmm2, Address(source, start_offset, Address::times_1, 0x0));
// Extract the high nibble for indexing into the lut tables.  High 4 bits are don't care.
__ vpsrld(xmm1, xmm2, 0x4, Assembler::AVX_256bit);
__ vpand(xmm1, xmm4, xmm1, Assembler::AVX_256bit);
// Extract the low nibble. 5F/2F will isolate the low-order 4 bits.  High 4 bits are don't care.
__ vpand(xmm3, xmm2, xmm4, Assembler::AVX_256bit);
// Check for special-case (0x2F or 0x5F (URL))
__ vpcmpeqb(xmm0, xmm4, xmm2, Assembler::AVX_256bit);
// Get the bitset based on the low nibble.  vpshufb uses low-order 4 bits only.
__ vpshufb(xmm3, xmm11, xmm3, Assembler::AVX_256bit);
// Get the bit value of the high nibble
__ vpshufb(xmm5, xmm9, xmm1, Assembler::AVX_256bit);
// Make sure 2F / 5F shows as valid
__ vpandn(xmm3, xmm0, xmm3, Assembler::AVX_256bit);
// Make adjustment for roll index.  For non-URL, this is a no-op,
// for URL, this adjusts by -4.  This is to properly index the
// roll value for 2F / 5F.
__ vpand(xmm0, xmm0, xmm10, Assembler::AVX_256bit);
// If the and of the two is non-zero, we have an invalid input character
__ vptest(xmm3, xmm5);
// Extract the "roll" value - value to add to the input to get 6-bit out value
__ vpaddb(xmm0, xmm0, xmm1, Assembler::AVX_256bit); // Handle 2F / 5F
__ vpshufb(xmm0, xmm8, xmm0, Assembler::AVX_256bit);
__ jcc(Assembler::equal, L_topLoop); // Fall through on error

__ bind(L_tailProc);

__ addl(length, 0x2c);

__ vzeroupper();
}

// Use non-AVX code to decode 4-byte chunks into 3 bytes of output

// Register state (Linux):
@@ -2584,6 +2745,8 @@ address StubGenerator::generate_base64_decodeBlock() {
const Register byte3 = WIN64_ONLY(r8) NOT_WIN64(rdx);
const Register byte4 = WIN64_ONLY(r10) NOT_WIN64(r9);

__ bind(L_lastChunk);

__ shrl(length, 2); // Multiple of 4 bytes only - length is # 4-byte chunks
__ cmpl(length, 0);
__ jcc(Assembler::lessEqual, L_exit_no_vzero);
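In the loop above, after the roll step each byte lane holds a 6-bit value; the vpmaddubsw/vpmaddwd pair (with the 0x01400140 "merge" and 0x00011000 "merge multiplier" constants from the decode tables) then combines four such values into three output bytes before the shuffle/permute stores 24 bytes per 32 input characters. A scalar model of that merge (illustrative only, not the HotSpot code) is:

```cpp
// Scalar model of the AVX2 "merge" step: combine four 6-bit values (one per
// input character, in input order) into 3 output bytes. The stub does this
// with vpmaddubsw (multiplier 0x01400140) and vpmaddwd (multiplier 0x00011000),
// followed by a byte shuffle; here it is plain integer arithmetic.
#include <cstdint>

void merge4(const uint8_t v[4], uint8_t out[3]) {
  uint32_t w01 = (uint32_t(v[0]) << 6) | v[1];  // == v0*0x40 + v1 (vpmaddubsw)
  uint32_t w23 = (uint32_t(v[2]) << 6) | v[3];
  uint32_t merged = (w01 << 12) | w23;          // == w01*0x1000 + w23 (vpmaddwd)
  out[0] = uint8_t(merged >> 16);               // the shuffle keeps these 3 bytes
  out[1] = uint8_t(merged >> 8);
  out[2] = uint8_t(merged);
}
```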
@@ -3829,12 +3992,12 @@ void StubGenerator::generate_all() {
}

if (UseBASE64Intrinsics) {
if(VM_Version::supports_avx2() &&
VM_Version::supports_avx512bw() &&
VM_Version::supports_avx512vl()) {
if(VM_Version::supports_avx2()) {
StubRoutines::x86::_avx2_shuffle_base64 = base64_avx2_shuffle_addr();
StubRoutines::x86::_avx2_input_mask_base64 = base64_avx2_input_mask_addr();
StubRoutines::x86::_avx2_lut_base64 = base64_avx2_lut_addr();
StubRoutines::x86::_avx2_decode_tables_base64 = base64_AVX2_decode_tables_addr();
StubRoutines::x86::_avx2_decode_lut_tables_base64 = base64_AVX2_decode_LUT_tables_addr();
}
StubRoutines::x86::_encoding_table_base64 = base64_encoding_table_addr();
if (VM_Version::supports_avx512_vbmi()) {
@@ -441,6 +441,8 @@ class StubGenerator: public StubCodeGenerator {
address base64_vbmi_join_1_2_addr();
address base64_vbmi_join_2_3_addr();
address base64_decoding_table_addr();
address base64_AVX2_decode_tables_addr();
address base64_AVX2_decode_LUT_tables_addr();

// Code for generating Base64 decoding.
//
@@ -71,6 +71,8 @@ address StubRoutines::x86::_shuffle_base64 = NULL;
address StubRoutines::x86::_avx2_shuffle_base64 = NULL;
address StubRoutines::x86::_avx2_input_mask_base64 = NULL;
address StubRoutines::x86::_avx2_lut_base64 = NULL;
address StubRoutines::x86::_avx2_decode_tables_base64 = NULL;
address StubRoutines::x86::_avx2_decode_lut_tables_base64 = NULL;
address StubRoutines::x86::_lookup_lo_base64 = NULL;
address StubRoutines::x86::_lookup_hi_base64 = NULL;
address StubRoutines::x86::_lookup_lo_base64url = NULL;
@@ -185,6 +185,8 @@ class x86 {
static address _avx2_shuffle_base64;
static address _avx2_input_mask_base64;
static address _avx2_lut_base64;
static address _avx2_decode_tables_base64;
static address _avx2_decode_lut_tables_base64;
static address _lookup_lo_base64;
static address _lookup_hi_base64;
static address _lookup_lo_base64url;
@@ -325,6 +327,8 @@ class x86 {
static address base64_vbmi_join_1_2_addr() { return _join_1_2_base64; }
static address base64_vbmi_join_2_3_addr() { return _join_2_3_base64; }
static address base64_decoding_table_addr() { return _decoding_table_base64; }
static address base64_AVX2_decode_tables_addr() { return _avx2_decode_tables_base64; }
static address base64_AVX2_decode_LUT_tables_addr() { return _avx2_decode_lut_tables_base64; }
#endif
static address pshuffle_byte_flip_mask_addr() { return _pshuffle_byte_flip_mask_addr; }
static address arrays_hashcode_powers_of_31() { return (address)_arrays_hashcode_powers_of_31; }
@@ -1140,7 +1140,7 @@ void VM_Version::get_processor_features() {
}

// Base64 Intrinsics (Check the condition for which the intrinsic will be active)
if ((UseAVX > 2) && supports_avx512vl() && supports_avx512bw()) {
if (UseAVX >= 2) {
if (FLAG_IS_DEFAULT(UseBASE64Intrinsics)) {
UseBASE64Intrinsics = true;
}
@@ -26,6 +26,7 @@

#include "precompiled.hpp"

#include "runtime/safefetch.hpp"
#include "sanitizers/address.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

@@ -63,7 +64,7 @@ bool handle_safefetch(int sig, address ignored1, void* ignored2) {
}

template <class T>
static bool _SafeFetchXX_internal(const T *adr, T* result) {
ATTRIBUTE_NO_ASAN static bool _SafeFetchXX_internal(const T *adr, T* result) {

T n = 0;
@@ -26,12 +26,13 @@
#ifndef OS_WINDOWS_SAFEFETCH_WINDOWS_HPP
#define OS_WINDOWS_SAFEFETCH_WINDOWS_HPP

#include "sanitizers/address.hpp"
#include "utilities/globalDefinitions.hpp"

// On windows, we use structured exception handling to implement SafeFetch

template <class T>
inline T SafeFetchXX(const T* adr, T errValue) {
ATTRIBUTE_NO_ASAN inline T SafeFetchXX(const T* adr, T errValue) {
T v = 0;
__try {
v = *adr;
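SafeFetch deliberately dereferences addresses that may be unmapped and recovers through a signal handler (POSIX) or structured exception handling (Windows), so AddressSanitizer must not instrument these reads; that is what the new ATTRIBUTE_NO_ASAN annotation from sanitizers/address.hpp achieves. As a hedged sketch of the usual compiler mechanism behind such a macro (an assumption about the general technique, not a copy of the HotSpot header):

```cpp
// Hedged sketch: a typical definition of an "exclude from ASan" attribute and
// how a SafeFetch-style probe would use it. The real macro is defined in
// HotSpot's sanitizers/address.hpp.
#if defined(__clang__) || defined(__GNUC__)
  #define ATTRIBUTE_NO_ASAN __attribute__((no_sanitize("address")))
#else
  #define ATTRIBUTE_NO_ASAN
#endif

template <class T>
ATTRIBUTE_NO_ASAN static bool safe_fetch_internal(const T* adr, T* result) {
  *result = *adr;  // may fault; recovery is handled outside this function
  return true;
}
```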
@@ -24,6 +24,7 @@

#include "precompiled.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cppVtables.hpp"
#include "cds/dumpAllocStats.hpp"
@@ -830,14 +831,11 @@ uintx ArchiveBuilder::any_to_offset(address p) const {
return buffer_to_offset(p);
}

// Update a Java object to point its Klass* to the address whene
// the class would be mapped at runtime.
void ArchiveBuilder::relocate_klass_ptr_of_oop(oop o) {
narrowKlass ArchiveBuilder::get_requested_narrow_klass(Klass* k) {
assert(DumpSharedSpaces, "sanity");
Klass* k = get_buffered_klass(o->klass());
k = get_buffered_klass(k);
Klass* requested_k = to_requested(k);
narrowKlass nk = CompressedKlassPointers::encode_not_null(requested_k, _requested_static_archive_bottom);
o->set_narrow_klass(nk);
return CompressedKlassPointers::encode_not_null(requested_k, _requested_static_archive_bottom);
}

// RelocateBufferToRequested --- Relocate all the pointers in rw/ro,
@@ -1062,19 +1060,18 @@ class ArchiveBuilder::CDSMapLogger : AllStatic {

while (start < end) {
size_t byte_size;
oop archived_oop = cast_to_oop(start);
oop original_oop = HeapShared::get_original_object(archived_oop);
oop original_oop = ArchiveHeapWriter::buffered_addr_to_source_obj(start);
if (original_oop != nullptr) {
ResourceMark rm;
log_info(cds, map)(PTR_FORMAT ": @@ Object %s",
p2i(to_requested(start)), original_oop->klass()->external_name());
byte_size = original_oop->size() * BytesPerWord;
} else if (archived_oop == HeapShared::roots()) {
} else if (start == ArchiveHeapWriter::buffered_heap_roots_addr()) {
// HeapShared::roots() is copied specially so it doesn't exist in
// HeapShared::OriginalObjectTable. See HeapShared::copy_roots().
log_info(cds, map)(PTR_FORMAT ": @@ Object HeapShared::roots (ObjArray)",
p2i(to_requested(start)));
byte_size = objArrayOopDesc::object_size(HeapShared::roots()->length()) * BytesPerWord;
byte_size = ArchiveHeapWriter::heap_roots_word_size() * BytesPerWord;
} else {
// We have reached the end of the region
break;
@@ -1091,7 +1088,7 @@ class ArchiveBuilder::CDSMapLogger : AllStatic {
}
}
static address to_requested(address p) {
return HeapShared::to_requested_address(p);
return ArchiveHeapWriter::buffered_addr_to_requested_addr(p);
}
#endif

@@ -443,7 +443,7 @@ public:
return alloc_stats()->string_stats();
}

void relocate_klass_ptr_of_oop(oop o);
narrowKlass get_requested_narrow_klass(Klass* k);

static Klass* get_buffered_klass(Klass* src_klass) {
Klass* klass = (Klass*)current()->get_buffered_addr((address)src_klass);
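The refactor above replaces relocate_klass_ptr_of_oop(), which patched the object in place, with get_requested_narrow_klass(), which only computes the encoded value and leaves the patching to ArchiveHeapWriter. Conceptually, a narrow Klass pointer is the Klass' requested address expressed as a shifted offset from the encoding base; a simplified sketch of that idea (assumed types and shift, not the CompressedKlassPointers implementation) is:

```cpp
// Simplified model of narrow-Klass encoding: a 32-bit value derived from the
// distance between the requested Klass address and the encoding base.
// The real logic, including the shift amount, lives in CompressedKlassPointers.
#include <cstdint>

using narrow_klass_value = uint32_t;

narrow_klass_value encode_not_null(const char* requested_klass_addr,
                                   const char* encoding_base,
                                   int shift /* log2 of Klass alignment */) {
  uint64_t offset = uint64_t(requested_klass_addr - encoding_base);
  return narrow_klass_value(offset >> shift);
}
```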
src/hotspot/share/cds/archiveHeapWriter.cpp (new file, 657 lines)
@@ -0,0 +1,657 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "precompiled.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "oops/oop.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "oops/typeArrayOop.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/bitMap.inline.hpp"

#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/heapRegion.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

GrowableArrayCHeap<u1, mtClassShared>* ArchiveHeapWriter::_buffer;

// The following are offsets from buffer_bottom()
size_t ArchiveHeapWriter::_buffer_top;
size_t ArchiveHeapWriter::_open_bottom;
size_t ArchiveHeapWriter::_open_top;
size_t ArchiveHeapWriter::_closed_bottom;
size_t ArchiveHeapWriter::_closed_top;
size_t ArchiveHeapWriter::_heap_roots_bottom;

size_t ArchiveHeapWriter::_heap_roots_word_size;

address ArchiveHeapWriter::_requested_open_region_bottom;
address ArchiveHeapWriter::_requested_open_region_top;
address ArchiveHeapWriter::_requested_closed_region_bottom;
address ArchiveHeapWriter::_requested_closed_region_top;

ResourceBitMap* ArchiveHeapWriter::_closed_oopmap;
ResourceBitMap* ArchiveHeapWriter::_open_oopmap;

GrowableArrayCHeap<ArchiveHeapWriter::NativePointerInfo, mtClassShared>* ArchiveHeapWriter::_native_pointers;
GrowableArrayCHeap<oop, mtClassShared>* ArchiveHeapWriter::_source_objs;

ArchiveHeapWriter::BufferOffsetToSourceObjectTable*
ArchiveHeapWriter::_buffer_offset_to_source_obj_table = nullptr;

void ArchiveHeapWriter::init() {
if (HeapShared::can_write()) {
Universe::heap()->collect(GCCause::_java_lang_system_gc);

_buffer_offset_to_source_obj_table = new BufferOffsetToSourceObjectTable();

_requested_open_region_bottom = nullptr;
_requested_open_region_top = nullptr;
_requested_closed_region_bottom = nullptr;
_requested_closed_region_top = nullptr;

_native_pointers = new GrowableArrayCHeap<NativePointerInfo, mtClassShared>(2048);
_source_objs = new GrowableArrayCHeap<oop, mtClassShared>(10000);

guarantee(UseG1GC, "implementation limitation");
guarantee(MIN_GC_REGION_ALIGNMENT <= /*G1*/HeapRegion::min_region_size_in_words() * HeapWordSize, "must be");
}
}

void ArchiveHeapWriter::add_source_obj(oop src_obj) {
_source_objs->append(src_obj);
}

// For the time being, always support two regions (to be strictly compatible with existing G1
// mapping code. We might eventually use a single region (JDK-8298048).
void ArchiveHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
GrowableArray<MemRegion>* closed_regions, GrowableArray<MemRegion>* open_regions,
GrowableArray<ArchiveHeapBitmapInfo>* closed_bitmaps,
GrowableArray<ArchiveHeapBitmapInfo>* open_bitmaps) {
assert(HeapShared::can_write(), "sanity");
allocate_buffer();
copy_source_objs_to_buffer(roots);
set_requested_address_for_regions(closed_regions, open_regions);
relocate_embedded_oops(roots, closed_bitmaps, open_bitmaps);
}

bool ArchiveHeapWriter::is_too_large_to_archive(oop o) {
return is_too_large_to_archive(o->size());
}

bool ArchiveHeapWriter::is_string_too_large_to_archive(oop string) {
typeArrayOop value = java_lang_String::value_no_keepalive(string);
return is_too_large_to_archive(value);
}

bool ArchiveHeapWriter::is_too_large_to_archive(size_t size) {
assert(size > 0, "no zero-size object");
assert(size * HeapWordSize > size, "no overflow");
static_assert(MIN_GC_REGION_ALIGNMENT > 0, "must be positive");

size_t byte_size = size * HeapWordSize;
if (byte_size > size_t(MIN_GC_REGION_ALIGNMENT)) {
return true;
} else {
return false;
}
}

// Various lookup functions between source_obj, buffered_obj and requested_obj
bool ArchiveHeapWriter::is_in_requested_regions(oop o) {
assert(_requested_open_region_bottom != nullptr, "do not call before this is initialized");
assert(_requested_closed_region_bottom != nullptr, "do not call before this is initialized");

address a = cast_from_oop<address>(o);
return (_requested_open_region_bottom <= a && a < _requested_open_region_top) ||
(_requested_closed_region_bottom <= a && a < _requested_closed_region_top);
}

oop ArchiveHeapWriter::requested_obj_from_buffer_offset(size_t offset) {
oop req_obj = cast_to_oop(_requested_open_region_bottom + offset);
assert(is_in_requested_regions(req_obj), "must be");
return req_obj;
}

oop ArchiveHeapWriter::source_obj_to_requested_obj(oop src_obj) {
assert(DumpSharedSpaces, "dump-time only");
HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj);
if (p != nullptr) {
return requested_obj_from_buffer_offset(p->buffer_offset());
} else {
return nullptr;
}
}

oop ArchiveHeapWriter::buffered_addr_to_source_obj(address buffered_addr) {
oop* p = _buffer_offset_to_source_obj_table->get(buffered_address_to_offset(buffered_addr));
if (p != nullptr) {
return *p;
} else {
return nullptr;
}
}

address ArchiveHeapWriter::buffered_addr_to_requested_addr(address buffered_addr) {
return _requested_open_region_bottom + buffered_address_to_offset(buffered_addr);
}

oop ArchiveHeapWriter::heap_roots_requested_address() {
return requested_obj_from_buffer_offset(_heap_roots_bottom);
}

address ArchiveHeapWriter::heap_region_requested_bottom(int heap_region_idx) {
assert(_buffer != nullptr, "must be initialized");
switch (heap_region_idx) {
case MetaspaceShared::first_closed_heap_region:
return _requested_closed_region_bottom;
case MetaspaceShared::first_open_heap_region:
return _requested_open_region_bottom;
default:
ShouldNotReachHere();
return nullptr;
}
}

void ArchiveHeapWriter::allocate_buffer() {
int initial_buffer_size = 100000;
_buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
_open_bottom = _buffer_top = 0;
ensure_buffer_space(1); // so that buffer_bottom() works
}

void ArchiveHeapWriter::ensure_buffer_space(size_t min_bytes) {
// We usually have very small heaps. If we get a huge one it's probably caused by a bug.
guarantee(min_bytes <= max_jint, "we dont support archiving more than 2G of objects");
_buffer->at_grow(to_array_index(min_bytes));
}

void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
int length = roots != nullptr ? roots->length() : 0;
_heap_roots_word_size = objArrayOopDesc::object_size(length);
size_t byte_size = _heap_roots_word_size * HeapWordSize;
if (byte_size >= MIN_GC_REGION_ALIGNMENT) {
log_error(cds, heap)("roots array is too large. Please reduce the number of classes");
vm_exit(1);
}

maybe_fill_gc_region_gap(byte_size);

size_t new_top = _buffer_top + byte_size;
ensure_buffer_space(new_top);

HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_top);
memset(mem, 0, byte_size);
{
// This is copied from MemAllocator::finish
oopDesc::set_mark(mem, markWord::prototype());
oopDesc::release_set_klass(mem, k);
}
{
// This is copied from ObjArrayAllocator::initialize
arrayOopDesc::set_length(mem, length);
}

objArrayOop arrayOop = objArrayOop(cast_to_oop(mem));
for (int i = 0; i < length; i++) {
// Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside of the real heap!
oop o = roots->at(i);
if (UseCompressedOops) {
* arrayOop->obj_at_addr<narrowOop>(i) = CompressedOops::encode(o);
} else {
* arrayOop->obj_at_addr<oop>(i) = o;
}
}
log_info(cds)("archived obj roots[%d] = " SIZE_FORMAT " bytes, klass = %p, obj = %p", length, byte_size, k, mem);

_heap_roots_bottom = _buffer_top;
_buffer_top = new_top;
}

void ArchiveHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
copy_source_objs_to_buffer_by_region(/*copy_open_region=*/true);
copy_roots_to_buffer(roots);
_open_top = _buffer_top;

// Align the closed region to the next G1 region
_buffer_top = _closed_bottom = align_up(_buffer_top, HeapRegion::GrainBytes);
copy_source_objs_to_buffer_by_region(/*copy_open_region=*/false);
_closed_top = _buffer_top;

log_info(cds, heap)("Size of open region = " SIZE_FORMAT " bytes", _open_top - _open_bottom);
log_info(cds, heap)("Size of closed region = " SIZE_FORMAT " bytes", _closed_top - _closed_bottom);
}

void ArchiveHeapWriter::copy_source_objs_to_buffer_by_region(bool copy_open_region) {
for (int i = 0; i < _source_objs->length(); i++) {
oop src_obj = _source_objs->at(i);
HeapShared::CachedOopInfo* info = HeapShared::archived_object_cache()->get(src_obj);
assert(info != nullptr, "must be");
if (info->in_open_region() == copy_open_region) {
// For region-based collectors such as G1, we need to make sure that we don't have
// an object that can possible span across two regions.
size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
info->set_buffer_offset(buffer_offset);

_buffer_offset_to_source_obj_table->put(buffer_offset, src_obj);
}
}
}

size_t ArchiveHeapWriter::filler_array_byte_size(int length) {
size_t byte_size = objArrayOopDesc::object_size(length) * HeapWordSize;
return byte_size;
}

int ArchiveHeapWriter::filler_array_length(size_t fill_bytes) {
assert(is_object_aligned(fill_bytes), "must be");
size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));

int initial_length = to_array_length(fill_bytes / elemSize);
for (int length = initial_length; length >= 0; length --) {
size_t array_byte_size = filler_array_byte_size(length);
if (array_byte_size == fill_bytes) {
return length;
}
}

ShouldNotReachHere();
return -1;
}

void ArchiveHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
Klass* oak = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_top);
memset(mem, 0, fill_bytes);
oopDesc::set_mark(mem, markWord::prototype());
narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(oak);
cast_to_oop(mem)->set_narrow_klass(nk);
arrayOopDesc::set_length(mem, array_length);
}

void ArchiveHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
// We fill only with arrays (so we don't need to use a single HeapWord filler if the
// leftover space is smaller than a zero-sized array object). Therefore, we need to
// make sure there's enough space of min_filler_byte_size in the current region after
// required_byte_size has been allocated. If not, fill the remainder of the current
// region.
size_t min_filler_byte_size = filler_array_byte_size(0);
size_t new_top = _buffer_top + required_byte_size + min_filler_byte_size;

const size_t cur_min_region_bottom = align_down(_buffer_top, MIN_GC_REGION_ALIGNMENT);
const size_t next_min_region_bottom = align_down(new_top, MIN_GC_REGION_ALIGNMENT);

if (cur_min_region_bottom != next_min_region_bottom) {
// Make sure that no objects span across MIN_GC_REGION_ALIGNMENT. This way
// we can map the region in any region-based collector.
assert(next_min_region_bottom > cur_min_region_bottom, "must be");
assert(next_min_region_bottom - cur_min_region_bottom == MIN_GC_REGION_ALIGNMENT,
"no buffered object can be larger than %d bytes", MIN_GC_REGION_ALIGNMENT);

const size_t filler_end = next_min_region_bottom;
const size_t fill_bytes = filler_end - _buffer_top;
assert(fill_bytes > 0, "must be");
ensure_buffer_space(filler_end);

int array_length = filler_array_length(fill_bytes);
log_info(cds, heap)("Inserting filler obj array of %d elements (" SIZE_FORMAT " bytes total) @ buffer offset " SIZE_FORMAT,
array_length, fill_bytes, _buffer_top);
init_filler_array_at_buffer_top(array_length, fill_bytes);

_buffer_top = filler_end;
}
}

size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
assert(!is_too_large_to_archive(src_obj), "already checked");
size_t byte_size = src_obj->size() * HeapWordSize;
assert(byte_size > 0, "no zero-size objects");

maybe_fill_gc_region_gap(byte_size);

size_t new_top = _buffer_top + byte_size;
assert(new_top > _buffer_top, "no wrap around");

size_t cur_min_region_bottom = align_down(_buffer_top, MIN_GC_REGION_ALIGNMENT);
size_t next_min_region_bottom = align_down(new_top, MIN_GC_REGION_ALIGNMENT);
assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");

ensure_buffer_space(new_top);

address from = cast_from_oop<address>(src_obj);
address to = offset_to_buffered_address<address>(_buffer_top);
assert(is_object_aligned(_buffer_top), "sanity");
assert(is_object_aligned(byte_size), "sanity");
memcpy(to, from, byte_size);

size_t buffered_obj_offset = _buffer_top;
_buffer_top = new_top;

return buffered_obj_offset;
}

void ArchiveHeapWriter::set_requested_address_for_regions(GrowableArray<MemRegion>* closed_regions,
GrowableArray<MemRegion>* open_regions) {
assert(closed_regions->length() == 0, "must be");
assert(open_regions->length() == 0, "must be");

assert(UseG1GC, "must be");
address heap_end = (address)G1CollectedHeap::heap()->reserved().end();
log_info(cds, heap)("Heap end = %p", heap_end);

size_t closed_region_byte_size = _closed_top - _closed_bottom;
size_t open_region_byte_size = _open_top - _open_bottom;
assert(closed_region_byte_size > 0, "must archived at least one object for closed region!");
assert(open_region_byte_size > 0, "must archived at least one object for open region!");

// The following two asserts are ensured by copy_source_objs_to_buffer_by_region().
assert(is_aligned(_closed_bottom, HeapRegion::GrainBytes), "sanity");
assert(is_aligned(_open_bottom, HeapRegion::GrainBytes), "sanity");

_requested_closed_region_bottom = align_down(heap_end - closed_region_byte_size, HeapRegion::GrainBytes);
_requested_open_region_bottom = _requested_closed_region_bottom - (_closed_bottom - _open_bottom);

assert(is_aligned(_requested_closed_region_bottom, HeapRegion::GrainBytes), "sanity");
assert(is_aligned(_requested_open_region_bottom, HeapRegion::GrainBytes), "sanity");

_requested_open_region_top = _requested_open_region_bottom + (_open_top - _open_bottom);
_requested_closed_region_top = _requested_closed_region_bottom + (_closed_top - _closed_bottom);

assert(_requested_open_region_top <= _requested_closed_region_bottom, "no overlap");

closed_regions->append(MemRegion(offset_to_buffered_address<HeapWord*>(_closed_bottom),
offset_to_buffered_address<HeapWord*>(_closed_top)));
open_regions->append( MemRegion(offset_to_buffered_address<HeapWord*>(_open_bottom),
offset_to_buffered_address<HeapWord*>(_open_top)));
}

// Oop relocation

template <typename T> T* ArchiveHeapWriter::requested_addr_to_buffered_addr(T* p) {
assert(is_in_requested_regions(cast_to_oop(p)), "must be");

address addr = address(p);
assert(addr >= _requested_open_region_bottom, "must be");
size_t offset = addr - _requested_open_region_bottom;
return offset_to_buffered_address<T*>(offset);
}

template <typename T> oop ArchiveHeapWriter::load_source_oop_from_buffer(T* buffered_addr) {
oop o = load_oop_from_buffer(buffered_addr);
assert(!in_buffer(cast_from_oop<address>(o)), "must point to source oop");
return o;
}

template <typename T> void ArchiveHeapWriter::store_requested_oop_in_buffer(T* buffered_addr,
oop request_oop) {
assert(is_in_requested_regions(request_oop), "must be");
store_oop_in_buffer(buffered_addr, request_oop);
}

void ArchiveHeapWriter::store_oop_in_buffer(oop* buffered_addr, oop requested_obj) {
// Make heap content deterministic. See comments inside HeapShared::to_requested_address.
*buffered_addr = HeapShared::to_requested_address(requested_obj);
}

void ArchiveHeapWriter::store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj) {
// Note: HeapShared::to_requested_address() is not necessary because
// the heap always starts at a deterministic address with UseCompressedOops==true.
narrowOop val = CompressedOops::encode_not_null(requested_obj);
*buffered_addr = val;
}

oop ArchiveHeapWriter::load_oop_from_buffer(oop* buffered_addr) {
return *buffered_addr;
}

oop ArchiveHeapWriter::load_oop_from_buffer(narrowOop* buffered_addr) {
return CompressedOops::decode(*buffered_addr);
}

template <typename T> void ArchiveHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer) {
oop source_referent = load_source_oop_from_buffer<T>(field_addr_in_buffer);
if (!CompressedOops::is_null(source_referent)) {
oop request_referent = source_obj_to_requested_obj(source_referent);
store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
mark_oop_pointer<T>(field_addr_in_buffer);
}
}

template <typename T> void ArchiveHeapWriter::mark_oop_pointer(T* buffered_addr) {
T* request_p = (T*)(buffered_addr_to_requested_addr((address)buffered_addr));
ResourceBitMap* oopmap;
address requested_region_bottom;

if (request_p >= (T*)_requested_closed_region_bottom) {
assert(request_p < (T*)_requested_closed_region_top, "sanity");
oopmap = _closed_oopmap;
requested_region_bottom = _requested_closed_region_bottom;
} else {
assert(request_p >= (T*)_requested_open_region_bottom, "sanity");
assert(request_p < (T*)_requested_open_region_top, "sanity");
oopmap = _open_oopmap;
requested_region_bottom = _requested_open_region_bottom;
}

// Mark the pointer in the oopmap
T* region_bottom = (T*)requested_region_bottom;
assert(request_p >= region_bottom, "must be");
BitMap::idx_t idx = request_p - region_bottom;
assert(idx < oopmap->size(), "overflow");
oopmap->set_bit(idx);
}

void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));

oop fake_oop = cast_to_oop(buffered_addr);
fake_oop->set_narrow_klass(nk);

// We need to retain the identity_hash, because it may have been used by some hashtables
// in the shared heap. This also has the side effect of pre-initializing the
// identity_hash for all shared objects, so they are less likely to be written
// into during run time, increasing the potential of memory sharing.
if (src_obj != nullptr) {
int src_hash = src_obj->identity_hash();
fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
assert(fake_oop->mark().is_unlocked(), "sanity");

DEBUG_ONLY(int archived_hash = fake_oop->identity_hash());
assert(src_hash == archived_hash, "Different hash codes: original %x, archived %x", src_hash, archived_hash);
}
}

// Relocate an element in the buffered copy of HeapShared::roots()
template <typename T> void ArchiveHeapWriter::relocate_root_at(oop requested_roots, int index) {
size_t offset = (size_t)((objArrayOop)requested_roots)->obj_at_offset<T>(index);
relocate_field_in_buffer<T>((T*)(buffered_heap_roots_addr() + offset));
}

class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
oop _src_obj;
address _buffered_obj;

public:
EmbeddedOopRelocator(oop src_obj, address buffered_obj) :
_src_obj(src_obj), _buffered_obj(buffered_obj) {}

void do_oop(narrowOop *p) { EmbeddedOopRelocator::do_oop_work(p); }
void do_oop( oop *p) { EmbeddedOopRelocator::do_oop_work(p); }

private:
template <class T> void do_oop_work(T *p) {
size_t field_offset = pointer_delta(p, _src_obj, sizeof(char));
ArchiveHeapWriter::relocate_field_in_buffer<T>((T*)(_buffered_obj + field_offset));
}
};

// Update all oop fields embedded in the buffered objects
void ArchiveHeapWriter::relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots,
GrowableArray<ArchiveHeapBitmapInfo>* closed_bitmaps,
GrowableArray<ArchiveHeapBitmapInfo>* open_bitmaps) {
size_t oopmap_unit = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
size_t closed_region_byte_size = _closed_top - _closed_bottom;
size_t open_region_byte_size = _open_top - _open_bottom;
ResourceBitMap closed_oopmap(closed_region_byte_size / oopmap_unit);
ResourceBitMap open_oopmap (open_region_byte_size / oopmap_unit);

_closed_oopmap = &closed_oopmap;
_open_oopmap = &open_oopmap;

auto iterator = [&] (oop src_obj, HeapShared::CachedOopInfo& info) {
oop requested_obj = requested_obj_from_buffer_offset(info.buffer_offset());
update_header_for_requested_obj(requested_obj, src_obj, src_obj->klass());

address buffered_obj = offset_to_buffered_address<address>(info.buffer_offset());
EmbeddedOopRelocator relocator(src_obj, buffered_obj);

src_obj->oop_iterate(&relocator);
};
HeapShared::archived_object_cache()->iterate_all(iterator);

// Relocate HeapShared::roots(), which is created in copy_roots_to_buffer() and
// doesn't have a corresponding src_obj, so we can't use EmbeddedOopRelocator on it.
oop requested_roots = requested_obj_from_buffer_offset(_heap_roots_bottom);
update_header_for_requested_obj(requested_roots, nullptr, Universe::objectArrayKlassObj());
int length = roots != nullptr ? roots->length() : 0;
for (int i = 0; i < length; i++) {
if (UseCompressedOops) {
relocate_root_at<narrowOop>(requested_roots, i);
} else {
relocate_root_at<oop>(requested_roots, i);
}
}

closed_bitmaps->append(make_bitmap_info(&closed_oopmap, /*is_open=*/false, /*is_oopmap=*/true));
open_bitmaps ->append(make_bitmap_info(&open_oopmap, /*is_open=*/false, /*is_oopmap=*/true));

closed_bitmaps->append(compute_ptrmap(/*is_open=*/false));
open_bitmaps ->append(compute_ptrmap(/*is_open=*/true));

_closed_oopmap = nullptr;
_open_oopmap = nullptr;
}

ArchiveHeapBitmapInfo ArchiveHeapWriter::make_bitmap_info(ResourceBitMap* bitmap, bool is_open, bool is_oopmap) {
size_t size_in_bits = bitmap->size();
size_t size_in_bytes;
uintptr_t* buffer;

if (size_in_bits > 0) {
size_in_bytes = bitmap->size_in_bytes();
buffer = (uintptr_t*)NEW_C_HEAP_ARRAY(char, size_in_bytes, mtInternal);
bitmap->write_to(buffer, size_in_bytes);
} else {
size_in_bytes = 0;
buffer = nullptr;
}

log_info(cds, heap)("%s @ " INTPTR_FORMAT " (" SIZE_FORMAT_W(6) " bytes) for %s heap region",
is_oopmap ? "Oopmap" : "Ptrmap",
p2i(buffer), size_in_bytes,
is_open? "open" : "closed");

ArchiveHeapBitmapInfo info;
info._map = (address)buffer;
info._size_in_bits = size_in_bits;
info._size_in_bytes = size_in_bytes;

return info;
}

void ArchiveHeapWriter::mark_native_pointer(oop src_obj, int field_offset) {
Metadata* ptr = src_obj->metadata_field_acquire(field_offset);
if (ptr != nullptr) {
NativePointerInfo info;
info._src_obj = src_obj;
info._field_offset = field_offset;
_native_pointers->append(info);
}
}

ArchiveHeapBitmapInfo ArchiveHeapWriter::compute_ptrmap(bool is_open) {
int num_non_null_ptrs = 0;
Metadata** bottom = (Metadata**) (is_open ? _requested_open_region_bottom: _requested_closed_region_bottom);
Metadata** top = (Metadata**) (is_open ? _requested_open_region_top: _requested_closed_region_top); // exclusive
ResourceBitMap ptrmap(top - bottom);

for (int i = 0; i < _native_pointers->length(); i++) {
NativePointerInfo info = _native_pointers->at(i);
oop src_obj = info._src_obj;
int field_offset = info._field_offset;
HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj);
if (p->in_open_region() == is_open) {
// requested_field_addr = the address of this field in the requested space
oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset());
Metadata** requested_field_addr = (Metadata**)(cast_from_oop<address>(requested_obj) + field_offset);
assert(bottom <= requested_field_addr && requested_field_addr < top, "range check");

// Mark this field in the bitmap
BitMap::idx_t idx = requested_field_addr - bottom;
ptrmap.set_bit(idx);
num_non_null_ptrs ++;

// Set the native pointer to the requested address of the metadata (at runtime, the metadata will have
// this address if the RO/RW regions are mapped at the default location).

Metadata** buffered_field_addr = requested_addr_to_buffered_addr(requested_field_addr);
Metadata* native_ptr = *buffered_field_addr;
assert(native_ptr != nullptr, "sanity");

address buffered_native_ptr = ArchiveBuilder::current()->get_buffered_addr((address)native_ptr);
address requested_native_ptr = ArchiveBuilder::current()->to_requested(buffered_native_ptr);
*buffered_field_addr = (Metadata*)requested_native_ptr;
}
}

log_info(cds, heap)("compute_ptrmap: marked %d non-null native pointers for %s heap region",
num_non_null_ptrs, is_open ? "open" : "closed");

if (num_non_null_ptrs == 0) {
ResourceBitMap empty;
return make_bitmap_info(&empty, is_open, /*is_oopmap=*/ false);
} else {
return make_bitmap_info(&ptrmap, is_open, /*is_oopmap=*/ false);
}
}

#endif // INCLUDE_CDS_JAVA_HEAP
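Most of the address arithmetic in this new file reduces to a single invariant: a buffered object's offset from buffer_bottom() equals its offset from _requested_open_region_bottom in the requested mapping, so converting between the two views is just swapping the base. A small standalone model of that round trip (illustrative only, using plain byte pointers rather than HotSpot types) is:

```cpp
// Model of the buffered <-> requested address mapping used by ArchiveHeapWriter.
// Both views share one offset; only the base pointer differs.
#include <cstddef>
#include <vector>

struct BufferModel {
  std::vector<unsigned char> buffer;  // stands in for _buffer
  unsigned char* requested_bottom;    // stands in for _requested_open_region_bottom

  unsigned char* buffer_bottom() { return buffer.data(); }

  size_t buffered_to_offset(unsigned char* buffered) {
    return size_t(buffered - buffer_bottom());
  }
  unsigned char* buffered_to_requested(unsigned char* buffered) {
    return requested_bottom + buffered_to_offset(buffered);
  }
  unsigned char* requested_to_buffered(unsigned char* requested) {
    return buffer_bottom() + size_t(requested - requested_bottom);
  }
};
```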
src/hotspot/share/cds/archiveHeapWriter.hpp (new file, 202 lines)
@@ -0,0 +1,202 @@
|
||||
/*
|
||||
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_CDS_ARCHIVEHEAPWRITER_HPP
|
||||
#define SHARE_CDS_ARCHIVEHEAPWRITER_HPP
|
||||
|
||||
#include "cds/heapShared.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "memory/allStatic.hpp"
|
||||
#include "oops/oopHandle.hpp"
|
||||
#include "utilities/bitMap.hpp"
|
||||
#include "utilities/exceptions.hpp"
|
||||
#include "utilities/growableArray.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#include "utilities/resourceHash.hpp"
|
||||
|
||||
#if INCLUDE_CDS_JAVA_HEAP
|
||||
|
||||
struct ArchiveHeapBitmapInfo;
|
||||
class MemRegion;
|
||||
|
||||
class ArchiveHeapWriter : AllStatic {
|
||||
class EmbeddedOopRelocator;
|
||||
struct NativePointerInfo {
|
||||
oop _src_obj;
|
||||
int _field_offset;
|
||||
};
|
||||
|
||||
// The minimum region size of all collectors that are supported by CDS in
|
||||
// ArchiveHeapLoader::can_map() mode. Currently only G1 is supported. G1's region size
|
||||
// depends on -Xmx, but can never be smaller than 1 * M.
|
||||
// (TODO: Perhaps change to 256K to be compatible with Shenandoah)
|
||||
static constexpr int MIN_GC_REGION_ALIGNMENT = 1 * M;
|
||||
|
||||
// "source" vs "buffered" vs "requested"
|
||||
//
|
||||
// [1] HeapShared::archive_objects() identifies all of the oops that need to be stored
|
||||
// into the CDS archive. These are entered into HeapShared::archived_object_cache().
|
||||
// These are called "source objects"
|
||||
//
|
||||
// [2] ArchiveHeapWriter::write() copies all source objects into ArchiveHeapWriter::_buffer,
|
||||
// which is a GrowableArray that sites outside of the valid heap range. Therefore
|
||||
// we avoid using the addresses of these copies as oops. They are usually
|
||||
// called "buffered_addr" in the code (of the type "address").
|
||||
//
|
||||
// [3] Each archived object has a "requested address" -- at run time, if the object
|
||||
// can be mapped at this address, we can avoid relocation.
|
||||
//
|
||||
// Note: the design and convention is the same as for the archiving of Metaspace objects.
|
||||
// See archiveBuilder.hpp.

  static GrowableArrayCHeap<u1, mtClassShared>* _buffer;

  // The exclusive top of the last object that has been copied into this->_buffer.
  static size_t _buffer_top;

  // The bounds of the open region inside this->_buffer.
  static size_t _open_bottom;   // inclusive
  static size_t _open_top;      // exclusive

  // The bounds of the closed region inside this->_buffer.
  static size_t _closed_bottom; // inclusive
  static size_t _closed_top;    // exclusive

  // The bottom of the copy of Heap::roots() inside this->_buffer.
  static size_t _heap_roots_bottom;
  static size_t _heap_roots_word_size;

  static address _requested_open_region_bottom;
  static address _requested_open_region_top;
  static address _requested_closed_region_bottom;
  static address _requested_closed_region_top;

  static ResourceBitMap* _closed_oopmap;
  static ResourceBitMap* _open_oopmap;

  static ArchiveHeapBitmapInfo _closed_oopmap_info;
  static ArchiveHeapBitmapInfo _open_oopmap_info;

  static GrowableArrayCHeap<NativePointerInfo, mtClassShared>* _native_pointers;
  static GrowableArrayCHeap<oop, mtClassShared>* _source_objs;

  typedef ResourceHashtable<size_t, oop,
      36137, // prime number
      AnyObj::C_HEAP,
      mtClassShared> BufferOffsetToSourceObjectTable;
  static BufferOffsetToSourceObjectTable* _buffer_offset_to_source_obj_table;

  static void allocate_buffer();
  static void ensure_buffer_space(size_t min_bytes);

  // Both Java byte arrays and GrowableArray use int indices and lengths. Do a safe typecast with range check.
  static int to_array_index(size_t i) {
    assert(i <= (size_t)max_jint, "must be");
    return (int)i;
  }
  static int to_array_length(size_t n) {
    return to_array_index(n);
  }

  template <typename T> static T offset_to_buffered_address(size_t offset) {
    return (T)(_buffer->adr_at(to_array_index(offset)));
  }

  static address buffer_bottom() {
    return offset_to_buffered_address<address>(0);
  }

  static address buffer_top() {
    return buffer_bottom() + _buffer_top;
  }

  static bool in_buffer(address buffered_addr) {
    return (buffer_bottom() <= buffered_addr) && (buffered_addr < buffer_top());
  }

  static size_t buffered_address_to_offset(address buffered_addr) {
    assert(in_buffer(buffered_addr), "sanity");
    return buffered_addr - buffer_bottom();
  }
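
  // Editorial sketch (not part of this patch): the helpers above are intended to be
  // inverses of each other for any offset within the used part of _buffer:
  //
  //   size_t off = ...;                                       // 0 <= off < _buffer_top
  //   address a  = offset_to_buffered_address<address>(off);
  //   assert(in_buffer(a), "sanity");
  //   assert(buffered_address_to_offset(a) == off, "round trip");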

  static void copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots);
  static void copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots);
  static void copy_source_objs_to_buffer_by_region(bool copy_open_region);
  static size_t copy_one_source_obj_to_buffer(oop src_obj);

  static void maybe_fill_gc_region_gap(size_t required_byte_size);
  static size_t filler_array_byte_size(int length);
  static int filler_array_length(size_t fill_bytes);
  static void init_filler_array_at_buffer_top(int array_length, size_t fill_bytes);
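
  // Editorial sketch (not part of this patch): a plausible shape for the gap-filling helpers
  // above, assuming an object must never straddle a MIN_GC_REGION_ALIGNMENT boundary so that
  // every GC region of the mapped archive starts at an object boundary:
  //
  //   size_t next_boundary = align_up(_buffer_top + 1, MIN_GC_REGION_ALIGNMENT);
  //   if (_buffer_top + required_byte_size > next_boundary) {
  //     size_t fill_bytes = next_boundary - _buffer_top;   // pad the gap with a dummy int[]
  //     init_filler_array_at_buffer_top(filler_array_length(fill_bytes), fill_bytes);
  //     _buffer_top = next_boundary;
  //   }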

  static void set_requested_address_for_regions(GrowableArray<MemRegion>* closed_regions,
                                                GrowableArray<MemRegion>* open_regions);
  static void relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots,
                                     GrowableArray<ArchiveHeapBitmapInfo>* closed_bitmaps,
                                     GrowableArray<ArchiveHeapBitmapInfo>* open_bitmaps);
  static ArchiveHeapBitmapInfo compute_ptrmap(bool is_open);
  static ArchiveHeapBitmapInfo make_bitmap_info(ResourceBitMap* bitmap, bool is_open, bool is_oopmap);
  static bool is_in_requested_regions(oop o);
  static oop requested_obj_from_buffer_offset(size_t offset);

  static oop load_oop_from_buffer(oop* buffered_addr);
  static oop load_oop_from_buffer(narrowOop* buffered_addr);
  static void store_oop_in_buffer(oop* buffered_addr, oop requested_obj);
  static void store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj);

  template <typename T> static oop load_source_oop_from_buffer(T* buffered_addr);
  template <typename T> static void store_requested_oop_in_buffer(T* buffered_addr, oop request_oop);

  template <typename T> static T* requested_addr_to_buffered_addr(T* p);
  template <typename T> static void relocate_field_in_buffer(T* field_addr_in_buffer);
  template <typename T> static void mark_oop_pointer(T* buffered_addr);
  template <typename T> static void relocate_root_at(oop requested_roots, int index);

  static void update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass);
public:
  static void init() NOT_CDS_JAVA_HEAP_RETURN;
  static void add_source_obj(oop src_obj);
  static bool is_too_large_to_archive(size_t size);
  static bool is_too_large_to_archive(oop obj);
  static bool is_string_too_large_to_archive(oop string);
  static void write(GrowableArrayCHeap<oop, mtClassShared>*,
                    GrowableArray<MemRegion>* closed_regions, GrowableArray<MemRegion>* open_regions,
                    GrowableArray<ArchiveHeapBitmapInfo>* closed_bitmaps,
                    GrowableArray<ArchiveHeapBitmapInfo>* open_bitmaps);
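
  // Editorial sketch (not part of this patch): the dump-time call sequence, as wired up by
  // MetaspaceShared/HeapShared elsewhere in this change:
  //
  //   ArchiveHeapWriter::init();                  // once, before heap objects are gathered
  //   ArchiveHeapWriter::add_source_obj(obj);     // for every object HeapShared decides to archive
  //   ArchiveHeapWriter::write(roots,             // copy + relocate everything into _buffer
  //                            closed_regions, open_regions,
  //                            closed_bitmaps, open_bitmaps);
  //
  // After write() returns, heap_region_requested_bottom() and the queries below describe
  // where the generated regions ask to be mapped at run time.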
  static address heap_region_requested_bottom(int heap_region_idx);
  static oop heap_roots_requested_address();
  static address buffered_heap_roots_addr() {
    return offset_to_buffered_address<address>(_heap_roots_bottom);
  }
  static size_t heap_roots_word_size() {
    return _heap_roots_word_size;
  }

  static void mark_native_pointer(oop src_obj, int offset);
  static oop source_obj_to_requested_obj(oop src_obj);
  static oop buffered_addr_to_source_obj(address buffered_addr);
  static address buffered_addr_to_requested_addr(address buffered_addr);
};
#endif // INCLUDE_CDS_JAVA_HEAP
#endif // SHARE_CDS_ARCHIVEHEAPWRITER_HPP
@@ -276,10 +276,10 @@ void CDSHeapVerifier::trace_to_root(outputStream* st, oop orig_obj) {

int CDSHeapVerifier::trace_to_root(outputStream* st, oop orig_obj, oop orig_field, HeapShared::CachedOopInfo* info) {
  int level = 0;
  if (info->_referrer != nullptr) {
    HeapShared::CachedOopInfo* ref = HeapShared::archived_object_cache()->get(info->_referrer);
  if (info->orig_referrer() != nullptr) {
    HeapShared::CachedOopInfo* ref = HeapShared::archived_object_cache()->get(info->orig_referrer());
    assert(ref != nullptr, "sanity");
    level = trace_to_root(st, info->_referrer, orig_obj, ref) + 1;
    level = trace_to_root(st, info->orig_referrer(), orig_obj, ref) + 1;
  } else if (java_lang_String::is_instance(orig_obj)) {
    st->print_cr("[%2d] (shared string table)", level++);
  }

@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapLoader.inline.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/archiveUtils.inline.hpp"
#include "cds/cds_globals.hpp"
#include "cds/dynamicArchive.hpp"
@@ -1632,16 +1633,19 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
    // This is an unused region (e.g., a heap region when !INCLUDE_CDS_JAVA_HEAP)
    requested_base = nullptr;
  } else if (HeapShared::is_heap_region(region)) {
    assert(HeapShared::can_write(), "sanity");
#if INCLUDE_CDS_JAVA_HEAP
    assert(!DynamicDumpSharedSpaces, "must be");
    requested_base = base;
    requested_base = (char*)ArchiveHeapWriter::heap_region_requested_bottom(region);
    if (UseCompressedOops) {
      mapping_offset = (size_t)((address)base - CompressedOops::base());
      mapping_offset = (size_t)((address)requested_base - CompressedOops::base());
      assert((mapping_offset >> CompressedOops::shift()) << CompressedOops::shift() == mapping_offset, "must be");
    } else {
#if INCLUDE_G1GC
      mapping_offset = requested_base - (char*)G1CollectedHeap::heap()->reserved().start();
#endif
    }
#endif // INCLUDE_CDS_JAVA_HEAP
  } else {
    char* requested_SharedBaseAddress = (char*)MetaspaceShared::requested_base_address();
    requested_base = ArchiveBuilder::current()->to_requested(base);

@@ -25,6 +25,7 @@
|
||||
#include "precompiled.hpp"
|
||||
#include "cds/archiveBuilder.hpp"
|
||||
#include "cds/archiveHeapLoader.hpp"
|
||||
#include "cds/archiveHeapWriter.hpp"
|
||||
#include "cds/archiveUtils.hpp"
|
||||
#include "cds/cdsHeapVerifier.hpp"
|
||||
#include "cds/heapShared.hpp"
|
||||
@@ -61,7 +62,6 @@
|
||||
#include "utilities/copy.hpp"
|
||||
#if INCLUDE_G1GC
|
||||
#include "gc/g1/g1CollectedHeap.hpp"
|
||||
#include "gc/g1/heapRegion.hpp"
|
||||
#endif
|
||||
|
||||
#if INCLUDE_CDS_JAVA_HEAP
|
||||
@@ -82,8 +82,8 @@ struct ArchivableStaticFieldInfo {
|
||||
};
|
||||
|
||||
bool HeapShared::_disable_writing = false;
|
||||
bool HeapShared::_copying_open_region_objects = false;
|
||||
DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;
|
||||
GrowableArrayCHeap<Metadata**, mtClassShared>* HeapShared::_native_pointers = nullptr;
|
||||
|
||||
size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
|
||||
size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
|
||||
@@ -143,14 +143,6 @@ OopHandle HeapShared::_roots;
|
||||
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
|
||||
KlassToOopHandleTable* HeapShared::_scratch_java_mirror_table = nullptr;
|
||||
|
||||
#ifdef ASSERT
|
||||
bool HeapShared::is_archived_object_during_dumptime(oop p) {
|
||||
assert(HeapShared::can_write(), "must be");
|
||||
assert(DumpSharedSpaces, "this function is only used with -Xshare:dump");
|
||||
return Universe::heap()->is_archived_object(p);
|
||||
}
|
||||
#endif
|
||||
|
||||
static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
|
||||
for (int i = 0; fields[i].valid(); i++) {
|
||||
if (fields[i].klass == ik) {
|
||||
@@ -220,16 +212,10 @@ void HeapShared::reset_archived_object_states(TRAPS) {
}

HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
HeapShared::OriginalObjectTable* HeapShared::_original_object_table = nullptr;
oop HeapShared::find_archived_heap_object(oop obj) {

bool HeapShared::has_been_archived(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");
  ArchivedObjectCache* cache = archived_object_cache();
  CachedOopInfo* p = cache->get(obj);
  if (p != nullptr) {
    return p->_obj;
  } else {
    return nullptr;
  }
  return archived_object_cache()->get(obj) != nullptr;
}
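
// Editorial sketch (not part of this patch): with the bool-returning API above, callers in
// this change follow this pattern instead of consuming a returned archived oop:
//
//   if (!HeapShared::has_been_archived(obj)) {
//     if (!HeapShared::archive_object(obj)) {
//       // too large (see ArchiveHeapWriter::is_too_large_to_archive); the caller decides
//       // whether to skip the subgraph or abort the dump
//     }
//   }
//
// The actual copying into archive regions is deferred to ArchiveHeapWriter::write().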

int HeapShared::append_root(oop obj) {
@@ -263,19 +249,13 @@ objArrayOop HeapShared::roots() {
|
||||
// Returns an objArray that contains all the roots of the archived objects
|
||||
oop HeapShared::get_root(int index, bool clear) {
|
||||
assert(index >= 0, "sanity");
|
||||
if (DumpSharedSpaces) {
|
||||
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
|
||||
assert(_pending_roots != nullptr, "sanity");
|
||||
return _pending_roots->at(index);
|
||||
} else {
|
||||
assert(UseSharedSpaces, "must be");
|
||||
assert(!_roots.is_empty(), "must have loaded shared heap");
|
||||
oop result = roots()->obj_at(index);
|
||||
if (clear) {
|
||||
clear_root(index);
|
||||
}
|
||||
return result;
|
||||
assert(!DumpSharedSpaces && UseSharedSpaces, "runtime only");
|
||||
assert(!_roots.is_empty(), "must have loaded shared heap");
|
||||
oop result = roots()->obj_at(index);
|
||||
if (clear) {
|
||||
clear_root(index);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
void HeapShared::clear_root(int index) {
|
||||
@@ -290,71 +270,47 @@ void HeapShared::clear_root(int index) {
|
||||
}
|
||||
}
|
||||
|
||||
bool HeapShared::is_too_large_to_archive(oop o) {
|
||||
// TODO: To make the CDS heap mappable for all collectors, this function should
|
||||
// reject objects that may be too large for *any* collector.
|
||||
assert(UseG1GC, "implementation limitation");
|
||||
size_t sz = align_up(o->size() * HeapWordSize, ObjectAlignmentInBytes);
|
||||
size_t max = /*G1*/HeapRegion::min_region_size_in_words() * HeapWordSize;
|
||||
return (sz > max);
|
||||
}
|
||||
|
||||
oop HeapShared::archive_object(oop obj) {
|
||||
bool HeapShared::archive_object(oop obj) {
|
||||
assert(DumpSharedSpaces, "dump-time only");
|
||||
|
||||
assert(!obj->is_stackChunk(), "do not archive stack chunks");
|
||||
|
||||
oop ao = find_archived_heap_object(obj);
|
||||
if (ao != nullptr) {
|
||||
// already archived
|
||||
return ao;
|
||||
if (has_been_archived(obj)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
int len = obj->size();
|
||||
if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
|
||||
if (ArchiveHeapWriter::is_too_large_to_archive(obj->size())) {
|
||||
log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
|
||||
p2i(obj), (size_t)obj->size());
|
||||
return nullptr;
|
||||
}
|
||||
p2i(obj), obj->size());
|
||||
return false;
|
||||
} else {
|
||||
count_allocation(obj->size());
|
||||
ArchiveHeapWriter::add_source_obj(obj);
|
||||
|
||||
oop archived_oop = cast_to_oop(G1CollectedHeap::heap()->archive_mem_allocate(len));
|
||||
if (archived_oop != nullptr) {
|
||||
count_allocation(len);
|
||||
Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
|
||||
// Reinitialize markword to remove age/marking/locking/etc.
|
||||
//
|
||||
// We need to retain the identity_hash, because it may have been used by some hashtables
|
||||
// in the shared heap. This also has the side effect of pre-initializing the
|
||||
// identity_hash for all shared objects, so they are less likely to be written
|
||||
// into during run time, increasing the potential of memory sharing.
|
||||
int hash_original = obj->identity_hash();
|
||||
archived_oop->set_mark(markWord::prototype().copy_set_hash(hash_original));
|
||||
assert(archived_oop->mark().is_unlocked(), "sanity");
|
||||
CachedOopInfo info = make_cached_oop_info();
|
||||
archived_object_cache()->put(obj, info);
|
||||
mark_native_pointers(obj);
|
||||
|
||||
DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
|
||||
assert(hash_original == hash_archived, "Different hash codes: original %x, archived %x", hash_original, hash_archived);
|
||||
|
||||
ArchivedObjectCache* cache = archived_object_cache();
|
||||
CachedOopInfo info = make_cached_oop_info(archived_oop);
|
||||
cache->put(obj, info);
|
||||
if (_original_object_table != nullptr) {
|
||||
_original_object_table->put(archived_oop, obj);
|
||||
}
|
||||
mark_native_pointers(obj, archived_oop);
|
||||
if (log_is_enabled(Debug, cds, heap)) {
|
||||
ResourceMark rm;
|
||||
log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT " : %s",
|
||||
p2i(obj), p2i(archived_oop), obj->klass()->external_name());
|
||||
log_debug(cds, heap)("Archived heap object " PTR_FORMAT " : %s",
|
||||
p2i(obj), obj->klass()->external_name());
|
||||
}
|
||||
} else {
|
||||
log_error(cds, heap)(
|
||||
"Cannot allocate space for object " PTR_FORMAT " in archived heap region",
|
||||
p2i(obj));
|
||||
log_error(cds)("Out of memory. Please run with a larger Java heap, current MaxHeapSize = "
|
||||
SIZE_FORMAT "M", MaxHeapSize/M);
|
||||
os::_exit(-1);
|
||||
|
||||
if (java_lang_Module::is_instance(obj)) {
|
||||
if (Modules::check_module_oop(obj)) {
|
||||
Modules::update_oops_in_archived_module(obj, append_root(obj));
|
||||
}
|
||||
java_lang_Module::set_module_entry(obj, nullptr);
|
||||
} else if (java_lang_ClassLoader::is_instance(obj)) {
|
||||
// class_data will be restored explicitly at run time.
|
||||
guarantee(obj == SystemDictionary::java_platform_loader() ||
|
||||
obj == SystemDictionary::java_system_loader() ||
|
||||
java_lang_ClassLoader::loader_data(obj) == nullptr, "must be");
|
||||
java_lang_ClassLoader::release_set_loader_data(obj, nullptr);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
return archived_oop;
|
||||
}
|
||||
|
||||
class KlassToOopHandleTable: public ResourceHashtable<Klass*, OopHandle,
|
||||
@@ -424,14 +380,14 @@ void HeapShared::archive_java_mirrors() {
|
||||
if (!is_reference_type(bt)) {
|
||||
oop m = _scratch_basic_type_mirrors[i].resolve();
|
||||
assert(m != nullptr, "sanity");
|
||||
oop archived_m = archive_reachable_objects_from(1, _default_subgraph_info, m, /*is_closed_archive=*/ false);
|
||||
assert(archived_m != nullptr, "sanity");
|
||||
bool success = archive_reachable_objects_from(1, _default_subgraph_info, m, /*is_closed_archive=*/ false);
|
||||
assert(success, "sanity");
|
||||
|
||||
log_trace(cds, heap, mirror)(
|
||||
"Archived %s mirror object from " PTR_FORMAT " ==> " PTR_FORMAT,
|
||||
type2name(bt), p2i(m), p2i(archived_m));
|
||||
"Archived %s mirror object from " PTR_FORMAT,
|
||||
type2name(bt), p2i(m));
|
||||
|
||||
Universe::set_archived_basic_type_mirror_index(bt, append_root(archived_m));
|
||||
Universe::set_archived_basic_type_mirror_index(bt, append_root(m));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -442,23 +398,23 @@ void HeapShared::archive_java_mirrors() {
|
||||
oop m = scratch_java_mirror(orig_k);
|
||||
if (m != nullptr) {
|
||||
Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k);
|
||||
oop archived_m = archive_reachable_objects_from(1, _default_subgraph_info, m, /*is_closed_archive=*/ false);
|
||||
guarantee(archived_m != nullptr, "scratch mirrors should not point to any unachivable objects");
|
||||
buffered_k->set_archived_java_mirror(append_root(archived_m));
|
||||
bool success = archive_reachable_objects_from(1, _default_subgraph_info, m, /*is_closed_archive=*/ false);
|
||||
guarantee(success, "scratch mirrors should not point to any unachivable objects");
|
||||
buffered_k->set_archived_java_mirror(append_root(m));
|
||||
ResourceMark rm;
|
||||
log_trace(cds, heap, mirror)(
|
||||
"Archived %s mirror object from " PTR_FORMAT " ==> " PTR_FORMAT,
|
||||
buffered_k->external_name(), p2i(m), p2i(archived_m));
|
||||
"Archived %s mirror object from " PTR_FORMAT,
|
||||
buffered_k->external_name(), p2i(m));
|
||||
|
||||
// archive the resolved_references array
|
||||
if (buffered_k->is_instance_klass()) {
|
||||
InstanceKlass* ik = InstanceKlass::cast(buffered_k);
|
||||
oop rr = ik->constants()->prepare_resolved_references_for_archiving();
|
||||
if (rr != nullptr && !is_too_large_to_archive(rr)) {
|
||||
oop archived_obj = HeapShared::archive_reachable_objects_from(1, _default_subgraph_info, rr,
|
||||
/*is_closed_archive=*/false);
|
||||
assert(archived_obj != nullptr, "already checked not too large to archive");
|
||||
int root_index = append_root(archived_obj);
|
||||
if (rr != nullptr && !ArchiveHeapWriter::is_too_large_to_archive(rr)) {
|
||||
bool success = HeapShared::archive_reachable_objects_from(1, _default_subgraph_info, rr,
|
||||
/*is_closed_archive=*/false);
|
||||
assert(success, "must be");
|
||||
int root_index = append_root(rr);
|
||||
ik->constants()->cache()->set_archived_references(root_index);
|
||||
}
|
||||
}
|
||||
@@ -468,29 +424,10 @@ void HeapShared::archive_java_mirrors() {
|
||||
delete_seen_objects_table();
|
||||
}
|
||||
|
||||
void HeapShared::mark_native_pointers(oop orig_obj, oop archived_obj) {
|
||||
void HeapShared::mark_native_pointers(oop orig_obj) {
|
||||
if (java_lang_Class::is_instance(orig_obj)) {
|
||||
mark_one_native_pointer(archived_obj, java_lang_Class::klass_offset());
|
||||
mark_one_native_pointer(archived_obj, java_lang_Class::array_klass_offset());
|
||||
}
|
||||
}
|
||||
|
||||
void HeapShared::mark_one_native_pointer(oop archived_obj, int offset) {
|
||||
Metadata* ptr = archived_obj->metadata_field_acquire(offset);
|
||||
if (ptr != nullptr) {
|
||||
// Set the native pointer to the requested address (at runtime, if the metadata
|
||||
// is mapped at the default location, it will be at this address).
|
||||
address buffer_addr = ArchiveBuilder::current()->get_buffered_addr((address)ptr);
|
||||
address requested_addr = ArchiveBuilder::current()->to_requested(buffer_addr);
|
||||
archived_obj->metadata_field_put(offset, (Metadata*)requested_addr);
|
||||
|
||||
// Remember this pointer. At runtime, if the metadata is mapped at a non-default
|
||||
// location, the pointer needs to be patched (see ArchiveHeapLoader::patch_native_pointers()).
|
||||
_native_pointers->append(archived_obj->field_addr<Metadata*>(offset));
|
||||
|
||||
log_debug(cds, heap, mirror)(
|
||||
"Marked metadata field at %d: " PTR_FORMAT " ==> " PTR_FORMAT,
|
||||
offset, p2i(ptr), p2i(requested_addr));
|
||||
ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::klass_offset());
|
||||
ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::array_klass_offset());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -517,6 +454,7 @@ void HeapShared::check_enum_obj(int level,
|
||||
KlassSubGraphInfo* subgraph_info,
|
||||
oop orig_obj,
|
||||
bool is_closed_archive) {
|
||||
assert(level > 1, "must never be called at the first (outermost) level");
|
||||
Klass* k = orig_obj->klass();
|
||||
Klass* buffered_k = ArchiveBuilder::get_buffered_klass(k);
|
||||
if (!k->is_instance_klass()) {
|
||||
@@ -544,11 +482,12 @@ void HeapShared::check_enum_obj(int level,
|
||||
guarantee(false, "static field %s::%s is of the wrong type",
|
||||
ik->external_name(), fd.name()->as_C_string());
|
||||
}
|
||||
oop archived_oop_field = archive_reachable_objects_from(level, subgraph_info, oop_field, is_closed_archive);
|
||||
int root_index = append_root(archived_oop_field);
|
||||
log_info(cds, heap)("Archived enum obj @%d %s::%s (" INTPTR_FORMAT " -> " INTPTR_FORMAT ")",
|
||||
bool success = archive_reachable_objects_from(level, subgraph_info, oop_field, is_closed_archive);
|
||||
assert(success, "VM should have exited with unarchivable objects for _level > 1");
|
||||
int root_index = append_root(oop_field);
|
||||
log_info(cds, heap)("Archived enum obj @%d %s::%s (" INTPTR_FORMAT ")",
|
||||
root_index, ik->external_name(), fd.name()->as_C_string(),
|
||||
p2i((oopDesc*)oop_field), p2i((oopDesc*)archived_oop_field));
|
||||
p2i((oopDesc*)oop_field));
|
||||
SystemDictionaryShared::add_enum_klass_static_field(ik, root_index);
|
||||
}
|
||||
}
|
||||
@@ -582,37 +521,17 @@ bool HeapShared::initialize_enum_klass(InstanceKlass* k, TRAPS) {
|
||||
return true;
|
||||
}
|
||||
|
||||
void HeapShared::run_full_gc_in_vm_thread() {
|
||||
if (HeapShared::can_write()) {
|
||||
// Avoid fragmentation while archiving heap objects.
|
||||
// We do this inside a safepoint, so that no further allocation can happen after GC
|
||||
// has finished.
|
||||
if (GCLocker::is_active()) {
|
||||
// Just checking for safety ...
|
||||
// This should not happen during -Xshare:dump. If you see this, probably the Java core lib
|
||||
// has been modified such that JNI code is executed in some clean up threads after
|
||||
// we have finished class loading.
|
||||
log_warning(cds)("GC locker is held, unable to start extra compacting GC. This may produce suboptimal results.");
|
||||
} else {
|
||||
log_info(cds)("Run GC ...");
|
||||
Universe::heap()->collect_as_vm_thread(GCCause::_archive_time_gc);
|
||||
log_info(cds)("Run GC done");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void HeapShared::archive_objects(GrowableArray<MemRegion>* closed_regions,
|
||||
GrowableArray<MemRegion>* open_regions) {
|
||||
|
||||
G1HeapVerifier::verify_ready_for_archiving();
|
||||
|
||||
GrowableArray<MemRegion>* open_regions,
|
||||
GrowableArray<ArchiveHeapBitmapInfo>* closed_bitmaps,
|
||||
GrowableArray<ArchiveHeapBitmapInfo>* open_bitmaps) {
|
||||
{
|
||||
NoSafepointVerifier nsv;
|
||||
|
||||
_default_subgraph_info = init_subgraph_info(vmClasses::Object_klass(), false);
|
||||
|
||||
// Cache for recording where the archived objects are copied to
|
||||
create_archived_object_cache(log_is_enabled(Info, cds, map));
|
||||
create_archived_object_cache();
|
||||
|
||||
log_info(cds)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
|
||||
UseCompressedOops ? p2i(CompressedOops::begin()) :
|
||||
@@ -620,16 +539,18 @@ void HeapShared::archive_objects(GrowableArray<MemRegion>* closed_regions,
|
||||
UseCompressedOops ? p2i(CompressedOops::end()) :
|
||||
p2i((address)G1CollectedHeap::heap()->reserved().end()));
|
||||
log_info(cds)("Dumping objects to closed archive heap region ...");
|
||||
copy_closed_objects(closed_regions);
|
||||
copy_closed_objects();
|
||||
|
||||
_copying_open_region_objects = true;
|
||||
|
||||
log_info(cds)("Dumping objects to open archive heap region ...");
|
||||
copy_open_objects(open_regions);
|
||||
copy_open_objects();
|
||||
|
||||
CDSHeapVerifier::verify();
|
||||
check_default_subgraph_classes();
|
||||
}
|
||||
|
||||
G1HeapVerifier::verify_archive_regions();
|
||||
ArchiveHeapWriter::write(_pending_roots, closed_regions, open_regions, closed_bitmaps, open_bitmaps);
|
||||
StringTable::write_shared_table(_dumped_interned_strings);
|
||||
}
|
||||
|
||||
@@ -638,14 +559,13 @@ void HeapShared::copy_interned_strings() {
|
||||
|
||||
auto copier = [&] (oop s, bool value_ignored) {
|
||||
assert(s != nullptr, "sanity");
|
||||
typeArrayOop value = java_lang_String::value_no_keepalive(s);
|
||||
if (!HeapShared::is_too_large_to_archive(value)) {
|
||||
oop archived_s = archive_reachable_objects_from(1, _default_subgraph_info,
|
||||
s, /*is_closed_archive=*/true);
|
||||
assert(archived_s != nullptr, "already checked not too large to archive");
|
||||
if (!ArchiveHeapWriter::is_string_too_large_to_archive(s)) {
|
||||
bool success = archive_reachable_objects_from(1, _default_subgraph_info,
|
||||
s, /*is_closed_archive=*/true);
|
||||
assert(success, "must be");
|
||||
// Prevent string deduplication from changing the value field to
|
||||
// something not in the archive.
|
||||
java_lang_String::set_deduplication_forbidden(archived_s);
|
||||
java_lang_String::set_deduplication_forbidden(s);
|
||||
}
|
||||
};
|
||||
_dumped_interned_strings->iterate_all(copier);
|
||||
@@ -653,27 +573,20 @@ void HeapShared::copy_interned_strings() {
|
||||
delete_seen_objects_table();
|
||||
}
|
||||
|
||||
void HeapShared::copy_closed_objects(GrowableArray<MemRegion>* closed_regions) {
|
||||
void HeapShared::copy_closed_objects() {
|
||||
assert(HeapShared::can_write(), "must be");
|
||||
|
||||
G1CollectedHeap::heap()->begin_archive_alloc_range();
|
||||
|
||||
// Archive interned string objects
|
||||
copy_interned_strings();
|
||||
|
||||
archive_object_subgraphs(closed_archive_subgraph_entry_fields,
|
||||
true /* is_closed_archive */,
|
||||
false /* is_full_module_graph */);
|
||||
|
||||
G1CollectedHeap::heap()->end_archive_alloc_range(closed_regions,
|
||||
os::vm_allocation_granularity());
|
||||
}
|
||||
|
||||
void HeapShared::copy_open_objects(GrowableArray<MemRegion>* open_regions) {
|
||||
void HeapShared::copy_open_objects() {
|
||||
assert(HeapShared::can_write(), "must be");
|
||||
|
||||
G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);
|
||||
|
||||
archive_java_mirrors();
|
||||
|
||||
archive_object_subgraphs(open_archive_subgraph_entry_fields,
|
||||
@@ -685,43 +598,6 @@ void HeapShared::copy_open_objects(GrowableArray<MemRegion>* open_regions) {
|
||||
true /* is_full_module_graph */);
|
||||
Modules::verify_archived_modules();
|
||||
}
|
||||
|
||||
copy_roots();
|
||||
|
||||
G1CollectedHeap::heap()->end_archive_alloc_range(open_regions,
|
||||
os::vm_allocation_granularity());
|
||||
}
|
||||
|
||||
// Copy _pending_archive_roots into an objArray
|
||||
void HeapShared::copy_roots() {
|
||||
// HeapShared::roots() points into an ObjArray in the open archive region. A portion of the
|
||||
// objects in this array are discovered during HeapShared::archive_objects(). For example,
|
||||
// in HeapShared::archive_reachable_objects_from() -> HeapShared::check_enum_obj().
|
||||
// However, HeapShared::archive_objects() happens inside a safepoint, so we can't
|
||||
// allocate a "regular" ObjArray and pass the result to HeapShared::archive_object().
|
||||
// Instead, we have to roll our own alloc/copy routine here.
|
||||
int length = _pending_roots != nullptr ? _pending_roots->length() : 0;
|
||||
size_t size = objArrayOopDesc::object_size(length);
|
||||
Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
|
||||
HeapWord* mem = G1CollectedHeap::heap()->archive_mem_allocate(size);
|
||||
|
||||
memset(mem, 0, size * BytesPerWord);
|
||||
{
|
||||
// This is copied from MemAllocator::finish
|
||||
oopDesc::set_mark(mem, markWord::prototype());
|
||||
oopDesc::release_set_klass(mem, k);
|
||||
}
|
||||
{
|
||||
// This is copied from ObjArrayAllocator::initialize
|
||||
arrayOopDesc::set_length(mem, length);
|
||||
}
|
||||
|
||||
_roots = OopHandle(Universe::vm_global(), cast_to_oop(mem));
|
||||
for (int i = 0; i < length; i++) {
|
||||
roots()->obj_at_put(i, _pending_roots->at(i));
|
||||
}
|
||||
log_info(cds)("archived obj roots[%d] = " SIZE_FORMAT " words, klass = %p, obj = %p", length, size, k, mem);
|
||||
count_allocation(roots()->size());
|
||||
}
|
||||
|
||||
//
|
||||
@@ -985,7 +861,9 @@ void HeapShared::serialize_root(SerializeClosure* soc) {
|
||||
}
|
||||
} else {
|
||||
// writing
|
||||
roots_oop = roots();
|
||||
if (HeapShared::can_write()) {
|
||||
roots_oop = ArchiveHeapWriter::heap_roots_requested_address();
|
||||
}
|
||||
soc->do_oop(&roots_oop); // write to archive
|
||||
}
|
||||
}
|
||||
@@ -1223,8 +1101,7 @@ class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
|
||||
bool _is_closed_archive;
|
||||
bool _record_klasses_only;
|
||||
KlassSubGraphInfo* _subgraph_info;
|
||||
oop _orig_referencing_obj;
|
||||
oop _archived_referencing_obj;
|
||||
oop _referencing_obj;
|
||||
|
||||
// The following are for maintaining a stack for determining
|
||||
// CachedOopInfo::_referrer
|
||||
@@ -1235,11 +1112,11 @@ class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
|
||||
bool is_closed_archive,
|
||||
bool record_klasses_only,
|
||||
KlassSubGraphInfo* subgraph_info,
|
||||
oop orig, oop archived) :
|
||||
oop orig) :
|
||||
_level(level), _is_closed_archive(is_closed_archive),
|
||||
_record_klasses_only(record_klasses_only),
|
||||
_subgraph_info(subgraph_info),
|
||||
_orig_referencing_obj(orig), _archived_referencing_obj(archived) {
|
||||
_referencing_obj(orig) {
|
||||
_last = _current;
|
||||
_current = this;
|
||||
}
|
||||
@@ -1253,16 +1130,12 @@ class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
|
||||
template <class T> void do_oop_work(T *p) {
|
||||
oop obj = RawAccess<>::oop_load(p);
|
||||
if (!CompressedOops::is_null(obj)) {
|
||||
assert(!HeapShared::is_archived_object_during_dumptime(obj),
|
||||
"original objects must not point to archived objects");
|
||||
|
||||
size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char));
|
||||
T* new_p = (T*)(cast_from_oop<address>(_archived_referencing_obj) + field_delta);
|
||||
size_t field_delta = pointer_delta(p, _referencing_obj, sizeof(char));
|
||||
|
||||
if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
|
||||
ResourceMark rm;
|
||||
log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size " SIZE_FORMAT " %s", _level,
|
||||
_orig_referencing_obj->klass()->external_name(), field_delta,
|
||||
_referencing_obj->klass()->external_name(), field_delta,
|
||||
p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
|
||||
if (log_is_enabled(Trace, cds, heap)) {
|
||||
LogTarget(Trace, cds, heap) log;
|
||||
@@ -1271,37 +1144,24 @@ class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
|
||||
}
|
||||
}
|
||||
|
||||
oop archived = HeapShared::archive_reachable_objects_from(
|
||||
bool success = HeapShared::archive_reachable_objects_from(
|
||||
_level + 1, _subgraph_info, obj, _is_closed_archive);
|
||||
assert(archived != nullptr, "VM should have exited with unarchivable objects for _level > 1");
|
||||
assert(HeapShared::is_archived_object_during_dumptime(archived), "must be");
|
||||
|
||||
if (!_record_klasses_only) {
|
||||
// Update the reference in the archived copy of the referencing object.
|
||||
log_debug(cds, heap)("(%d) updating oop @[" PTR_FORMAT "] " PTR_FORMAT " ==> " PTR_FORMAT,
|
||||
_level, p2i(new_p), p2i(obj), p2i(archived));
|
||||
RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
|
||||
}
|
||||
assert(success, "VM should have exited with unarchivable objects for _level > 1");
|
||||
}
|
||||
}
|
||||
|
||||
public:
|
||||
static WalkOopAndArchiveClosure* current() { return _current; }
|
||||
oop orig_referencing_obj() { return _orig_referencing_obj; }
|
||||
oop referencing_obj() { return _referencing_obj; }
|
||||
KlassSubGraphInfo* subgraph_info() { return _subgraph_info; }
|
||||
};
|
||||
|
||||
WalkOopAndArchiveClosure* WalkOopAndArchiveClosure::_current = nullptr;
|
||||
|
||||
HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop orig_obj) {
|
||||
CachedOopInfo info;
|
||||
HeapShared::CachedOopInfo HeapShared::make_cached_oop_info() {
|
||||
WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current();
|
||||
|
||||
info._subgraph_info = (walker == nullptr) ? nullptr : walker->subgraph_info();
|
||||
info._referrer = (walker == nullptr) ? nullptr : walker->orig_referencing_obj();
|
||||
info._obj = orig_obj;
|
||||
|
||||
return info;
|
||||
oop referrer = (walker == nullptr) ? nullptr : walker->referencing_obj();
|
||||
return CachedOopInfo(referrer, _copying_open_region_objects);
|
||||
}
|
||||
|
||||
void HeapShared::check_closed_region_object(InstanceKlass* k) {
|
||||
@@ -1324,12 +1184,11 @@ void HeapShared::check_closed_region_object(InstanceKlass* k) {
|
||||
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
|
||||
// trace all objects that are reachable from it, and make sure these objects are archived.
|
||||
// (3) Record the klasses of all orig_obj and all reachable objects.
|
||||
oop HeapShared::archive_reachable_objects_from(int level,
|
||||
KlassSubGraphInfo* subgraph_info,
|
||||
oop orig_obj,
|
||||
bool is_closed_archive) {
|
||||
bool HeapShared::archive_reachable_objects_from(int level,
|
||||
KlassSubGraphInfo* subgraph_info,
|
||||
oop orig_obj,
|
||||
bool is_closed_archive) {
|
||||
assert(orig_obj != nullptr, "must be");
|
||||
assert(!is_archived_object_during_dumptime(orig_obj), "sanity");
|
||||
|
||||
if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
|
||||
// This object has injected fields that cannot be supported easily, so we disallow them for now.
|
||||
@@ -1350,25 +1209,18 @@ oop HeapShared::archive_reachable_objects_from(int level,
|
||||
os::_exit(1);
|
||||
}
|
||||
|
||||
oop archived_obj = find_archived_heap_object(orig_obj);
|
||||
if (java_lang_String::is_instance(orig_obj) && archived_obj != nullptr) {
|
||||
// To save time, don't walk strings that are already archived. They just contain
|
||||
// pointers to a type array, whose klass doesn't need to be recorded.
|
||||
return archived_obj;
|
||||
}
|
||||
|
||||
if (has_been_seen_during_subgraph_recording(orig_obj)) {
|
||||
// orig_obj has already been archived and traced. Nothing more to do.
|
||||
return archived_obj;
|
||||
return true;
|
||||
} else {
|
||||
set_has_been_seen_during_subgraph_recording(orig_obj);
|
||||
}
|
||||
|
||||
bool record_klasses_only = (archived_obj != nullptr);
|
||||
if (archived_obj == nullptr) {
|
||||
bool already_archived = has_been_archived(orig_obj);
|
||||
bool record_klasses_only = already_archived;
|
||||
if (!already_archived) {
|
||||
++_num_new_archived_objs;
|
||||
archived_obj = archive_object(orig_obj);
|
||||
if (archived_obj == nullptr) {
|
||||
if (!archive_object(orig_obj)) {
|
||||
// Skip archiving the sub-graph referenced from the current entry field.
|
||||
ResourceMark rm;
|
||||
log_error(cds, heap)(
|
||||
@@ -1378,7 +1230,7 @@ oop HeapShared::archive_reachable_objects_from(int level,
|
||||
if (level == 1) {
|
||||
// Don't archive a subgraph root that's too big. For archives static fields, that's OK
|
||||
// as the Java code will take care of initializing this field dynamically.
|
||||
return nullptr;
|
||||
return false;
|
||||
} else {
|
||||
// We don't know how to handle an object that has been archived, but some of its reachable
|
||||
// objects cannot be archived. Bail out for now. We might need to fix this in the future if
|
||||
@@ -1386,34 +1238,20 @@ oop HeapShared::archive_reachable_objects_from(int level,
|
||||
os::_exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
if (java_lang_Module::is_instance(orig_obj)) {
|
||||
if (Modules::check_module_oop(orig_obj)) {
|
||||
Modules::update_oops_in_archived_module(orig_obj, append_root(archived_obj));
|
||||
}
|
||||
java_lang_Module::set_module_entry(archived_obj, nullptr);
|
||||
} else if (java_lang_ClassLoader::is_instance(orig_obj)) {
|
||||
// class_data will be restored explicitly at run time.
|
||||
guarantee(orig_obj == SystemDictionary::java_platform_loader() ||
|
||||
orig_obj == SystemDictionary::java_system_loader() ||
|
||||
java_lang_ClassLoader::loader_data(orig_obj) == nullptr, "must be");
|
||||
java_lang_ClassLoader::release_set_loader_data(archived_obj, nullptr);
|
||||
}
|
||||
}
|
||||
|
||||
assert(archived_obj != nullptr, "must be");
|
||||
Klass *orig_k = orig_obj->klass();
|
||||
subgraph_info->add_subgraph_object_klass(orig_k);
|
||||
|
||||
WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only,
|
||||
subgraph_info, orig_obj, archived_obj);
|
||||
subgraph_info, orig_obj);
|
||||
orig_obj->oop_iterate(&walker);
|
||||
if (is_closed_archive && orig_k->is_instance_klass()) {
|
||||
check_closed_region_object(InstanceKlass::cast(orig_k));
|
||||
}
|
||||
|
||||
check_enum_obj(level + 1, subgraph_info, orig_obj, is_closed_archive);
|
||||
return archived_obj;
|
||||
return true;
|
||||
}
|
||||
|
||||
//
|
||||
@@ -1472,17 +1310,17 @@ void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
|
||||
f->print_on(&out);
|
||||
}
|
||||
|
||||
oop af = archive_reachable_objects_from(1, subgraph_info, f, is_closed_archive);
|
||||
bool success = archive_reachable_objects_from(1, subgraph_info, f, is_closed_archive);
|
||||
|
||||
if (af == nullptr) {
|
||||
if (!success) {
|
||||
log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
|
||||
klass_name, field_name);
|
||||
} else {
|
||||
// Note: the field value is not preserved in the archived mirror.
|
||||
// Record the field as a new subGraph entry point. The recorded
|
||||
// information is restored from the archive at runtime.
|
||||
subgraph_info->add_subgraph_entry_field(field_offset, af, is_closed_archive);
|
||||
log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(af));
|
||||
subgraph_info->add_subgraph_entry_field(field_offset, f, is_closed_archive);
|
||||
log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(f));
|
||||
}
|
||||
} else {
|
||||
// The field contains null, we still need to record the entry point,
|
||||
@@ -1493,12 +1331,7 @@ void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
|
||||
|
||||
#ifndef PRODUCT
|
||||
class VerifySharedOopClosure: public BasicOopIterateClosure {
|
||||
private:
|
||||
bool _is_archived;
|
||||
|
||||
public:
|
||||
VerifySharedOopClosure(bool is_archived) : _is_archived(is_archived) {}
|
||||
|
||||
void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
|
||||
void do_oop( oop *p) { VerifySharedOopClosure::do_oop_work(p); }
|
||||
|
||||
@@ -1506,7 +1339,7 @@ class VerifySharedOopClosure: public BasicOopIterateClosure {
|
||||
template <class T> void do_oop_work(T *p) {
|
||||
oop obj = RawAccess<>::oop_load(p);
|
||||
if (!CompressedOops::is_null(obj)) {
|
||||
HeapShared::verify_reachable_objects_from(obj, _is_archived);
|
||||
HeapShared::verify_reachable_objects_from(obj);
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -1523,8 +1356,7 @@ void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_o
|
||||
}
|
||||
|
||||
void HeapShared::verify_subgraph_from(oop orig_obj) {
|
||||
oop archived_obj = find_archived_heap_object(orig_obj);
|
||||
if (archived_obj == nullptr) {
|
||||
if (!has_been_archived(orig_obj)) {
|
||||
// It's OK for the root of a subgraph to be not archived. See comments in
|
||||
// archive_reachable_objects_from().
|
||||
return;
|
||||
@@ -1532,32 +1364,16 @@ void HeapShared::verify_subgraph_from(oop orig_obj) {
|
||||
|
||||
// Verify that all objects reachable from orig_obj are archived.
|
||||
init_seen_objects_table();
|
||||
verify_reachable_objects_from(orig_obj, false);
|
||||
verify_reachable_objects_from(orig_obj);
|
||||
delete_seen_objects_table();
|
||||
|
||||
// Note: we could also verify that all objects reachable from the archived
|
||||
// copy of orig_obj can only point to archived objects, with:
|
||||
// init_seen_objects_table();
|
||||
// verify_reachable_objects_from(archived_obj, true);
|
||||
// init_seen_objects_table();
|
||||
// but that's already done in G1HeapVerifier::verify_archive_regions so we
|
||||
// won't do it here.
|
||||
}
|
||||
|
||||
void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
|
||||
void HeapShared::verify_reachable_objects_from(oop obj) {
|
||||
_num_total_verifications ++;
|
||||
if (!has_been_seen_during_subgraph_recording(obj)) {
|
||||
set_has_been_seen_during_subgraph_recording(obj);
|
||||
|
||||
if (is_archived) {
|
||||
assert(is_archived_object_during_dumptime(obj), "must be");
|
||||
assert(find_archived_heap_object(obj) == nullptr, "must be");
|
||||
} else {
|
||||
assert(!is_archived_object_during_dumptime(obj), "must be");
|
||||
assert(find_archived_heap_object(obj) != nullptr, "must be");
|
||||
}
|
||||
|
||||
VerifySharedOopClosure walker(is_archived);
|
||||
assert(has_been_archived(obj), "must be");
|
||||
VerifySharedOopClosure walker;
|
||||
obj->oop_iterate(&walker);
|
||||
}
|
||||
}
|
||||
@@ -1811,7 +1627,6 @@ void HeapShared::init_for_dumping(TRAPS) {
|
||||
if (HeapShared::can_write()) {
|
||||
setup_test_class(ArchiveHeapTestClass);
|
||||
_dumped_interned_strings = new (mtClass)DumpedInternedStrings();
|
||||
_native_pointers = new GrowableArrayCHeap<Metadata**, mtClassShared>(2048);
|
||||
init_subgraph_entry_fields(CHECK);
|
||||
}
|
||||
}
|
||||
@@ -1877,10 +1692,12 @@ void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
|
||||
// [2] included in the SharedArchiveConfigFile.
|
||||
void HeapShared::add_to_dumped_interned_strings(oop string) {
|
||||
assert_at_safepoint(); // DumpedInternedStrings uses raw oops
|
||||
assert(!ArchiveHeapWriter::is_string_too_large_to_archive(string), "must be");
|
||||
bool created;
|
||||
_dumped_interned_strings->put_if_absent(string, true, &created);
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
// At dump-time, find the location of all the non-null oop pointers in an archived heap
|
||||
// region. This way we can quickly relocate all the pointers without using
|
||||
// BasicOopIterateClosure at runtime.
|
||||
@@ -1912,10 +1729,6 @@ class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
|
||||
if ((*p) != nullptr) {
|
||||
size_t idx = p - (oop*)_start;
|
||||
_oopmap->set_bit(idx);
|
||||
if (DumpSharedSpaces) {
|
||||
// Make heap content deterministic.
|
||||
*p = HeapShared::to_requested_address(*p);
|
||||
}
|
||||
} else {
|
||||
_num_null_oops ++;
|
||||
}
|
||||
@@ -1923,7 +1736,7 @@ class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
|
||||
int num_total_oops() const { return _num_total_oops; }
|
||||
int num_null_oops() const { return _num_null_oops; }
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
address HeapShared::to_requested_address(address dumptime_addr) {
|
||||
assert(DumpSharedSpaces, "static dump time only");
|
||||
@@ -1952,6 +1765,7 @@ address HeapShared::to_requested_address(address dumptime_addr) {
|
||||
return requested_addr;
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
|
||||
size_t num_bits = region.byte_size() / (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
|
||||
ResourceBitMap oopmap(num_bits);
|
||||
@@ -1959,16 +1773,12 @@ ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
|
||||
HeapWord* p = region.start();
|
||||
HeapWord* end = region.end();
|
||||
FindEmbeddedNonNullPointers finder((void*)p, &oopmap);
|
||||
ArchiveBuilder* builder = DumpSharedSpaces ? ArchiveBuilder::current() : nullptr;
|
||||
|
||||
int num_objs = 0;
|
||||
while (p < end) {
|
||||
oop o = cast_to_oop(p);
|
||||
o->oop_iterate(&finder);
|
||||
p += o->size();
|
||||
if (DumpSharedSpaces) {
|
||||
builder->relocate_klass_ptr_of_oop(o);
|
||||
}
|
||||
++ num_objs;
|
||||
}
|
||||
|
||||
@@ -1977,34 +1787,7 @@ ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
|
||||
return oopmap;
|
||||
}
|
||||
|
||||
|
||||
ResourceBitMap HeapShared::calculate_ptrmap(MemRegion region) {
|
||||
size_t num_bits = region.byte_size() / sizeof(Metadata*);
|
||||
ResourceBitMap oopmap(num_bits);
|
||||
|
||||
Metadata** start = (Metadata**)region.start();
|
||||
Metadata** end = (Metadata**)region.end();
|
||||
|
||||
int num_non_null_ptrs = 0;
|
||||
int len = _native_pointers->length();
|
||||
for (int i = 0; i < len; i++) {
|
||||
Metadata** p = _native_pointers->at(i);
|
||||
if (start <= p && p < end) {
|
||||
assert(*p != nullptr, "must be non-null");
|
||||
num_non_null_ptrs ++;
|
||||
size_t idx = p - start;
|
||||
oopmap.set_bit(idx);
|
||||
}
|
||||
}
|
||||
|
||||
log_info(cds, heap)("calculate_ptrmap: marked %d non-null native pointers out of "
|
||||
SIZE_FORMAT " possible locations", num_non_null_ptrs, num_bits);
|
||||
if (num_non_null_ptrs > 0) {
|
||||
return oopmap;
|
||||
} else {
|
||||
return ResourceBitMap(0);
|
||||
}
|
||||
}
|
||||
#endif // !PRODUCT
|
||||
|
||||
void HeapShared::count_allocation(size_t size) {
|
||||
_total_obj_count ++;
|
||||
|
||||
@@ -165,8 +165,8 @@ public:
|
||||
private:
|
||||
#if INCLUDE_CDS_JAVA_HEAP
|
||||
static bool _disable_writing;
|
||||
static bool _copying_open_region_objects;
|
||||
static DumpedInternedStrings *_dumped_interned_strings;
|
||||
static GrowableArrayCHeap<Metadata**, mtClassShared>* _native_pointers;
|
||||
|
||||
// statistics
|
||||
constexpr static int ALLOC_STAT_SLOTS = 16;
|
||||
@@ -183,11 +183,21 @@
    return java_lang_String::hash_code(string);
  }

  struct CachedOopInfo {
    KlassSubGraphInfo* _subgraph_info;
    oop _referrer;
    oop _obj;
    CachedOopInfo() :_subgraph_info(), _referrer(), _obj() {}
  class CachedOopInfo {
    // See "TEMP notes: What are these?" in archiveHeapWriter.hpp
    oop _orig_referrer;

    // The location of this object inside ArchiveHeapWriter::_buffer
    size_t _buffer_offset;
    bool _in_open_region;
  public:
    CachedOopInfo(oop orig_referrer, bool in_open_region)
      : _orig_referrer(orig_referrer),
        _buffer_offset(0), _in_open_region(in_open_region) {}
    oop orig_referrer() const { return _orig_referrer; }
    bool in_open_region() const { return _in_open_region; }
    void set_buffer_offset(size_t offset) { _buffer_offset = offset; }
    size_t buffer_offset() const { return _buffer_offset; }
  };
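
  // Editorial sketch (not part of this patch): how a CachedOopInfo is expected to be filled
  // in during dumping, based on the flow in heapShared.cpp:
  //
  //   CachedOopInfo info = make_cached_oop_info();   // captures the current referrer and
  //   archived_object_cache()->put(obj, info);       //   whether we are copying open-region objects
  //   // later, ArchiveHeapWriter records where the copy landed inside its buffer via
  //   // set_buffer_offset(), and buffer_offset() is consulted when relocating references.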

private:
@@ -203,13 +213,6 @@ private:
|
||||
HeapShared::oop_hash> ArchivedObjectCache;
|
||||
static ArchivedObjectCache* _archived_object_cache;
|
||||
|
||||
typedef ResourceHashtable<oop, oop,
|
||||
36137, // prime number
|
||||
AnyObj::C_HEAP,
|
||||
mtClassShared,
|
||||
HeapShared::oop_hash> OriginalObjectTable;
|
||||
static OriginalObjectTable* _original_object_table;
|
||||
|
||||
class DumpTimeKlassSubGraphInfoTable
|
||||
: public ResourceHashtable<Klass*, KlassSubGraphInfo,
|
||||
137, // prime number
|
||||
@@ -237,7 +240,7 @@ private:
|
||||
static RunTimeKlassSubGraphInfoTable _run_time_subgraph_info_table;
|
||||
|
||||
static void check_closed_region_object(InstanceKlass* k);
|
||||
static CachedOopInfo make_cached_oop_info(oop orig_obj);
|
||||
static CachedOopInfo make_cached_oop_info();
|
||||
static void archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
|
||||
bool is_closed_archive,
|
||||
bool is_full_module_graph);
|
||||
@@ -251,7 +254,7 @@ private:
|
||||
|
||||
static void verify_subgraph_from_static_field(
|
||||
InstanceKlass* k, int field_offset) PRODUCT_RETURN;
|
||||
static void verify_reachable_objects_from(oop obj, bool is_archived) PRODUCT_RETURN;
|
||||
static void verify_reachable_objects_from(oop obj) PRODUCT_RETURN;
|
||||
static void verify_subgraph_from(oop orig_obj) PRODUCT_RETURN;
|
||||
static void check_default_subgraph_classes();
|
||||
|
||||
@@ -316,7 +319,7 @@ private:
|
||||
|
||||
static bool has_been_seen_during_subgraph_recording(oop obj);
|
||||
static void set_has_been_seen_during_subgraph_recording(oop obj);
|
||||
static oop archive_object(oop obj);
|
||||
static bool archive_object(oop obj);
|
||||
|
||||
static void copy_interned_strings();
|
||||
static void copy_roots();
|
||||
@@ -338,58 +341,36 @@ private:
|
||||
static void init_loaded_heap_relocation(LoadedArchiveHeapRegion* reloc_info,
|
||||
int num_loaded_regions);
|
||||
static void fill_failed_loaded_region();
|
||||
static void mark_native_pointers(oop orig_obj, oop archived_obj);
|
||||
static void mark_one_native_pointer(oop archived_obj, int offset);
|
||||
static void mark_native_pointers(oop orig_obj);
|
||||
static bool has_been_archived(oop orig_obj);
|
||||
static void archive_java_mirrors();
|
||||
public:
|
||||
static void reset_archived_object_states(TRAPS);
|
||||
static void create_archived_object_cache(bool create_orig_table) {
|
||||
static void create_archived_object_cache() {
|
||||
_archived_object_cache =
|
||||
new (mtClass)ArchivedObjectCache();
|
||||
if (create_orig_table) {
|
||||
_original_object_table =
|
||||
new (mtClass)OriginalObjectTable();
|
||||
} else {
|
||||
_original_object_table = nullptr;
|
||||
}
|
||||
}
|
||||
static void destroy_archived_object_cache() {
|
||||
delete _archived_object_cache;
|
||||
_archived_object_cache = nullptr;
|
||||
if (_original_object_table != nullptr) {
|
||||
delete _original_object_table;
|
||||
_original_object_table = nullptr;
|
||||
}
|
||||
}
|
||||
static ArchivedObjectCache* archived_object_cache() {
|
||||
return _archived_object_cache;
|
||||
}
|
||||
static oop get_original_object(oop archived_object) {
|
||||
assert(_original_object_table != nullptr, "sanity");
|
||||
oop* r = _original_object_table->get(archived_object);
|
||||
if (r == nullptr) {
|
||||
return nullptr;
|
||||
} else {
|
||||
return *r;
|
||||
}
|
||||
}
|
||||
|
||||
static bool is_too_large_to_archive(oop o);
|
||||
static oop find_archived_heap_object(oop obj);
|
||||
|
||||
static void archive_java_mirrors();
|
||||
|
||||
static void archive_objects(GrowableArray<MemRegion>* closed_regions,
|
||||
GrowableArray<MemRegion>* open_regions);
|
||||
static void copy_closed_objects(GrowableArray<MemRegion>* closed_regions);
|
||||
static void copy_open_objects(GrowableArray<MemRegion>* open_regions);
|
||||
GrowableArray<MemRegion>* open_regions,
|
||||
GrowableArray<ArchiveHeapBitmapInfo>* closed_bitmaps,
|
||||
GrowableArray<ArchiveHeapBitmapInfo>* open_bitmaps);
|
||||
static void copy_closed_objects();
|
||||
static void copy_open_objects();
|
||||
|
||||
static oop archive_reachable_objects_from(int level,
|
||||
KlassSubGraphInfo* subgraph_info,
|
||||
oop orig_obj,
|
||||
bool is_closed_archive);
|
||||
static bool archive_reachable_objects_from(int level,
|
||||
KlassSubGraphInfo* subgraph_info,
|
||||
oop orig_obj,
|
||||
bool is_closed_archive);
|
||||
|
||||
static ResourceBitMap calculate_oopmap(MemRegion region); // marks all the oop pointers
|
||||
static ResourceBitMap calculate_ptrmap(MemRegion region); // marks all the native pointers
|
||||
static void add_to_dumped_interned_strings(oop string);
|
||||
|
||||
// Scratch objects for archiving Klass::java_mirror()
|
||||
@@ -426,16 +407,12 @@ private:
|
||||
|
||||
public:
|
||||
static void init_scratch_objects(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
|
||||
static void run_full_gc_in_vm_thread() NOT_CDS_JAVA_HEAP_RETURN;
|
||||
|
||||
static bool is_heap_region(int idx) {
|
||||
CDS_JAVA_HEAP_ONLY(return (idx >= MetaspaceShared::first_closed_heap_region &&
|
||||
idx <= MetaspaceShared::last_open_heap_region);)
|
||||
NOT_CDS_JAVA_HEAP_RETURN_(false);
|
||||
}
|
||||
|
||||
static bool is_archived_object_during_dumptime(oop p) NOT_CDS_JAVA_HEAP_RETURN_(false);
|
||||
|
||||
static void resolve_classes(JavaThread* current) NOT_CDS_JAVA_HEAP_RETURN;
|
||||
static void initialize_from_archived_subgraph(JavaThread* current, Klass* k) NOT_CDS_JAVA_HEAP_RETURN;
|
||||
|
||||
|
||||
@@ -25,6 +25,7 @@
|
||||
#include "precompiled.hpp"
|
||||
#include "cds/archiveBuilder.hpp"
|
||||
#include "cds/archiveHeapLoader.hpp"
|
||||
#include "cds/archiveHeapWriter.hpp"
|
||||
#include "cds/cds_globals.hpp"
|
||||
#include "cds/cdsProtectionDomain.hpp"
|
||||
#include "cds/classListWriter.hpp"
|
||||
@@ -82,9 +83,6 @@
|
||||
#include "utilities/ostream.hpp"
|
||||
#include "utilities/defaultStream.hpp"
|
||||
#include "utilities/resourceHash.hpp"
|
||||
#if INCLUDE_G1GC
|
||||
#include "gc/g1/g1CollectedHeap.inline.hpp"
|
||||
#endif
|
||||
|
||||
ReservedSpace MetaspaceShared::_symbol_rs;
|
||||
VirtualSpace MetaspaceShared::_symbol_vs;
|
||||
@@ -331,22 +329,16 @@ void MetaspaceShared::read_extra_data(JavaThread* current, const char* filename)
|
||||
reader.last_line_no(), utf8_length);
|
||||
CLEAR_PENDING_EXCEPTION;
|
||||
} else {
|
||||
#if INCLUDE_G1GC
|
||||
if (UseG1GC) {
|
||||
typeArrayOop body = java_lang_String::value(str);
|
||||
const HeapRegion* hr = G1CollectedHeap::heap()->heap_region_containing(body);
|
||||
if (hr->is_humongous()) {
|
||||
// Don't keep it alive, so it will be GC'ed before we dump the strings, in order
|
||||
// to maximize free heap space and minimize fragmentation.
|
||||
log_warning(cds, heap)("[line %d] extra interned string ignored; size too large: %d",
|
||||
reader.last_line_no(), utf8_length);
|
||||
continue;
|
||||
}
|
||||
#if INCLUDE_CDS_JAVA_HEAP
|
||||
if (ArchiveHeapWriter::is_string_too_large_to_archive(str)) {
|
||||
log_warning(cds, heap)("[line %d] extra interned string ignored; size too large: %d",
|
||||
reader.last_line_no(), utf8_length);
|
||||
continue;
|
||||
}
|
||||
#endif
|
||||
// Make sure this string is included in the dumped interned string table.
|
||||
assert(str != nullptr, "must succeed");
|
||||
_extra_interned_strings->append(OopHandle(Universe::vm_global(), str));
|
||||
#endif
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -436,7 +428,7 @@ void MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread
|
||||
}
|
||||
}
|
||||
|
||||
class VM_PopulateDumpSharedSpace : public VM_GC_Operation {
|
||||
class VM_PopulateDumpSharedSpace : public VM_Operation {
|
||||
private:
|
||||
GrowableArray<MemRegion> *_closed_heap_regions;
|
||||
GrowableArray<MemRegion> *_open_heap_regions;
|
||||
@@ -445,11 +437,6 @@ private:
|
||||
GrowableArray<ArchiveHeapBitmapInfo> *_open_heap_bitmaps;
|
||||
|
||||
void dump_java_heap_objects(GrowableArray<Klass*>* klasses) NOT_CDS_JAVA_HEAP_RETURN;
|
||||
void dump_heap_bitmaps() NOT_CDS_JAVA_HEAP_RETURN;
|
||||
void dump_heap_bitmaps(GrowableArray<MemRegion>* regions,
|
||||
GrowableArray<ArchiveHeapBitmapInfo>* bitmaps);
|
||||
void dump_one_heap_bitmap(MemRegion region, GrowableArray<ArchiveHeapBitmapInfo>* bitmaps,
|
||||
ResourceBitMap bitmap, bool is_oopmap);
|
||||
void dump_shared_symbol_table(GrowableArray<Symbol*>* symbols) {
|
||||
log_info(cds)("Dumping symbol table ...");
|
||||
SymbolTable::write_to_archive(symbols);
|
||||
@@ -458,8 +445,7 @@ private:
|
||||
|
||||
public:
|
||||
|
||||
VM_PopulateDumpSharedSpace() :
|
||||
VM_GC_Operation(0 /* total collections, ignored */, GCCause::_archive_time_gc),
|
||||
VM_PopulateDumpSharedSpace() : VM_Operation(),
|
||||
_closed_heap_regions(nullptr),
|
||||
_open_heap_regions(nullptr),
|
||||
_closed_heap_bitmaps(nullptr),
|
||||
@@ -508,15 +494,10 @@ char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
|
||||
WriteClosure wc(ro_region);
|
||||
MetaspaceShared::serialize(&wc);
|
||||
|
||||
// Write the bitmaps for patching the archive heap regions
|
||||
dump_heap_bitmaps();
|
||||
|
||||
return start;
|
||||
}
|
||||
|
||||
void VM_PopulateDumpSharedSpace::doit() {
|
||||
HeapShared::run_full_gc_in_vm_thread();
|
||||
|
||||
DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm);
|
||||
|
||||
FileMapInfo::check_nonempty_dir_in_shared_path_table();
|
||||
@@ -820,9 +801,10 @@ void MetaspaceShared::preload_and_dump_impl(TRAPS) {
|
||||
log_info(cds)("Rewriting and linking classes: done");
|
||||
|
||||
#if INCLUDE_CDS_JAVA_HEAP
|
||||
if (use_full_module_graph()) {
|
||||
HeapShared::reset_archived_object_states(CHECK);
|
||||
}
|
||||
ArchiveHeapWriter::init();
|
||||
if (use_full_module_graph()) {
|
||||
HeapShared::reset_archived_object_states(CHECK);
|
||||
}
|
||||
#endif
|
||||
|
||||
VM_PopulateDumpSharedSpace op;
|
||||
@@ -895,60 +877,13 @@ void VM_PopulateDumpSharedSpace::dump_java_heap_objects(GrowableArray<Klass*>* k
|
||||
// See FileMapInfo::write_heap_regions() for details.
|
||||
_closed_heap_regions = new GrowableArray<MemRegion>(2);
|
||||
_open_heap_regions = new GrowableArray<MemRegion>(2);
|
||||
HeapShared::archive_objects(_closed_heap_regions, _open_heap_regions);
|
||||
_closed_heap_bitmaps = new GrowableArray<ArchiveHeapBitmapInfo>(2);
|
||||
_open_heap_bitmaps = new GrowableArray<ArchiveHeapBitmapInfo>(2);
|
||||
HeapShared::archive_objects(_closed_heap_regions, _open_heap_regions,
|
||||
_closed_heap_bitmaps, _open_heap_bitmaps);
|
||||
ArchiveBuilder::OtherROAllocMark mark;
|
||||
HeapShared::write_subgraph_info_table();
|
||||
}
|
||||
|
||||
void VM_PopulateDumpSharedSpace::dump_heap_bitmaps() {
|
||||
if (HeapShared::can_write()) {
|
||||
_closed_heap_bitmaps = new GrowableArray<ArchiveHeapBitmapInfo>(2);
|
||||
dump_heap_bitmaps(_closed_heap_regions, _closed_heap_bitmaps);
|
||||
|
||||
_open_heap_bitmaps = new GrowableArray<ArchiveHeapBitmapInfo>(2);
|
||||
dump_heap_bitmaps(_open_heap_regions, _open_heap_bitmaps);
|
||||
}
|
||||
}
|
||||
|
||||
void VM_PopulateDumpSharedSpace::dump_heap_bitmaps(GrowableArray<MemRegion>* regions,
|
||||
GrowableArray<ArchiveHeapBitmapInfo>* bitmaps) {
|
||||
for (int i = 0; i < regions->length(); i++) {
|
||||
MemRegion region = regions->at(i);
|
||||
ResourceBitMap oopmap = HeapShared::calculate_oopmap(region);
|
||||
ResourceBitMap ptrmap = HeapShared::calculate_ptrmap(region);
|
||||
dump_one_heap_bitmap(region, bitmaps, oopmap, true);
|
||||
dump_one_heap_bitmap(region, bitmaps, ptrmap, false);
|
||||
}
|
||||
}
|
||||
|
||||
void VM_PopulateDumpSharedSpace::dump_one_heap_bitmap(MemRegion region,
|
||||
GrowableArray<ArchiveHeapBitmapInfo>* bitmaps,
|
||||
ResourceBitMap bitmap, bool is_oopmap) {
|
||||
size_t size_in_bits = bitmap.size();
|
||||
size_t size_in_bytes;
|
||||
uintptr_t* buffer;
|
||||
|
||||
if (size_in_bits > 0) {
|
||||
size_in_bytes = bitmap.size_in_bytes();
|
||||
buffer = (uintptr_t*)NEW_C_HEAP_ARRAY(char, size_in_bytes, mtInternal);
|
||||
bitmap.write_to(buffer, size_in_bytes);
|
||||
} else {
|
||||
size_in_bytes = 0;
|
||||
buffer = nullptr;
|
||||
}
|
||||
|
||||
log_info(cds, heap)("%s = " INTPTR_FORMAT " (" SIZE_FORMAT_W(6) " bytes) for heap region "
|
||||
INTPTR_FORMAT " (" SIZE_FORMAT_W(8) " bytes)",
|
||||
is_oopmap ? "Oopmap" : "Ptrmap",
|
||||
p2i(buffer), size_in_bytes,
|
||||
p2i(region.start()), region.byte_size());
|
||||
|
||||
ArchiveHeapBitmapInfo info;
|
||||
info._map = (address)buffer;
|
||||
info._size_in_bits = size_in_bits;
|
||||
info._size_in_bytes = size_in_bytes;
|
||||
bitmaps->append(info);
|
||||
}
|
||||
#endif // INCLUDE_CDS_JAVA_HEAP
|
||||
|
||||
void MetaspaceShared::set_shared_metaspace_range(void* base, void *static_top, void* top) {
|
||||
|
||||
@@ -25,6 +25,7 @@
|
||||
#include "precompiled.hpp"
|
||||
#include "cds/archiveBuilder.hpp"
|
||||
#include "cds/archiveHeapLoader.inline.hpp"
|
||||
#include "cds/archiveHeapWriter.hpp"
|
||||
#include "cds/filemap.hpp"
|
||||
#include "cds/heapShared.hpp"
|
||||
#include "classfile/altHashing.hpp"
|
||||
@@ -770,14 +771,14 @@ public:
   EncodeSharedStringsAsOffsets(CompactHashtableWriter* writer) : _writer(writer) {}
   bool do_entry(oop s, bool value_ignored) {
     assert(s != nullptr, "sanity");
     oop new_s = HeapShared::find_archived_heap_object(s);
     if (new_s != nullptr) { // could be null if the string is too big
       unsigned int hash = java_lang_String::hash_code(s);
       if (UseCompressedOops) {
         _writer->add(hash, CompressedOops::narrow_oop_value(new_s));
       } else {
         _writer->add(hash, compute_delta(new_s));
       }
     assert(!ArchiveHeapWriter::is_string_too_large_to_archive(s), "must be");
     oop req_s = ArchiveHeapWriter::source_obj_to_requested_obj(s);
     assert(req_s != nullptr, "must have been archived");
     unsigned int hash = java_lang_String::hash_code(s);
     if (UseCompressedOops) {
       _writer->add(hash, CompressedOops::narrow_oop_value(req_s));
     } else {
       _writer->add(hash, compute_delta(req_s));
     }
     return true; // keep iterating
   }

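The rewritten do_entry() above no longer looks up an already-archived copy; it asserts the string is archivable, maps it to its requested address with ArchiveHeapWriter::source_obj_to_requested_obj(), and stores either a narrow-oop value or a delta depending on UseCompressedOops. A rough sketch of that hash-to-encoded-location idea — not HotSpot code; the StringLocTable name and the shift/base-delta encodings are stand-in assumptions:

#include <cstdint>
#include <unordered_map>

struct StringLocTable {
  bool use_compressed;       // analogous to UseCompressedOops
  uintptr_t requested_base;  // assumed base of the archived heap range
  std::unordered_map<uint32_t, uint32_t> table;  // hash -> encoded location

  void add(uint32_t hash, uintptr_t requested_addr) {
    // With "compressed" pointers store a 32-bit encoded address directly,
    // otherwise store the offset from the archive base (stand-in for compute_delta()).
    uint32_t encoded = use_compressed
        ? static_cast<uint32_t>(requested_addr >> 3)
        : static_cast<uint32_t>(requested_addr - requested_base);
    table.emplace(hash, encoded);
  }
};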
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -484,153 +484,3 @@ size_t G1PLABAllocator::undo_waste() const {
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h, bool open) {
|
||||
return new G1ArchiveAllocator(g1h, open);
|
||||
}
|
||||
|
||||
bool G1ArchiveAllocator::alloc_new_region() {
|
||||
// Allocate the highest free region in the reserved heap,
|
||||
// and add it to our list of allocated regions. It is marked
|
||||
// archive and added to the old set.
|
||||
HeapRegion* hr = _g1h->alloc_highest_free_region();
|
||||
if (hr == NULL) {
|
||||
return false;
|
||||
}
|
||||
assert(hr->is_empty(), "expected empty region (index %u)", hr->hrm_index());
|
||||
if (_open) {
|
||||
hr->set_open_archive();
|
||||
} else {
|
||||
hr->set_closed_archive();
|
||||
}
|
||||
_g1h->policy()->remset_tracker()->update_at_allocate(hr);
|
||||
_g1h->archive_set_add(hr);
|
||||
_g1h->hr_printer()->alloc(hr);
|
||||
_allocated_regions.append(hr);
|
||||
_allocation_region = hr;
|
||||
|
||||
// Set up _bottom and _max to begin allocating in the lowest
|
||||
// min_region_size'd chunk of the allocated G1 region.
|
||||
_bottom = hr->bottom();
|
||||
_max = _bottom + HeapRegion::min_region_size_in_words();
|
||||
|
||||
// Since we've modified the old set, call update_sizes.
|
||||
_g1h->monitoring_support()->update_sizes();
|
||||
return true;
|
||||
}
|
||||
|
||||
HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
|
||||
assert(word_size != 0, "size must not be zero");
|
||||
if (_allocation_region == NULL) {
|
||||
if (!alloc_new_region()) {
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
HeapWord* old_top = _allocation_region->top();
|
||||
assert(_bottom >= _allocation_region->bottom(),
|
||||
"inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
|
||||
p2i(_bottom), p2i(_allocation_region->bottom()));
|
||||
assert(_max <= _allocation_region->end(),
|
||||
"inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
|
||||
p2i(_max), p2i(_allocation_region->end()));
|
||||
assert(_bottom <= old_top && old_top <= _max,
|
||||
"inconsistent allocation state: expected "
|
||||
PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
|
||||
p2i(_bottom), p2i(old_top), p2i(_max));
|
||||
|
||||
  // Try to allocate word_size in the current allocation chunk. Two cases
  // require special treatment:
  // 1. not enough space for word_size
  // 2. after allocating word_size, non-zero space is left, but it is too small for the minimal filler
  // In both cases, we retire the current chunk and move on to the next one.
size_t free_words = pointer_delta(_max, old_top);
|
||||
if (free_words < word_size ||
|
||||
((free_words - word_size != 0) && (free_words - word_size < CollectedHeap::min_fill_size()))) {
|
||||
// Retiring the current chunk
|
||||
if (old_top != _max) {
|
||||
// Non-zero space; need to insert the filler
|
||||
size_t fill_size = free_words;
|
||||
CollectedHeap::fill_with_object(old_top, fill_size);
|
||||
}
|
||||
// Set the current chunk as "full"
|
||||
_allocation_region->set_top(_max);
|
||||
|
||||
// Check if we've just used up the last min_region_size'd chunk
|
||||
// in the current region, and if so, allocate a new one.
|
||||
if (_max != _allocation_region->end()) {
|
||||
// Shift to the next chunk
|
||||
old_top = _bottom = _max;
|
||||
_max = _bottom + HeapRegion::min_region_size_in_words();
|
||||
} else {
|
||||
if (!alloc_new_region()) {
|
||||
return NULL;
|
||||
}
|
||||
old_top = _allocation_region->bottom();
|
||||
}
|
||||
}
|
||||
assert(pointer_delta(_max, old_top) >= word_size, "enough space left");
|
||||
_allocation_region->set_top(old_top + word_size);
|
||||
|
||||
return old_top;
|
||||
}
|
||||
|
||||
void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
|
||||
size_t end_alignment_in_bytes) {
|
||||
assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
|
||||
"alignment " SIZE_FORMAT " too large", end_alignment_in_bytes);
|
||||
assert(is_aligned(end_alignment_in_bytes, HeapWordSize),
|
||||
"alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize);
|
||||
|
||||
// If we've allocated nothing, simply return.
|
||||
if (_allocation_region == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
// If an end alignment was requested, insert filler objects.
|
||||
if (end_alignment_in_bytes != 0) {
|
||||
HeapWord* currtop = _allocation_region->top();
|
||||
HeapWord* newtop = align_up(currtop, end_alignment_in_bytes);
|
||||
size_t fill_size = pointer_delta(newtop, currtop);
|
||||
if (fill_size != 0) {
|
||||
if (fill_size < CollectedHeap::min_fill_size()) {
|
||||
// If the required fill is smaller than we can represent,
|
||||
// bump up to the next aligned address. We know we won't exceed the current
|
||||
// region boundary because the max supported alignment is smaller than the min
|
||||
// region size, and because the allocation code never leaves space smaller than
|
||||
// the min_fill_size at the top of the current allocation region.
|
||||
newtop = align_up(currtop + CollectedHeap::min_fill_size(),
|
||||
end_alignment_in_bytes);
|
||||
fill_size = pointer_delta(newtop, currtop);
|
||||
}
|
||||
HeapWord* fill = archive_mem_allocate(fill_size);
|
||||
CollectedHeap::fill_with_objects(fill, fill_size);
|
||||
}
|
||||
}
|
||||
|
||||
// Loop through the allocated regions, and create MemRegions summarizing
|
||||
// the allocated address range, combining contiguous ranges. Add the
|
||||
// MemRegions to the GrowableArray provided by the caller.
|
||||
int index = _allocated_regions.length() - 1;
|
||||
assert(_allocated_regions.at(index) == _allocation_region,
|
||||
"expected region %u at end of array, found %u",
|
||||
_allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index());
|
||||
HeapWord* base_address = _allocation_region->bottom();
|
||||
HeapWord* top = base_address;
|
||||
|
||||
while (index >= 0) {
|
||||
HeapRegion* next = _allocated_regions.at(index);
|
||||
HeapWord* new_base = next->bottom();
|
||||
HeapWord* new_top = next->top();
|
||||
if (new_base != top) {
|
||||
ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
|
||||
base_address = new_base;
|
||||
}
|
||||
top = new_top;
|
||||
index = index - 1;
|
||||
}
|
||||
|
||||
assert(top != base_address, "zero-sized range, address " PTR_FORMAT, p2i(base_address));
|
||||
ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
|
||||
_allocated_regions.clear();
|
||||
_allocation_region = NULL;
|
||||
};
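The removed archive_mem_allocate() above bump-allocates within min_region_size'd chunks and retires the current chunk when the request does not fit, or when the space left behind would be nonzero but smaller than the minimum filler object. A self-contained sketch of that retirement rule — not the removed HotSpot code; ChunkAllocator and kMinFillWords are assumptions:

#include <cstddef>
#include <cstdint>

struct ChunkAllocator {
  static constexpr size_t kMinFillWords = 2;  // assumed minimum filler size
  uintptr_t* top;  // current allocation pointer within the chunk
  uintptr_t* max;  // end of the current chunk

  // Returns nullptr when the current chunk must be retired; the real code then
  // fills the tail with a dummy object and moves to the next chunk or region.
  uintptr_t* allocate(size_t word_size) {
    size_t free_words = static_cast<size_t>(max - top);
    if (free_words < word_size ||
        (free_words - word_size != 0 && free_words - word_size < kMinFillWords)) {
      return nullptr;  // retire this chunk
    }
    uintptr_t* result = top;
    top += word_size;  // bump-pointer allocation
    return result;
  }
};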
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -225,60 +225,4 @@ public:
|
||||
void undo_allocation(G1HeapRegionAttr dest, HeapWord* obj, size_t word_sz, uint node_index);
|
||||
};
|
||||
|
||||
// G1ArchiveAllocator is used to allocate memory in archive
// regions. Such regions are neither scavenged nor compacted by GC.
// There are two types of archive regions, which differ in the kind
// of references allowed for the contained objects:
//
// - A 'closed' archive region contains no references outside of other
//   closed archive regions. The region is immutable to GC. GC does
//   not mark object headers in 'closed' archive regions.
// - An 'open' archive region allows references to any other regions,
//   including closed archive, open archive and other java heap regions.
//   GC can adjust pointers and mark object headers in 'open' archive regions.
class G1ArchiveAllocator : public CHeapObj<mtGC> {
|
||||
protected:
|
||||
bool _open; // Indicate if the region is 'open' archive.
|
||||
G1CollectedHeap* _g1h;
|
||||
|
||||
// The current allocation region
|
||||
HeapRegion* _allocation_region;
|
||||
|
||||
// Regions allocated for the current archive range.
|
||||
GrowableArrayCHeap<HeapRegion*, mtGC> _allocated_regions;
|
||||
|
||||
// Current allocation window within the current region.
|
||||
HeapWord* _bottom;
|
||||
HeapWord* _top;
|
||||
HeapWord* _max;
|
||||
|
||||
// Allocate a new region for this archive allocator.
|
||||
// Allocation is from the top of the reserved heap downward.
|
||||
bool alloc_new_region();
|
||||
|
||||
public:
|
||||
G1ArchiveAllocator(G1CollectedHeap* g1h, bool open) :
|
||||
_open(open),
|
||||
_g1h(g1h),
|
||||
_allocation_region(NULL),
|
||||
_allocated_regions(2),
|
||||
_bottom(NULL),
|
||||
_top(NULL),
|
||||
_max(NULL) { }
|
||||
|
||||
virtual ~G1ArchiveAllocator() {
|
||||
assert(_allocation_region == NULL, "_allocation_region not NULL");
|
||||
}
|
||||
|
||||
static G1ArchiveAllocator* create_allocator(G1CollectedHeap* g1h, bool open);
|
||||
|
||||
// Allocate memory for an individual object.
|
||||
HeapWord* archive_mem_allocate(size_t word_size);
|
||||
|
||||
// Return the memory ranges used in the current archive, after
|
||||
// aligning to the requested alignment.
|
||||
void complete_archive(GrowableArray<MemRegion>* ranges,
|
||||
size_t end_alignment_in_bytes);
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_G1_G1ALLOCATOR_HPP
|
||||
|
||||
@@ -78,7 +78,7 @@ class G1BarrierSet: public CardTableBarrierSet {
|
||||
void write_ref_array_work(MemRegion mr) { invalidate(mr); }
|
||||
|
||||
template <DecoratorSet decorators, typename T>
|
||||
void write_ref_field_post(T* field, oop new_val);
|
||||
void write_ref_field_post(T* field);
|
||||
void write_ref_field_post_slow(volatile CardValue* byte);
|
||||
|
||||
virtual void on_thread_create(Thread* thread);
|
||||
|
||||
@@ -68,7 +68,7 @@ inline void G1BarrierSet::write_ref_field_pre(T* field) {
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators, typename T>
|
||||
inline void G1BarrierSet::write_ref_field_post(T* field, oop new_val) {
|
||||
inline void G1BarrierSet::write_ref_field_post(T* field) {
|
||||
volatile CardValue* byte = _card_table->byte_for(field);
|
||||
if (*byte != G1CardTable::g1_young_card_val()) {
|
||||
// Take a slow path for cards in old
|
||||
|
||||
@@ -1,157 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/g1/g1CardCounts.hpp"
|
||||
#include "gc/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc/shared/cardTableBarrierSet.hpp"
|
||||
#include "services/memTracker.hpp"
|
||||
#include "utilities/copy.hpp"
|
||||
|
||||
void G1CardCountsMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
|
||||
if (zero_filled) {
|
||||
return;
|
||||
}
|
||||
MemRegion mr(G1CollectedHeap::heap()->bottom_addr_for_region(start_idx), num_regions * HeapRegion::GrainWords);
|
||||
_counts->clear_range(mr);
|
||||
}
|
||||
|
||||
size_t G1CardCounts::compute_size(size_t mem_region_size_in_words) {
|
||||
// We keep card counts for every card, so the size of the card counts table must
|
||||
// be the same as the card table.
|
||||
return G1CardTable::compute_size(mem_region_size_in_words);
|
||||
}
|
||||
|
||||
size_t G1CardCounts::heap_map_factor() {
|
||||
// See G1CardCounts::compute_size() why we reuse the card table value.
|
||||
return G1CardTable::heap_map_factor();
|
||||
}
|
||||
|
||||
void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
|
||||
if (has_count_table()) {
|
||||
assert(from_card_num < to_card_num,
|
||||
"Wrong order? from: " SIZE_FORMAT ", to: " SIZE_FORMAT,
|
||||
from_card_num, to_card_num);
|
||||
Copy::fill_to_bytes(&_card_counts[from_card_num], (to_card_num - from_card_num));
|
||||
}
|
||||
}
|
||||
|
||||
G1CardCounts::G1CardCounts(G1CollectedHeap *g1h):
|
||||
_listener(), _g1h(g1h), _ct(NULL), _card_counts(NULL), _reserved_max_card_num(0), _ct_bot(NULL) {
|
||||
_listener.set_cardcounts(this);
|
||||
}
|
||||
|
||||
void G1CardCounts::initialize(G1RegionToSpaceMapper* mapper) {
|
||||
assert(_g1h->reserved().byte_size() > 0, "initialization order");
|
||||
assert(_g1h->capacity() == 0, "initialization order");
|
||||
|
||||
if (G1ConcRSHotCardLimit > 0) {
|
||||
// The max value we can store in the counts table is
|
||||
// max_jubyte. Guarantee the value of the hot
|
||||
// threshold limit is no more than this.
|
||||
guarantee(G1ConcRSHotCardLimit <= max_jubyte, "sanity");
|
||||
|
||||
_ct = _g1h->card_table();
|
||||
_ct_bot = _ct->byte_for_const(_g1h->reserved().start());
|
||||
|
||||
_card_counts = (jubyte*) mapper->reserved().start();
|
||||
_reserved_max_card_num = mapper->reserved().byte_size();
|
||||
mapper->set_mapping_changed_listener(&_listener);
|
||||
}
|
||||
}
|
||||
|
||||
uint G1CardCounts::add_card_count(CardValue* card_ptr) {
|
||||
// Returns the number of times the card has been refined.
|
||||
// If we failed to reserve/commit the counts table, return 0.
|
||||
// If card_ptr is beyond the committed end of the counts table,
|
||||
// return 0.
|
||||
// Otherwise return the actual count.
|
||||
// Unless G1ConcRSHotCardLimit has been set appropriately,
|
||||
// returning 0 will result in the card being considered
|
||||
// cold and will be refined immediately.
|
||||
uint count = 0;
|
||||
if (has_count_table()) {
|
||||
size_t card_num = ptr_2_card_num(card_ptr);
|
||||
assert(card_num < _reserved_max_card_num,
|
||||
"Card " SIZE_FORMAT " outside of card counts table (max size " SIZE_FORMAT ")",
|
||||
card_num, _reserved_max_card_num);
|
||||
count = (uint) _card_counts[card_num];
|
||||
if (count < G1ConcRSHotCardLimit) {
|
||||
_card_counts[card_num] =
|
||||
(jubyte)(MIN2((uintx)(_card_counts[card_num] + 1), G1ConcRSHotCardLimit));
|
||||
}
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
bool G1CardCounts::is_hot(uint count) {
|
||||
return (count >= G1ConcRSHotCardLimit);
|
||||
}
|
||||
|
||||
void G1CardCounts::clear_region(HeapRegion* hr) {
|
||||
MemRegion mr(hr->bottom(), hr->end());
|
||||
clear_range(mr);
|
||||
}
|
||||
|
||||
void G1CardCounts::clear_range(MemRegion mr) {
|
||||
if (has_count_table()) {
|
||||
const CardValue* from_card_ptr = _ct->byte_for_const(mr.start());
|
||||
// We use the last address in the range as the range could represent the
|
||||
// last region in the heap. In which case trying to find the card will be an
|
||||
// OOB access to the card table.
|
||||
const CardValue* last_card_ptr = _ct->byte_for_const(mr.last());
|
||||
|
||||
#ifdef ASSERT
|
||||
HeapWord* start_addr = _ct->addr_for(from_card_ptr);
|
||||
assert(start_addr == mr.start(), "MemRegion start must be aligned to a card.");
|
||||
HeapWord* last_addr = _ct->addr_for(last_card_ptr);
|
||||
assert((last_addr + G1CardTable::card_size_in_words()) == mr.end(), "MemRegion end must be aligned to a card.");
|
||||
#endif // ASSERT
|
||||
|
||||
// Clear the counts for the (exclusive) card range.
|
||||
size_t from_card_num = ptr_2_card_num(from_card_ptr);
|
||||
size_t to_card_num = ptr_2_card_num(last_card_ptr) + 1;
|
||||
clear_range(from_card_num, to_card_num);
|
||||
}
|
||||
}
|
||||
|
||||
class G1CardCountsClearClosure : public HeapRegionClosure {
|
||||
private:
|
||||
G1CardCounts* _card_counts;
|
||||
public:
|
||||
G1CardCountsClearClosure(G1CardCounts* card_counts) :
|
||||
HeapRegionClosure(), _card_counts(card_counts) { }
|
||||
|
||||
|
||||
virtual bool do_heap_region(HeapRegion* r) {
|
||||
_card_counts->clear_region(r);
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
void G1CardCounts::clear_all() {
|
||||
assert(SafepointSynchronize::is_at_safepoint(), "don't call this otherwise");
|
||||
G1CardCountsClearClosure cl(this);
|
||||
_g1h->heap_region_iterate(&cl);
|
||||
}
|
||||
@@ -1,135 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_G1_G1CARDCOUNTS_HPP
|
||||
#define SHARE_GC_G1_G1CARDCOUNTS_HPP
|
||||
|
||||
#include "gc/g1/g1CardTable.hpp"
|
||||
#include "gc/g1/g1RegionToSpaceMapper.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "memory/virtualspace.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
class CardTableBarrierSet;
|
||||
class G1CardCounts;
|
||||
class G1CollectedHeap;
|
||||
class G1RegionToSpaceMapper;
|
||||
class HeapRegion;
|
||||
|
||||
class G1CardCountsMappingChangedListener : public G1MappingChangedListener {
|
||||
private:
|
||||
G1CardCounts* _counts;
|
||||
public:
|
||||
void set_cardcounts(G1CardCounts* counts) { _counts = counts; }
|
||||
|
||||
virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
|
||||
};
|
||||
|
||||
// Table to track the number of times a card has been refined. Once
// a card has been refined a certain number of times, it is
// considered 'hot' and its refinement is delayed by inserting the
// card into the hot card cache. The card will then be refined when
// it is evicted from the hot card cache, or when the hot card cache
// is 'drained' during the next evacuation pause.

class G1CardCounts: public CHeapObj<mtGC> {
|
||||
public:
|
||||
typedef CardTable::CardValue CardValue;
|
||||
|
||||
private:
|
||||
G1CardCountsMappingChangedListener _listener;
|
||||
|
||||
G1CollectedHeap* _g1h;
|
||||
G1CardTable* _ct;
|
||||
|
||||
// The table of counts
|
||||
uint8_t* _card_counts;
|
||||
|
||||
// Max capacity of the reserved space for the counts table
|
||||
size_t _reserved_max_card_num;
|
||||
|
||||
// CardTable bottom.
|
||||
const CardValue* _ct_bot;
|
||||
|
||||
// Returns true if the card counts table has been reserved.
|
||||
bool has_reserved_count_table() { return _card_counts != NULL; }
|
||||
|
||||
// Returns true if the card counts table has been reserved and committed.
|
||||
bool has_count_table() {
|
||||
return has_reserved_count_table();
|
||||
}
|
||||
|
||||
size_t ptr_2_card_num(const CardValue* card_ptr) {
|
||||
assert(card_ptr >= _ct_bot,
|
||||
"Invalid card pointer: "
|
||||
"card_ptr: " PTR_FORMAT ", "
|
||||
"_ct_bot: " PTR_FORMAT,
|
||||
p2i(card_ptr), p2i(_ct_bot));
|
||||
size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(CardValue));
|
||||
assert(card_num < _reserved_max_card_num,
|
||||
"card pointer out of range: " PTR_FORMAT, p2i(card_ptr));
|
||||
return card_num;
|
||||
}
|
||||
|
||||
CardValue* card_num_2_ptr(size_t card_num) {
|
||||
assert(card_num < _reserved_max_card_num,
|
||||
"card num out of range: " SIZE_FORMAT, card_num);
|
||||
return (CardValue*) (_ct_bot + card_num);
|
||||
}
|
||||
|
||||
// Clear the counts table for the given (exclusive) index range.
|
||||
void clear_range(size_t from_card_num, size_t to_card_num);
|
||||
|
||||
public:
|
||||
G1CardCounts(G1CollectedHeap* g1h);
|
||||
|
||||
// Return the number of slots needed for a card counts table
|
||||
// that covers mem_region_words words.
|
||||
static size_t compute_size(size_t mem_region_size_in_words);
|
||||
|
||||
// Returns how many bytes of the heap a single byte of the card counts table
|
||||
// corresponds to.
|
||||
static size_t heap_map_factor();
|
||||
|
||||
void initialize(G1RegionToSpaceMapper* mapper);
|
||||
|
||||
// Increments the refinement count for the given card.
|
||||
// Returns the pre-increment count value.
|
||||
uint add_card_count(CardValue* card_ptr);
|
||||
|
||||
// Returns true if the given count is high enough to be considered
|
||||
// 'hot'; false otherwise.
|
||||
bool is_hot(uint count);
|
||||
|
||||
// Clears the card counts for the cards spanned by the region
|
||||
void clear_region(HeapRegion* hr);
|
||||
|
||||
// Clears the card counts for the cards spanned by the MemRegion
|
||||
void clear_range(MemRegion mr);
|
||||
|
||||
// Clear the entire card counts table during GC.
|
||||
void clear_all();
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_G1_G1CARDCOUNTS_HPP
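The removed G1CardCounts kept one saturating byte counter per card: each refinement bumps the counter up to G1ConcRSHotCardLimit, and a card whose pre-increment count has reached that limit is considered hot. A small sketch of the counting scheme — not the removed HotSpot code; CardCounts and kHotLimit are assumptions:

#include <cstddef>
#include <cstdint>
#include <vector>

struct CardCounts {
  static constexpr uint8_t kHotLimit = 4;  // assumed stand-in for G1ConcRSHotCardLimit
  std::vector<uint8_t> counts;

  explicit CardCounts(size_t num_cards) : counts(num_cards, 0) {}

  // Returns the count before the increment, mirroring add_card_count();
  // the counter saturates at the hot limit so a byte never overflows.
  uint8_t add(size_t card_index) {
    uint8_t before = counts[card_index];
    if (before < kHotLimit) {
      counts[card_index] = static_cast<uint8_t>(before + 1);
    }
    return before;
  }

  bool is_hot(uint8_t count) const { return count >= kHotLimit; }
};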
|
||||
@@ -50,7 +50,6 @@
|
||||
#include "gc/g1/g1HeapSizingPolicy.hpp"
|
||||
#include "gc/g1/g1HeapTransition.hpp"
|
||||
#include "gc/g1/g1HeapVerifier.hpp"
|
||||
#include "gc/g1/g1HotCardCache.hpp"
|
||||
#include "gc/g1/g1InitLogger.hpp"
|
||||
#include "gc/g1/g1MemoryPool.hpp"
|
||||
#include "gc/g1/g1MonotonicArenaFreeMemoryTask.hpp"
|
||||
@@ -490,40 +489,6 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void G1CollectedHeap::begin_archive_alloc_range(bool open) {
|
||||
assert_at_safepoint_on_vm_thread();
|
||||
assert(_archive_allocator == nullptr, "should not be initialized");
|
||||
_archive_allocator = G1ArchiveAllocator::create_allocator(this, open);
|
||||
}
|
||||
|
||||
bool G1CollectedHeap::is_archive_alloc_too_large(size_t word_size) {
|
||||
// Allocations in archive regions cannot be of a size that would be considered
|
||||
// humongous even for a minimum-sized region, because G1 region sizes/boundaries
|
||||
// may be different at archive-restore time.
|
||||
return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());
|
||||
}
|
||||
|
||||
HeapWord* G1CollectedHeap::archive_mem_allocate(size_t word_size) {
|
||||
assert_at_safepoint_on_vm_thread();
|
||||
assert(_archive_allocator != nullptr, "_archive_allocator not initialized");
|
||||
if (is_archive_alloc_too_large(word_size)) {
|
||||
return nullptr;
|
||||
}
|
||||
return _archive_allocator->archive_mem_allocate(word_size);
|
||||
}
|
||||
|
||||
void G1CollectedHeap::end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
|
||||
size_t end_alignment_in_bytes) {
|
||||
assert_at_safepoint_on_vm_thread();
|
||||
assert(_archive_allocator != nullptr, "_archive_allocator not initialized");
|
||||
|
||||
// Call complete_archive to do the real work, filling in the MemRegion
|
||||
// array with the archive regions.
|
||||
_archive_allocator->complete_archive(ranges, end_alignment_in_bytes);
|
||||
delete _archive_allocator;
|
||||
_archive_allocator = nullptr;
|
||||
}
|
||||
|
||||
bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
|
||||
assert(ranges != NULL, "MemRegion array NULL");
|
||||
assert(count != 0, "No MemRegions provided");
|
||||
@@ -1033,10 +998,6 @@ void G1CollectedHeap::prepare_for_mutator_after_full_collection() {
|
||||
}
|
||||
|
||||
void G1CollectedHeap::abort_refinement() {
|
||||
if (G1HotCardCache::use_cache()) {
|
||||
_hot_card_cache->reset_hot_cache();
|
||||
}
|
||||
|
||||
// Discard all remembered set updates and reset refinement statistics.
|
||||
G1BarrierSet::dirty_card_queue_set().abandon_logs_and_stats();
|
||||
assert(G1BarrierSet::dirty_card_queue_set().num_cards() == 0,
|
||||
@@ -1421,7 +1382,6 @@ G1CollectedHeap::G1CollectedHeap() :
|
||||
_verifier(NULL),
|
||||
_summary_bytes_used(0),
|
||||
_bytes_used_during_gc(0),
|
||||
_archive_allocator(nullptr),
|
||||
_survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
|
||||
_old_evac_stats("Old", OldPLABSize, PLABWeight),
|
||||
_monitoring_support(nullptr),
|
||||
@@ -1438,7 +1398,6 @@ G1CollectedHeap::G1CollectedHeap() :
|
||||
_policy(new G1Policy(_gc_timer_stw)),
|
||||
_heap_sizing_policy(NULL),
|
||||
_collection_set(this, _policy),
|
||||
_hot_card_cache(NULL),
|
||||
_rem_set(NULL),
|
||||
_card_set_config(),
|
||||
_card_set_freelist_pool(G1CardSetConfiguration::num_mem_object_types()),
|
||||
@@ -1576,9 +1535,6 @@ jint G1CollectedHeap::initialize() {
|
||||
satbqs.set_buffer_enqueue_threshold_percentage(G1SATBBufferEnqueueingThresholdPercent);
|
||||
}
|
||||
|
||||
// Create the hot card cache.
|
||||
_hot_card_cache = new G1HotCardCache(this);
|
||||
|
||||
// Create space mappers.
|
||||
size_t page_size = heap_rs.page_size();
|
||||
G1RegionToSpaceMapper* heap_storage =
|
||||
@@ -1601,7 +1557,7 @@ jint G1CollectedHeap::initialize() {
|
||||
heap_rs.size());
|
||||
heap_storage->set_mapping_changed_listener(&_listener);
|
||||
|
||||
// Create storage for the BOT, card table, card counts table (hot card cache) and the bitmap.
|
||||
// Create storage for the BOT, card table and the bitmap.
|
||||
G1RegionToSpaceMapper* bot_storage =
|
||||
create_aux_memory_mapper("Block Offset Table",
|
||||
G1BlockOffsetTable::compute_size(heap_rs.size() / HeapWordSize),
|
||||
@@ -1612,21 +1568,13 @@ jint G1CollectedHeap::initialize() {
|
||||
G1CardTable::compute_size(heap_rs.size() / HeapWordSize),
|
||||
G1CardTable::heap_map_factor());
|
||||
|
||||
G1RegionToSpaceMapper* card_counts_storage =
|
||||
create_aux_memory_mapper("Card Counts Table",
|
||||
G1CardCounts::compute_size(heap_rs.size() / HeapWordSize),
|
||||
G1CardCounts::heap_map_factor());
|
||||
|
||||
size_t bitmap_size = G1CMBitMap::compute_size(heap_rs.size());
|
||||
G1RegionToSpaceMapper* bitmap_storage =
|
||||
create_aux_memory_mapper("Mark Bitmap", bitmap_size, G1CMBitMap::heap_map_factor());
|
||||
|
||||
_hrm.initialize(heap_storage, bitmap_storage, bot_storage, cardtable_storage, card_counts_storage);
|
||||
_hrm.initialize(heap_storage, bitmap_storage, bot_storage, cardtable_storage);
|
||||
_card_table->initialize(cardtable_storage);
|
||||
|
||||
// Do later initialization work for concurrent refinement.
|
||||
_hot_card_cache->initialize(card_counts_storage);
|
||||
|
||||
// 6843694 - ensure that the maximum region index can fit
|
||||
// in the remembered set structures.
|
||||
const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
|
||||
@@ -1637,7 +1585,7 @@ jint G1CollectedHeap::initialize() {
|
||||
guarantee((uintptr_t)(heap_rs.base()) >= G1CardTable::card_size(), "Java heap must not start within the first card.");
|
||||
G1FromCardCache::initialize(max_reserved_regions());
|
||||
// Also create a G1 rem set.
|
||||
_rem_set = new G1RemSet(this, _card_table, _hot_card_cache);
|
||||
_rem_set = new G1RemSet(this, _card_table);
|
||||
_rem_set->initialize(max_reserved_regions());
|
||||
|
||||
size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
|
||||
@@ -1817,14 +1765,9 @@ size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
|
||||
return _hrm.total_free_bytes();
|
||||
}
|
||||
|
||||
void G1CollectedHeap::iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_id) {
|
||||
_hot_card_cache->drain(cl, worker_id);
|
||||
}
|
||||
|
||||
// Computes the sum of the storage used by the various regions.
|
||||
size_t G1CollectedHeap::used() const {
|
||||
size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
|
||||
assert(_archive_allocator == nullptr, "must be, should not contribute to used");
|
||||
return result;
|
||||
}
|
||||
|
||||
@@ -2846,13 +2789,6 @@ void G1CollectedHeap::free_region(HeapRegion* hr, FreeRegionList* free_list) {
|
||||
assert(!hr->is_empty(), "the region should not be empty");
|
||||
assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
|
||||
|
||||
// Clear the card counts for this region.
|
||||
// Note: we only need to do this if the region is not young
|
||||
// (since we don't refine cards in young regions).
|
||||
if (!hr->is_young()) {
|
||||
_hot_card_cache->reset_card_counts(hr);
|
||||
}
|
||||
|
||||
// Reset region metadata to allow reuse.
|
||||
hr->hr_clear(true /* clear_space */);
|
||||
_policy->remset_tracker()->update_at_free(hr);
|
||||
@@ -3073,7 +3009,6 @@ void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
|
||||
|
||||
if (!free_list_only) {
|
||||
set_used(cl.total_used());
|
||||
assert(_archive_allocator == nullptr, "must be, should not contribute to used");
|
||||
}
|
||||
assert_used_and_recalculate_used_equal(this);
|
||||
}
|
||||
@@ -3272,8 +3207,6 @@ void G1CollectedHeap::update_used_after_gc(bool evacuation_failed) {
|
||||
evac_failure_injector()->reset();
|
||||
|
||||
set_used(recalculate_used());
|
||||
|
||||
assert(_archive_allocator == nullptr, "must be, should not contribute to used");
|
||||
} else {
|
||||
// The "used" of the collection set have already been subtracted
|
||||
// when they were freed. Add in the bytes used.
|
||||
@@ -3281,10 +3214,6 @@ void G1CollectedHeap::update_used_after_gc(bool evacuation_failed) {
|
||||
}
|
||||
}
|
||||
|
||||
void G1CollectedHeap::reset_hot_card_cache() {
|
||||
_hot_card_cache->reset_hot_cache();
|
||||
}
|
||||
|
||||
class RebuildCodeRootClosure: public CodeBlobClosure {
|
||||
G1CollectedHeap* _g1h;
|
||||
|
||||
|
||||
@@ -67,7 +67,6 @@
|
||||
|
||||
// Forward declarations
|
||||
class G1Allocator;
|
||||
class G1ArchiveAllocator;
|
||||
class G1BatchedTask;
|
||||
class G1CardTableEntryClosure;
|
||||
class G1ConcurrentMark;
|
||||
@@ -76,7 +75,6 @@ class G1ConcurrentRefine;
|
||||
class G1GCCounters;
|
||||
class G1GCPhaseTimes;
|
||||
class G1HeapSizingPolicy;
|
||||
class G1HotCardCache;
|
||||
class G1NewTracer;
|
||||
class G1RemSet;
|
||||
class G1ServiceTask;
|
||||
@@ -245,9 +243,6 @@ public:
|
||||
size_t bytes_used_during_gc() const { return _bytes_used_during_gc; }
|
||||
|
||||
private:
|
||||
// Class that handles archive allocation ranges.
|
||||
G1ArchiveAllocator* _archive_allocator;
|
||||
|
||||
// GC allocation statistics policy for survivors.
|
||||
G1EvacStats _survivor_evac_stats;
|
||||
|
||||
@@ -701,26 +696,6 @@ public:
|
||||
void free_humongous_region(HeapRegion* hr,
|
||||
FreeRegionList* free_list);
|
||||
|
||||
// Facility for allocating in 'archive' regions in high heap memory and
|
||||
// recording the allocated ranges. These should all be called from the
|
||||
// VM thread at safepoints, without the heap lock held. They can be used
|
||||
// to create and archive a set of heap regions which can be mapped at the
|
||||
// same fixed addresses in a subsequent JVM invocation.
|
||||
void begin_archive_alloc_range(bool open = false);
|
||||
|
||||
// Check if the requested size would be too large for an archive allocation.
|
||||
bool is_archive_alloc_too_large(size_t word_size);
|
||||
|
||||
// Allocate memory of the requested size from the archive region. This will
|
||||
// return NULL if the size is too large or if no memory is available. It
|
||||
// does not trigger a garbage collection.
|
||||
HeapWord* archive_mem_allocate(size_t word_size);
|
||||
|
||||
// Optionally aligns the end address and returns the allocated ranges in
|
||||
// an array of MemRegions in order of ascending addresses.
|
||||
void end_archive_alloc_range(GrowableArray<MemRegion>* ranges,
|
||||
size_t end_alignment_in_bytes = 0);
|
||||
|
||||
// Facility for allocating a fixed range within the heap and marking
|
||||
// the containing regions as 'archive'. For use at JVM init time, when the
|
||||
// caller may mmap archived heap data at the specified range(s).
|
||||
@@ -799,9 +774,6 @@ public:
|
||||
void record_obj_copy_mem_stats();
|
||||
|
||||
private:
|
||||
// The hot card cache for remembered set insertion optimization.
|
||||
G1HotCardCache* _hot_card_cache;
|
||||
|
||||
// The g1 remembered set of the heap.
|
||||
G1RemSet* _rem_set;
|
||||
// Global card set configuration
|
||||
@@ -955,9 +927,6 @@ public:
|
||||
static void start_codecache_marking_cycle_if_inactive(bool concurrent_mark_start);
|
||||
static void finish_codecache_marking_cycle();
|
||||
|
||||
// Apply the given closure on all cards in the Hot Card Cache, emptying it.
|
||||
void iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_id);
|
||||
|
||||
// The shared block offset table array.
|
||||
G1BlockOffsetTable* bot() const { return _bot; }
|
||||
|
||||
@@ -1085,8 +1054,6 @@ public:
|
||||
return reserved().contains(addr);
|
||||
}
|
||||
|
||||
G1HotCardCache* hot_card_cache() const { return _hot_card_cache; }
|
||||
|
||||
G1CardTable* card_table() const {
|
||||
return _card_table;
|
||||
}
|
||||
@@ -1288,10 +1255,6 @@ public:
|
||||
// Recalculate amount of used memory after GC. Must be called after all allocation
|
||||
// has finished.
|
||||
void update_used_after_gc(bool evacuation_failed);
|
||||
// Reset and re-enable the hot card cache.
|
||||
// Note the counts for the cards in the regions in the
|
||||
// collection set are reset when the collection set is freed.
|
||||
void reset_hot_card_cache();
|
||||
|
||||
// Rebuild the code root lists for each region
|
||||
// after a full GC.
|
||||
|
||||
@@ -28,7 +28,6 @@
|
||||
#include "gc/g1/g1CollectionSet.hpp"
|
||||
#include "gc/g1/g1CollectionSetCandidates.hpp"
|
||||
#include "gc/g1/g1CollectorState.hpp"
|
||||
#include "gc/g1/g1HotCardCache.hpp"
|
||||
#include "gc/g1/g1ParScanThreadState.hpp"
|
||||
#include "gc/g1/g1Policy.hpp"
|
||||
#include "gc/g1/heapRegion.inline.hpp"
|
||||
@@ -293,8 +292,7 @@ double G1CollectionSet::finalize_young_part(double target_pause_time_ms, G1Survi
|
||||
guarantee(target_pause_time_ms > 0.0,
|
||||
"target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
|
||||
|
||||
size_t pending_cards = _policy->pending_cards_at_gc_start() +
|
||||
_g1h->hot_card_cache()->num_entries();
|
||||
size_t pending_cards = _policy->pending_cards_at_gc_start();
|
||||
|
||||
log_trace(gc, ergo, cset)("Start choosing CSet. Pending cards: " SIZE_FORMAT " target pause time: %1.2fms",
|
||||
pending_cards, target_pause_time_ms);
|
||||
|
||||
@@ -30,7 +30,6 @@
|
||||
#include "gc/g1/g1FullGCMarker.hpp"
|
||||
#include "gc/g1/g1FullGCOopClosures.inline.hpp"
|
||||
#include "gc/g1/g1FullGCPrepareTask.inline.hpp"
|
||||
#include "gc/g1/g1HotCardCache.hpp"
|
||||
#include "gc/g1/heapRegion.inline.hpp"
|
||||
#include "gc/shared/gcTraceTime.inline.hpp"
|
||||
#include "gc/shared/referenceProcessor.hpp"
|
||||
@@ -119,11 +118,6 @@ G1FullGCPrepareTask::G1ResetMetadataClosure::G1ResetMetadataClosure(G1FullCollec
|
||||
void G1FullGCPrepareTask::G1ResetMetadataClosure::reset_region_metadata(HeapRegion* hr) {
|
||||
hr->rem_set()->clear();
|
||||
hr->clear_cardtable();
|
||||
|
||||
G1HotCardCache* hcc = _g1h->hot_card_cache();
|
||||
if (G1HotCardCache::use_cache()) {
|
||||
hcc->reset_card_counts(hr);
|
||||
}
|
||||
}
|
||||
|
||||
bool G1FullGCPrepareTask::G1ResetMetadataClosure::do_heap_region(HeapRegion* hr) {
|
||||
|
||||
@@ -26,7 +26,6 @@
|
||||
#include "gc/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc/g1/g1GCParPhaseTimesTracker.hpp"
|
||||
#include "gc/g1/g1GCPhaseTimes.hpp"
|
||||
#include "gc/g1/g1HotCardCache.hpp"
|
||||
#include "gc/g1/g1ParScanThreadState.inline.hpp"
|
||||
#include "gc/shared/gcTimer.hpp"
|
||||
#include "gc/shared/oopStorage.hpp"
|
||||
@@ -83,13 +82,6 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
|
||||
}
|
||||
|
||||
_gc_par_phases[MergeLB] = new WorkerDataArray<double>("MergeLB", "Log Buffers (ms):", max_gc_threads);
|
||||
if (G1HotCardCache::use_cache()) {
|
||||
_gc_par_phases[MergeHCC] = new WorkerDataArray<double>("MergeHCC", "Hot Card Cache (ms):", max_gc_threads);
|
||||
_gc_par_phases[MergeHCC]->create_thread_work_items("Dirty Cards:", MergeHCCDirtyCards);
|
||||
_gc_par_phases[MergeHCC]->create_thread_work_items("Skipped Cards:", MergeHCCSkippedCards);
|
||||
} else {
|
||||
_gc_par_phases[MergeHCC] = NULL;
|
||||
}
|
||||
_gc_par_phases[ScanHR] = new WorkerDataArray<double>("ScanHR", "Scan Heap Roots (ms):", max_gc_threads);
|
||||
_gc_par_phases[OptScanHR] = new WorkerDataArray<double>("OptScanHR", "Optional Scan Heap Roots (ms):", max_gc_threads);
|
||||
_gc_par_phases[CodeRoots] = new WorkerDataArray<double>("CodeRoots", "Code Root Scan (ms):", max_gc_threads);
|
||||
@@ -106,7 +98,6 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
|
||||
_gc_par_phases[RemoveSelfForwards] = new WorkerDataArray<double>("RemoveSelfForwards", "Remove Self Forwards (ms):", max_gc_threads);
|
||||
_gc_par_phases[ClearCardTable] = new WorkerDataArray<double>("ClearLoggedCards", "Clear Logged Cards (ms):", max_gc_threads);
|
||||
_gc_par_phases[RecalculateUsed] = new WorkerDataArray<double>("RecalculateUsed", "Recalculate Used Memory (ms):", max_gc_threads);
|
||||
_gc_par_phases[ResetHotCardCache] = new WorkerDataArray<double>("ResetHotCardCache", "Reset Hot Card Cache (ms):", max_gc_threads);
|
||||
#if COMPILER2_OR_JVMCI
|
||||
_gc_par_phases[UpdateDerivedPointers] = new WorkerDataArray<double>("UpdateDerivedPointers", "Update Derived Pointers (ms):", max_gc_threads);
|
||||
#endif
|
||||
@@ -248,7 +239,6 @@ void G1GCPhaseTimes::record_gc_pause_end() {
|
||||
ASSERT_PHASE_UNINITIALIZED(MergeER);
|
||||
ASSERT_PHASE_UNINITIALIZED(MergeRS);
|
||||
ASSERT_PHASE_UNINITIALIZED(OptMergeRS);
|
||||
ASSERT_PHASE_UNINITIALIZED(MergeHCC);
|
||||
ASSERT_PHASE_UNINITIALIZED(MergeLB);
|
||||
ASSERT_PHASE_UNINITIALIZED(ScanHR);
|
||||
ASSERT_PHASE_UNINITIALIZED(CodeRoots);
|
||||
@@ -460,9 +450,6 @@ double G1GCPhaseTimes::print_evacuate_initial_collection_set() const {
|
||||
debug_time("Prepare Merge Heap Roots", _cur_prepare_merge_heap_roots_time_ms);
|
||||
debug_phase_merge_remset();
|
||||
|
||||
if (G1HotCardCache::use_cache()) {
|
||||
debug_phase(_gc_par_phases[MergeHCC]);
|
||||
}
|
||||
debug_phase(_gc_par_phases[MergeLB]);
|
||||
|
||||
info_time("Evacuate Collection Set", _cur_collection_initial_evac_time_ms);
|
||||
@@ -518,7 +505,6 @@ double G1GCPhaseTimes::print_post_evacuate_collection_set(bool evacuation_failed
|
||||
debug_phase(_gc_par_phases[RestorePreservedMarks], 1);
|
||||
debug_phase(_gc_par_phases[ClearRetainedRegionBitmaps], 1);
|
||||
}
|
||||
debug_phase(_gc_par_phases[ResetHotCardCache], 1);
|
||||
#if COMPILER2_OR_JVMCI
|
||||
debug_phase(_gc_par_phases[UpdateDerivedPointers], 1);
|
||||
#endif
|
||||
|
||||
@@ -58,7 +58,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
|
||||
MergeRS,
|
||||
OptMergeRS,
|
||||
MergeLB,
|
||||
MergeHCC,
|
||||
ScanHR,
|
||||
OptScanHR,
|
||||
CodeRoots,
|
||||
@@ -82,7 +81,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
|
||||
RemoveSelfForwards,
|
||||
ClearCardTable,
|
||||
RecalculateUsed,
|
||||
ResetHotCardCache,
|
||||
#if COMPILER2_OR_JVMCI
|
||||
UpdateDerivedPointers,
|
||||
#endif
|
||||
@@ -129,11 +127,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
|
||||
ScanHRUsedMemory
|
||||
};
|
||||
|
||||
enum GCMergeHCCWorkItems {
|
||||
MergeHCCDirtyCards,
|
||||
MergeHCCSkippedCards
|
||||
};
|
||||
|
||||
enum GCMergeLBWorkItems {
|
||||
MergeLBDirtyCards,
|
||||
MergeLBSkippedCards
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -310,28 +310,6 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
// We want all used regions to be moved to the bottom-end of the heap, so we have
|
||||
// a contiguous range of free regions at the top end of the heap. This way, we can
|
||||
// avoid fragmentation while allocating the archive regions.
|
||||
//
|
||||
// Before calling this, a full GC should have been executed with a single worker thread,
|
||||
// so that no old regions would be moved to the middle of the heap.
|
||||
void G1HeapVerifier::verify_ready_for_archiving() {
|
||||
VerifyReadyForArchivingRegionClosure cl;
|
||||
G1CollectedHeap::heap()->heap_region_iterate(&cl);
|
||||
if (cl.has_holes()) {
|
||||
log_warning(gc, verify)("All free regions should be at the top end of the heap, but"
|
||||
" we found holes. This is probably caused by (unmovable) humongous"
|
||||
" allocations or active GCLocker, and may lead to fragmentation while"
|
||||
" writing archive heap memory regions.");
|
||||
}
|
||||
if (cl.has_humongous()) {
|
||||
log_warning(gc, verify)("(Unmovable) humongous regions have been found and"
|
||||
" may lead to fragmentation while"
|
||||
" writing archive heap memory regions.");
|
||||
}
|
||||
}
|
||||
|
||||
class VerifyArchivePointerRegionClosure: public HeapRegionClosure {
|
||||
virtual bool do_heap_region(HeapRegion* r) {
|
||||
if (r->is_archive()) {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -81,7 +81,6 @@ public:
|
||||
void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
|
||||
void verify_dirty_young_regions() PRODUCT_RETURN;
|
||||
|
||||
static void verify_ready_for_archiving();
|
||||
static void verify_archive_regions();
|
||||
};
|
||||
|
||||
|
||||
@@ -1,119 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "gc/g1/g1CardTableEntryClosure.hpp"
|
||||
#include "gc/g1/g1CollectedHeap.inline.hpp"
|
||||
#include "gc/g1/g1DirtyCardQueue.hpp"
|
||||
#include "gc/g1/g1HotCardCache.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
|
||||
G1HotCardCache::G1HotCardCache(G1CollectedHeap *g1h):
|
||||
_g1h(g1h), _card_counts(g1h),
|
||||
_hot_cache(NULL), _hot_cache_size(0), _hot_cache_par_chunk_size(0),
|
||||
_hot_cache_idx(0), _hot_cache_par_claimed_idx(0), _cache_wrapped_around(false)
|
||||
{}
|
||||
|
||||
void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
|
||||
if (use_cache()) {
|
||||
_hot_cache_size = (size_t)1 << G1ConcRSLogCacheSize;
|
||||
_hot_cache = ArrayAllocator<CardValue*>::allocate(_hot_cache_size, mtGC);
|
||||
|
||||
reset_hot_cache_internal();
|
||||
|
||||
// For refining the cards in the hot cache in parallel
|
||||
_hot_cache_par_chunk_size = ClaimChunkSize;
|
||||
_hot_cache_par_claimed_idx = 0;
|
||||
|
||||
_cache_wrapped_around = false;
|
||||
|
||||
_card_counts.initialize(card_counts_storage);
|
||||
}
|
||||
}
|
||||
|
||||
G1HotCardCache::~G1HotCardCache() {
|
||||
if (use_cache()) {
|
||||
assert(_hot_cache != NULL, "Logic");
|
||||
ArrayAllocator<CardValue*>::free(_hot_cache, _hot_cache_size);
|
||||
_hot_cache = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
CardTable::CardValue* G1HotCardCache::insert(CardValue* card_ptr) {
|
||||
uint count = _card_counts.add_card_count(card_ptr);
|
||||
if (!_card_counts.is_hot(count)) {
|
||||
// The card is not hot so do not store it in the cache;
|
||||
// return it for immediate refining.
|
||||
return card_ptr;
|
||||
}
|
||||
// Otherwise, the card is hot.
|
||||
size_t index = Atomic::fetch_and_add(&_hot_cache_idx, 1u);
|
||||
if (index == _hot_cache_size) {
|
||||
// Can use relaxed store because all racing threads are writing the same
|
||||
// value and there aren't any concurrent readers.
|
||||
Atomic::store(&_cache_wrapped_around, true);
|
||||
}
|
||||
size_t masked_index = index & (_hot_cache_size - 1);
|
||||
CardValue* current_ptr = _hot_cache[masked_index];
|
||||
|
||||
// Try to store the new card pointer into the cache. Compare-and-swap to guard
|
||||
// against the unlikely event of a race resulting in another card pointer to
|
||||
// have already been written to the cache. In this case we will return
|
||||
// card_ptr in favor of the other option, which would be starting over. This
|
||||
// should be OK since card_ptr will likely be the older card already when/if
|
||||
// this ever happens.
|
||||
CardValue* previous_ptr = Atomic::cmpxchg(&_hot_cache[masked_index],
|
||||
current_ptr,
|
||||
card_ptr);
|
||||
return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
|
||||
}
|
||||
|
||||
void G1HotCardCache::drain(G1CardTableEntryClosure* cl, uint worker_id) {
|
||||
assert(use_cache(), "Drain only necessary if we use the hot card cache.");
|
||||
|
||||
assert(_hot_cache != NULL, "Logic");
|
||||
|
||||
while (_hot_cache_par_claimed_idx < _hot_cache_size) {
|
||||
size_t end_idx = Atomic::add(&_hot_cache_par_claimed_idx,
|
||||
_hot_cache_par_chunk_size);
|
||||
size_t start_idx = end_idx - _hot_cache_par_chunk_size;
|
||||
// The current worker has successfully claimed the chunk [start_idx..end_idx)
|
||||
end_idx = MIN2(end_idx, _hot_cache_size);
|
||||
for (size_t i = start_idx; i < end_idx; i++) {
|
||||
CardValue* card_ptr = _hot_cache[i];
|
||||
if (card_ptr != NULL) {
|
||||
cl->do_card_ptr(card_ptr, worker_id);
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// The existing entries in the hot card cache, which were just refined
|
||||
// above, are discarded prior to re-enabling the cache near the end of the GC.
|
||||
}
|
||||
|
||||
void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
|
||||
_card_counts.clear_region(hr);
|
||||
}
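The removed G1HotCardCache::insert() above treats the cache as a power-of-two ring: an atomic fetch-and-add picks a slot, and a compare-and-swap stores the new card while the previous occupant is handed back for immediate refinement. A minimal lock-free sketch of that pattern — not the removed HotSpot code; EvictingCache, the void* payload and the fixed size are assumptions:

#include <atomic>
#include <cstddef>

template <size_t N>  // N must be a power of two
struct EvictingCache {
  std::atomic<size_t> idx{0};
  std::atomic<const void*> slots[N];

  EvictingCache() {
    for (auto& s : slots) s.store(nullptr, std::memory_order_relaxed);
  }

  // Returns the evicted entry (possibly nullptr) for immediate processing, or
  // the new card itself if a racing thread already replaced the chosen slot.
  const void* insert(const void* card) {
    size_t i = idx.fetch_add(1) & (N - 1);       // wrap into the ring
    const void* expected = slots[i].load();
    return slots[i].compare_exchange_strong(expected, card) ? expected : card;
  }
};

// Usage sketch: EvictingCache<1024> cache; const void* evicted = cache.insert(card);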
|
||||
@@ -1,146 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_G1_G1HOTCARDCACHE_HPP
|
||||
#define SHARE_GC_G1_G1HOTCARDCACHE_HPP
|
||||
|
||||
#include "gc/g1/g1CardCounts.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "runtime/javaThread.hpp"
|
||||
#include "runtime/safepoint.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
class G1CardTableEntryClosure;
|
||||
class G1CollectedHeap;
|
||||
class HeapRegion;
|
||||
|
||||
// An evicting cache of cards that have been logged by the G1 post
// write barrier. Placing a card in the cache delays the refinement
// of the card until the card is evicted, or the cache is drained
// during the next evacuation pause.
//
// The first thing the G1 post write barrier does is to check whether
// the card containing the updated pointer is already dirty and, if
// so, skips the remaining code in the barrier.
//
// Delaying the refinement of a card will make the card fail the
// first is_dirty check in the write barrier, skipping the remainder
// of the write barrier.
//
// This can significantly reduce the overhead of the write barrier
// code, increasing throughput.

class G1HotCardCache: public CHeapObj<mtGC> {
|
||||
public:
|
||||
typedef CardTable::CardValue CardValue;
|
||||
|
||||
private:
|
||||
G1CollectedHeap* _g1h;
|
||||
|
||||
G1CardCounts _card_counts;
|
||||
|
||||
|
||||
// The card cache table
|
||||
CardValue** _hot_cache;
|
||||
|
||||
size_t _hot_cache_size;
|
||||
|
||||
size_t _hot_cache_par_chunk_size;
|
||||
|
||||
// Avoids false sharing when concurrently updating _hot_cache_idx or
// _hot_cache_par_claimed_idx. These are never updated at the same time,
// so it is not necessary to separate them from each other as well.
|
||||
char _pad_before[DEFAULT_CACHE_LINE_SIZE];
|
||||
|
||||
volatile size_t _hot_cache_idx;
|
||||
|
||||
volatile size_t _hot_cache_par_claimed_idx;
|
||||
|
||||
char _pad_after[DEFAULT_CACHE_LINE_SIZE];
|
||||
|
||||
// Records whether insertion overflowed the hot card cache at least once. This
|
||||
// avoids the need for a separate atomic counter of how many valid entries are
|
||||
// in the HCC.
|
||||
volatile bool _cache_wrapped_around;
|
||||
|
||||
// The number of cached cards a thread claims when flushing the cache
|
||||
static const int ClaimChunkSize = 32;
|
||||
|
||||
public:
|
||||
static bool use_cache() {
|
||||
return (G1ConcRSLogCacheSize > 0);
|
||||
}
|
||||
|
||||
G1HotCardCache(G1CollectedHeap* g1h);
|
||||
~G1HotCardCache();
|
||||
|
||||
void initialize(G1RegionToSpaceMapper* card_counts_storage);
|
||||
|
||||
// Returns the card to be refined or NULL.
//
// Increments the count for the given card. If the card is not 'hot',
// it is returned for immediate refining. Otherwise the card is
// added to the hot card cache.
// If there is enough room in the hot card cache for the card we're
// adding, NULL is returned and no further action is needed.
// If we evict a card from the cache to make room for the new card,
// the evicted card is then returned for refinement.
CardValue* insert(CardValue* card_ptr);

// Refine the cards that have been delayed as a result of
// being in the cache.
void drain(G1CardTableEntryClosure* cl, uint worker_id);
// Set up for parallel processing of the cards in the hot cache
void reset_hot_cache_claimed_index() {
_hot_cache_par_claimed_idx = 0;
}

// Resets the hot card cache and discards the entries.
void reset_hot_cache() {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at a safepoint");
if (use_cache()) {
reset_hot_cache_internal();
}
}

// Zeros the values in the card counts table for the given region
void reset_card_counts(HeapRegion* hr);

// Number of entries in the HCC.
size_t num_entries() const {
return _cache_wrapped_around ? _hot_cache_size : _hot_cache_idx + 1;
}
private:
void reset_hot_cache_internal() {
assert(_hot_cache != NULL, "Logic");
_hot_cache_idx = 0;
for (size_t i = 0; i < _hot_cache_size; i++) {
_hot_cache[i] = NULL;
}
_cache_wrapped_around = false;
}
};

#endif // SHARE_GC_G1_G1HOTCARDCACHE_HPP
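The header deleted above documents the insert()/eviction contract only in its comments. The stand-alone sketch below is not part of the diff; it is a minimal illustration of that contract, using hypothetical names such as SimpleHotCardCache and kHotLimit: a card that has not yet crossed the hotness threshold is handed back for immediate refinement, a hot card is parked in a fixed-size buffer, and the caller receives either nullptr (a free slot absorbed it) or the evicted entry. The real implementation keeps the counts in G1CardCounts and updates the buffer with atomics and cache-line padding, which this sketch deliberately omits.

// Illustrative sketch only -- not HotSpot code; assumes a non-zero cache size.
#include <cstddef>
#include <cstdint>
#include <unordered_map>
#include <vector>

using CardValue = uint8_t; // stand-in for CardTable::CardValue

class SimpleHotCardCache {
public:
  explicit SimpleHotCardCache(size_t size) : _cache(size, nullptr), _idx(0) {}

  // Returns the card to refine now, or nullptr if the cache absorbed it.
  CardValue* insert(CardValue* card) {
    if (++_counts[card] < kHotLimit) {
      return card;                      // not hot yet: refine immediately
    }
    CardValue* evicted = _cache[_idx];  // nullptr while the slot is still empty
    _cache[_idx] = card;                // cache the hot card, evicting the old entry
    _idx = (_idx + 1) % _cache.size();
    return evicted;
  }

private:
  static const int kHotLimit = 4;       // plays the role of G1ConcRSHotCardLimit
  std::unordered_map<CardValue*, int> _counts;
  std::vector<CardValue*> _cache;
  size_t _idx;
};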
@@ -34,7 +34,6 @@
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/g1ConcurrentRefineStats.hpp"
#include "gc/g1/g1CollectionSetChooser.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1IHOPControl.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1Policy.hpp"
@@ -800,11 +799,9 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar

_eden_surv_rate_group->start_adding_regions();

double merge_hcc_time_ms = average_time_ms(G1GCPhaseTimes::MergeHCC);
if (update_stats) {
// Update prediction for card merge.
size_t const merged_cards_from_log_buffers = p->sum_thread_work_items(G1GCPhaseTimes::MergeHCC, G1GCPhaseTimes::MergeHCCDirtyCards) +
p->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
size_t const merged_cards_from_log_buffers = p->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
// MergeRSCards includes the cards from the Eager Reclaim phase.
size_t const merged_cards_from_rs = p->sum_thread_work_items(G1GCPhaseTimes::MergeRS, G1GCPhaseTimes::MergeRSCards) +
p->sum_thread_work_items(G1GCPhaseTimes::OptMergeRS, G1GCPhaseTimes::MergeRSCards);
@@ -814,7 +811,6 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar
if (total_cards_merged >= G1NumCardsCostSampleThreshold) {
double avg_time_merge_cards = average_time_ms(G1GCPhaseTimes::MergeER) +
average_time_ms(G1GCPhaseTimes::MergeRS) +
average_time_ms(G1GCPhaseTimes::MergeHCC) +
average_time_ms(G1GCPhaseTimes::MergeLB) +
average_time_ms(G1GCPhaseTimes::OptMergeRS);
_analytics->report_cost_per_card_merge_ms(avg_time_merge_cards / total_cards_merged, is_young_only_pause);
@@ -897,36 +893,21 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar
// Note that _mmu_tracker->max_gc_time() returns the time in seconds.
double logged_cards_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;

if (logged_cards_time_goal_ms < merge_hcc_time_ms) {
log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
"Logged Cards Scan time goal: %1.2fms Scan HCC time: %1.2fms",
logged_cards_time_goal_ms, merge_hcc_time_ms);

logged_cards_time_goal_ms = 0;
} else {
logged_cards_time_goal_ms -= merge_hcc_time_ms;
}

double const logged_cards_time_ms = logged_cards_processing_time();
size_t logged_cards =
phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB,
G1GCPhaseTimes::MergeLBDirtyCards);
size_t hcc_cards =
phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeHCC,
G1GCPhaseTimes::MergeHCCDirtyCards);
bool exceeded_goal = logged_cards_time_goal_ms < logged_cards_time_ms;
size_t predicted_thread_buffer_cards = _analytics->predict_dirtied_cards_in_thread_buffers();
G1ConcurrentRefine* cr = _g1h->concurrent_refine();

log_debug(gc, ergo, refine)
("GC refinement: goal: %zu + %zu / %1.2fms, actual: %zu / %1.2fms, HCC: %zu / %1.2fms%s",
("GC refinement: goal: %zu + %zu / %1.2fms, actual: %zu / %1.2fms, %s",
cr->pending_cards_target(),
predicted_thread_buffer_cards,
logged_cards_time_goal_ms,
logged_cards,
logged_cards_time_ms,
hcc_cards,
merge_hcc_time_ms,
(exceeded_goal ? " (exceeded goal)" : ""));

cr->adjust_after_gc(logged_cards_time_ms,
@@ -35,7 +35,6 @@
#include "gc/g1/g1FromCardCache.hpp"
#include "gc/g1/g1GCParPhaseTimesTracker.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RootClosures.hpp"
@@ -64,7 +63,7 @@
// Collects information about the overall heap root scan progress during an evacuation.
//
// Scanning the remembered sets works by first merging all sources of cards to be
// scanned (log buffers, hcc, remembered sets) into a single data structure to remove
// scanned (log buffers, remembered sets) into a single data structure to remove
// duplicates and simplify work distribution.
//
// During the following card scanning we not only scan this combined set of cards, but
@@ -467,14 +466,12 @@ public:
};

G1RemSet::G1RemSet(G1CollectedHeap* g1h,
G1CardTable* ct,
G1HotCardCache* hot_card_cache) :
G1CardTable* ct) :
_scan_state(new G1RemSetScanState()),
_prev_period_summary(false),
_g1h(g1h),
_ct(ct),
_g1p(_g1h->policy()),
_hot_card_cache(hot_card_cache) {
_g1p(_g1h->policy()) {
}

G1RemSet::~G1RemSet() {
@@ -1370,17 +1367,6 @@ public:
}
}

// Apply closure to log entries in the HCC.
if (_initial_evacuation && G1HotCardCache::use_cache()) {
assert(merge_remset_phase == G1GCPhaseTimes::MergeRS, "Wrong merge phase");
G1GCParPhaseTimesTracker x(p, G1GCPhaseTimes::MergeHCC, worker_id);
G1MergeLogBufferCardsClosure cl(g1h, _scan_state);
g1h->iterate_hcc_closure(&cl, worker_id);

p->record_thread_work_item(G1GCPhaseTimes::MergeHCC, worker_id, cl.cards_dirty(), G1GCPhaseTimes::MergeHCCDirtyCards);
p->record_thread_work_item(G1GCPhaseTimes::MergeHCC, worker_id, cl.cards_skipped(), G1GCPhaseTimes::MergeHCCSkippedCards);
}

// Now apply the closure to all remaining log entries.
if (_initial_evacuation) {
assert(merge_remset_phase == G1GCPhaseTimes::MergeRS, "Wrong merge phase");
@@ -1528,37 +1514,6 @@ bool G1RemSet::clean_card_before_refine(CardValue** const card_ptr_addr) {
return false;
}

// The result from the hot card cache insert call is either:
// * pointer to the current card
// (implying that the current card is not 'hot'),
// * null
// (meaning we had inserted the card ptr into the "hot" card cache,
// which had some headroom),
// * a pointer to a "hot" card that was evicted from the "hot" cache.
//

if (G1HotCardCache::use_cache()) {
const CardValue* orig_card_ptr = card_ptr;
card_ptr = _hot_card_cache->insert(card_ptr);
if (card_ptr == NULL) {
// There was no eviction. Nothing to do.
return false;
} else if (card_ptr != orig_card_ptr) {
// Original card was inserted and an old card was evicted.
start = _ct->addr_for(card_ptr);
r = _g1h->heap_region_containing_or_null(start);

// Check whether the region formerly in the cache should be
// ignored, as discussed earlier for the original card. The
// region could have been freed (or even uncommitted) while
// in the cache.
if (r == nullptr || !r->is_old_or_humongous_or_archive()) {
return false;
}
*card_ptr_addr = card_ptr;
} // Else we still have the original card.
}

// Trim the region designated by the card to what's been allocated
// in the region. The card could be stale, or the card could cover
// (part of) an object at the end of the allocated space and extend
@@ -43,7 +43,6 @@ class CodeBlobClosure;
class G1AbstractSubTask;
class G1CollectedHeap;
class G1CMBitMap;
class G1HotCardCache;
class G1RemSetScanState;
class G1ParScanThreadState;
class G1ParScanThreadStateSet;
@@ -69,7 +68,6 @@ private:

G1CardTable* _ct;
G1Policy* _g1p;
G1HotCardCache* _hot_card_cache;

void print_merge_heap_roots_stats();

@@ -81,9 +79,7 @@ public:
// Initialize data that depends on the heap size being known.
void initialize(uint max_reserved_regions);

G1RemSet(G1CollectedHeap* g1h,
G1CardTable* ct,
G1HotCardCache* hot_card_cache);
G1RemSet(G1CollectedHeap* g1h, G1CardTable* ct);
~G1RemSet();

// Scan all cards in the non-collection set regions that potentially contain
@@ -94,7 +90,7 @@ public:
G1GCPhaseTimes::GCParPhases objcopy_phase,
bool remember_already_scanned_cards);

// Merge cards from various sources (remembered sets, hot card cache, log buffers)
// Merge cards from various sources (remembered sets, log buffers)
// and calculate the cards that need to be scanned later (via scan_heap_roots()).
// If initial_evacuation is set, this is called during the initial evacuation.
void merge_heap_roots(bool initial_evacuation);
@@ -126,8 +122,7 @@ public:
// Two methods for concurrent refinement support, executed concurrently to
// the mutator:
// Cleans the card at "*card_ptr_addr" before refinement, returns true iff the
// card needs later refinement. Note that "*card_ptr_addr" could be updated to
// a different card due to use of hot card cache.
// card needs later refinement.
bool clean_card_before_refine(CardValue** const card_ptr_addr);
// Refine the region corresponding to "card_ptr". Must be called after
// being filtered by clean_card_before_refine(), and after proper
@@ -36,7 +36,6 @@
#include "gc/g1/g1YoungGCEvacFailureInjector.hpp"
#include "gc/g1/g1EvacInfo.hpp"
#include "gc/g1/g1HRPrinter.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "gc/g1/g1MonitoringSupport.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1Policy.hpp"
@@ -202,10 +201,6 @@ G1GCPhaseTimes* G1YoungCollector::phase_times() const {
return _g1h->phase_times();
}

G1HotCardCache* G1YoungCollector::hot_card_cache() const {
return _g1h->hot_card_cache();
}

G1HRPrinter* G1YoungCollector::hr_printer() const {
return _g1h->hr_printer();
}
@@ -520,8 +515,6 @@ void G1YoungCollector::pre_evacuate_collection_set(G1EvacInfo* evacuation_info)

_g1h->gc_prologue(false);

hot_card_cache()->reset_hot_cache_claimed_index();

// Initialize the GC alloc regions.
allocator()->init_gc_alloc_regions(evacuation_info);

@@ -40,7 +40,6 @@ class G1ConcurrentMark;
class G1EvacFailureRegions;
class G1EvacInfo;
class G1GCPhaseTimes;
class G1HotCardCache;
class G1HRPrinter;
class G1MonitoringSupport;
class G1MonotonicArenaMemoryStats;
@@ -69,7 +68,7 @@ class G1YoungCollector {
G1ConcurrentMark* concurrent_mark() const;
STWGCTimer* gc_timer_stw() const;
G1NewTracer* gc_tracer_stw() const;
G1HotCardCache* hot_card_cache() const;

G1HRPrinter* hr_printer() const;
G1MonitoringSupport* monitoring_support() const;
G1GCPhaseTimes* phase_times() const;
@@ -239,14 +239,6 @@
}
};

class G1PostEvacuateCollectionSetCleanupTask2::ResetHotCardCacheTask : public G1AbstractSubTask {
public:
ResetHotCardCacheTask() : G1AbstractSubTask(G1GCPhaseTimes::ResetHotCardCache) { }

double worker_cost() const override { return 0.5; }
void do_work(uint worker_id) override { G1CollectedHeap::heap()->reset_hot_card_cache(); }
};

#if COMPILER2_OR_JVMCI
class G1PostEvacuateCollectionSetCleanupTask2::UpdateDerivedPointersTask : public G1AbstractSubTask {
public:
@@ -733,7 +725,6 @@ G1PostEvacuateCollectionSetCleanupTask2::G1PostEvacuateCollectionSetCleanupTask2
G1EvacFailureRegions* evac_failure_regions) :
G1BatchedTask("Post Evacuate Cleanup 2", G1CollectedHeap::heap()->phase_times())
{
add_serial_task(new ResetHotCardCacheTask());
#if COMPILER2_OR_JVMCI
add_serial_task(new UpdateDerivedPointersTask());
#endif

@@ -54,7 +54,6 @@ public:

// Second set of post evacuate collection set tasks containing (s means serial):
// - Eagerly Reclaim Humongous Objects (s)
// - Reset Hot Card Cache (s)
// - Update Derived Pointers (s)
// - Clear Retained Region Bitmaps (on evacuation failure)
// - Redirty Logged Cards
@@ -63,7 +62,6 @@ public:
// - Resize TLABs
class G1PostEvacuateCollectionSetCleanupTask2 : public G1BatchedTask {
class EagerlyReclaimHumongousObjectsTask;
class ResetHotCardCacheTask;
#if COMPILER2_OR_JVMCI
class UpdateDerivedPointersTask;
#endif
@@ -178,14 +178,6 @@
"Control whether concurrent refinement is performed. " \
"Disabling effectively ignores G1RSetUpdatingPauseTimePercent") \
\
product(size_t, G1ConcRSLogCacheSize, 10, \
"Log base 2 of the length of conc RS hot-card cache.") \
range(0, 27) \
\
product(uintx, G1ConcRSHotCardLimit, 4, \
"The threshold that defines (>=) a hot card.") \
range(0, max_jubyte) \
\
develop(uint, G1RemSetArrayOfCardsEntriesBase, 8, \
"Maximum number of entries per region in the Array of Cards " \
"card set container per MB of a heap region.") \
@@ -64,7 +64,6 @@ public:
HeapRegionManager::HeapRegionManager() :
_bot_mapper(NULL),
_cardtable_mapper(NULL),
_card_counts_mapper(NULL),
_committed_map(),
_allocated_heapregions_length(0),
_regions(), _heap_mapper(NULL),
@@ -75,8 +74,7 @@ HeapRegionManager::HeapRegionManager() :
void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
G1RegionToSpaceMapper* bitmap,
G1RegionToSpaceMapper* bot,
G1RegionToSpaceMapper* cardtable,
G1RegionToSpaceMapper* card_counts) {
G1RegionToSpaceMapper* cardtable) {
_allocated_heapregions_length = 0;

_heap_mapper = heap_storage;
@@ -86,8 +84,6 @@ void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
_bot_mapper = bot;
_cardtable_mapper = cardtable;

_card_counts_mapper = card_counts;

_regions.initialize(heap_storage->reserved(), HeapRegion::GrainBytes);

_committed_map.initialize(reserved_length());
@@ -191,8 +187,6 @@ void HeapRegionManager::commit_regions(uint index, size_t num_regions, WorkerThr

_bot_mapper->commit_regions(index, num_regions, pretouch_workers);
_cardtable_mapper->commit_regions(index, num_regions, pretouch_workers);

_card_counts_mapper->commit_regions(index, num_regions, pretouch_workers);
}

void HeapRegionManager::uncommit_regions(uint start, uint num_regions) {
@@ -218,8 +212,6 @@ void HeapRegionManager::uncommit_regions(uint start, uint num_regions) {
_bot_mapper->uncommit_regions(start, num_regions);
_cardtable_mapper->uncommit_regions(start, num_regions);

_card_counts_mapper->uncommit_regions(start, num_regions);

_committed_map.uncommit(start, end);
}

@@ -271,22 +263,18 @@ void HeapRegionManager::clear_auxiliary_data_structures(uint start, uint num_reg
_bot_mapper->signal_mapping_changed(start, num_regions);
// Signal G1CardTable to clear the given regions.
_cardtable_mapper->signal_mapping_changed(start, num_regions);
// Signal G1CardCounts to clear the given regions.
_card_counts_mapper->signal_mapping_changed(start, num_regions);
}

MemoryUsage HeapRegionManager::get_auxiliary_data_memory_usage() const {
size_t used_sz =
_bitmap_mapper->committed_size() +
_bot_mapper->committed_size() +
_cardtable_mapper->committed_size() +
_card_counts_mapper->committed_size();
_cardtable_mapper->committed_size();

size_t committed_sz =
_bitmap_mapper->reserved_size() +
_bot_mapper->reserved_size() +
_cardtable_mapper->reserved_size() +
_card_counts_mapper->reserved_size();
_cardtable_mapper->reserved_size();

return MemoryUsage(0, used_sz, committed_sz, committed_sz);
}
@@ -76,7 +76,6 @@ class HeapRegionManager: public CHeapObj<mtGC> {

G1RegionToSpaceMapper* _bot_mapper;
G1RegionToSpaceMapper* _cardtable_mapper;
G1RegionToSpaceMapper* _card_counts_mapper;

// Keeps track of the currently committed regions in the heap. The committed regions
// can either be active (ready for use) or inactive (ready for uncommit).
@@ -163,8 +162,7 @@ public:
void initialize(G1RegionToSpaceMapper* heap_storage,
G1RegionToSpaceMapper* bitmap,
G1RegionToSpaceMapper* bot,
G1RegionToSpaceMapper* cardtable,
G1RegionToSpaceMapper* card_counts);
G1RegionToSpaceMapper* cardtable);

// Return the "dummy" region used for G1AllocRegion. This is currently a hardwired
// new HeapRegion that owns HeapRegion at index 0. Since at the moment we commit
@@ -99,7 +99,8 @@ Node* BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) cons
}

store = kit->store_to_memory(kit->control(), access.addr().node(), val.node(), bt,
access.addr().type(), mo, requires_atomic_access, unaligned, mismatched, unsafe);
access.addr().type(), mo, requires_atomic_access, unaligned,
mismatched, unsafe, access.barrier_data());
} else {
assert(access.is_opt_access(), "either parse or opt access");
C2OptAccess& opt_access = static_cast<C2OptAccess&>(access);
@@ -117,6 +118,7 @@ Node* BarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) cons
if (mismatched) {
st->set_mismatched_access();
}
st->set_barrier_data(access.barrier_data());
store = gvn.transform(st);
if (store == st) {
mm->set_memory_at(alias, st);

@@ -80,7 +80,7 @@ protected:
// either precise or imprecise. We make non-virtual inline variants of
// these functions here for performance.
template <DecoratorSet decorators, typename T>
void write_ref_field_post(T* field, oop newVal);
void write_ref_field_post(T* field);

virtual void invalidate(MemRegion mr);

@@ -31,7 +31,7 @@
#include "runtime/atomic.hpp"

template <DecoratorSet decorators, typename T>
inline void CardTableBarrierSet::write_ref_field_post(T* field, oop newVal) {
inline void CardTableBarrierSet::write_ref_field_post(T* field) {
volatile CardValue* byte = _card_table->byte_for(field);
*byte = CardTable::dirty_card_val();
}
@@ -298,7 +298,6 @@ void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
do_full_collection(false); // don't clear all soft refs
break;
}
case GCCause::_archive_time_gc:
case GCCause::_metadata_GC_clear_soft_refs: {
HandleMark hm(thread);
do_full_collection(true); // do clear all soft refs

@@ -60,9 +60,6 @@ const char* GCCause::to_string(GCCause::Cause cause) {
case _wb_breakpoint:
return "WhiteBox Initiated Run to Breakpoint";

case _archive_time_gc:
return "Full GC for -Xshare:dump";

case _no_gc:
return "No GC";

@@ -53,7 +53,6 @@ class GCCause : public AllStatic {
_wb_young_gc,
_wb_full_gc,
_wb_breakpoint,
_archive_time_gc,

/* implementation independent, but reserved for GC use */
_no_gc,

@@ -49,7 +49,7 @@
inline void write_ref_field_pre(T* addr) {}

template <DecoratorSet decorators, typename T>
inline void write_ref_field_post(T *addr, oop new_value) {}
inline void write_ref_field_post(T *addr) {}

// Causes all refs in "mr" to be assumed to be modified.
virtual void invalidate(MemRegion mr) = 0;
@@ -63,7 +63,7 @@ oop_store_in_heap(T* addr, oop value) {
BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
bs->template write_ref_field_pre<decorators>(addr);
Raw::oop_store(addr, value);
bs->template write_ref_field_post<decorators>(addr, value);
bs->template write_ref_field_post<decorators>(addr);
}

template <DecoratorSet decorators, typename BarrierSetT>
@@ -74,7 +74,7 @@ oop_atomic_cmpxchg_in_heap(T* addr, oop compare_value, oop new_value) {
bs->template write_ref_field_pre<decorators>(addr);
oop result = Raw::oop_atomic_cmpxchg(addr, compare_value, new_value);
if (result == compare_value) {
bs->template write_ref_field_post<decorators>(addr, new_value);
bs->template write_ref_field_post<decorators>(addr);
}
return result;
}
@@ -86,7 +86,7 @@ oop_atomic_xchg_in_heap(T* addr, oop new_value) {
BarrierSetT *bs = barrier_set_cast<BarrierSetT>(barrier_set());
bs->template write_ref_field_pre<decorators>(addr);
oop result = Raw::oop_atomic_xchg(addr, new_value);
bs->template write_ref_field_post<decorators>(addr, new_value);
bs->template write_ref_field_post<decorators>(addr);
return result;
}
@@ -50,11 +50,11 @@ void ShenandoahLoadReferenceBarrierStub::emit_code(LIR_Assembler* ce) {
}

ShenandoahBarrierSetC1::ShenandoahBarrierSetC1() :
_pre_barrier_c1_runtime_code_blob(NULL),
_load_reference_barrier_strong_rt_code_blob(NULL),
_load_reference_barrier_strong_native_rt_code_blob(NULL),
_load_reference_barrier_weak_rt_code_blob(NULL),
_load_reference_barrier_phantom_rt_code_blob(NULL) {}
_pre_barrier_c1_runtime_code_blob(nullptr),
_load_reference_barrier_strong_rt_code_blob(nullptr),
_load_reference_barrier_strong_native_rt_code_blob(nullptr),
_load_reference_barrier_weak_rt_code_blob(nullptr),
_load_reference_barrier_phantom_rt_code_blob(nullptr) {}

void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val) {
// First we test whether marking is in progress.
@@ -97,7 +97,7 @@ void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info,
assert(addr_opr->is_register(), "must be");
addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
}
slow = new ShenandoahPreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info ? new CodeEmitInfo(info) : NULL);
slow = new ShenandoahPreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info ? new CodeEmitInfo(info) : nullptr);
} else {
assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
assert(pre_val->is_register(), "must be");
@@ -246,7 +246,7 @@ class C1ShenandoahPreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure
virtual OopMapSet* generate_code(StubAssembler* sasm) {
ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
bs->generate_c1_pre_barrier_runtime_stub(sasm);
return NULL;
return nullptr;
}
};

@@ -260,7 +260,7 @@ public:
virtual OopMapSet* generate_code(StubAssembler* sasm) {
ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
bs->generate_c1_load_reference_barrier_runtime_stub(sasm, _decorators);
return NULL;
return nullptr;
}
};

@@ -53,7 +53,7 @@
// previous value is assumed to have already been loaded into pre_val.
ShenandoahPreBarrierStub(LIR_Opr pre_val) :
_do_load(false), _addr(LIR_OprFact::illegalOpr), _pre_val(pre_val),
_patch_code(lir_patch_none), _info(NULL)
_patch_code(lir_patch_none), _info(nullptr)
{
assert(_pre_val->is_register(), "should be a register");
}
@@ -69,7 +69,7 @@ public:
if (_do_load) {
// don't pass in the code emit info since it's processed in the fast
// path
if (_info != NULL)
if (_info != nullptr)
visitor->do_slow_case(_info);
else
visitor->do_slow_case();
@@ -142,7 +142,7 @@ private:
public:
LIR_OpShenandoahCompareAndSwap(LIR_Opr addr, LIR_Opr cmp_value, LIR_Opr new_value,
LIR_Opr t1, LIR_Opr t2, LIR_Opr result)
: LIR_Op(lir_none, result, NULL) // no info
: LIR_Op(lir_none, result, nullptr) // no info
, _addr(addr)
, _cmp_value(cmp_value)
, _new_value(new_value)
@@ -205,27 +205,27 @@ public:
ShenandoahBarrierSetC1();

CodeBlob* pre_barrier_c1_runtime_code_blob() {
assert(_pre_barrier_c1_runtime_code_blob != NULL, "");
assert(_pre_barrier_c1_runtime_code_blob != nullptr, "");
return _pre_barrier_c1_runtime_code_blob;
}

CodeBlob* load_reference_barrier_strong_rt_code_blob() {
assert(_load_reference_barrier_strong_rt_code_blob != NULL, "");
assert(_load_reference_barrier_strong_rt_code_blob != nullptr, "");
return _load_reference_barrier_strong_rt_code_blob;
}

CodeBlob* load_reference_barrier_strong_native_rt_code_blob() {
assert(_load_reference_barrier_strong_native_rt_code_blob != NULL, "");
assert(_load_reference_barrier_strong_native_rt_code_blob != nullptr, "");
return _load_reference_barrier_strong_native_rt_code_blob;
}

CodeBlob* load_reference_barrier_weak_rt_code_blob() {
assert(_load_reference_barrier_weak_rt_code_blob != NULL, "");
assert(_load_reference_barrier_weak_rt_code_blob != nullptr, "");
return _load_reference_barrier_weak_rt_code_blob;
}

CodeBlob* load_reference_barrier_phantom_rt_code_blob() {
assert(_load_reference_barrier_phantom_rt_code_blob != NULL, "");
assert(_load_reference_barrier_phantom_rt_code_blob != nullptr, "");
return _load_reference_barrier_phantom_rt_code_blob;
}
@@ -48,8 +48,8 @@ ShenandoahBarrierSetC2* ShenandoahBarrierSetC2::bsc2() {
}

ShenandoahBarrierSetC2State::ShenandoahBarrierSetC2State(Arena* comp_arena)
: _iu_barriers(new (comp_arena) GrowableArray<ShenandoahIUBarrierNode*>(comp_arena, 8, 0, NULL)),
_load_reference_barriers(new (comp_arena) GrowableArray<ShenandoahLoadReferenceBarrierNode*>(comp_arena, 8, 0, NULL)) {
: _iu_barriers(new (comp_arena) GrowableArray<ShenandoahIUBarrierNode*>(comp_arena, 8, 0, nullptr)),
_load_reference_barriers(new (comp_arena) GrowableArray<ShenandoahLoadReferenceBarrierNode*>(comp_arena, 8, 0, nullptr)) {
}

int ShenandoahBarrierSetC2State::iu_barriers_count() const {
@@ -107,7 +107,7 @@ bool ShenandoahBarrierSetC2::satb_can_remove_pre_barrier(GraphKit* kit, PhaseTra
return false; // cannot unalias unless there are precise offsets
}

if (alloc == NULL) {
if (alloc == nullptr) {
return false; // No allocation found
}

@@ -123,7 +123,7 @@ bool ShenandoahBarrierSetC2::satb_can_remove_pre_barrier(GraphKit* kit, PhaseTra
intptr_t st_offset = 0;
Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

if (st_base == NULL) {
if (st_base == nullptr) {
break; // inscrutable pointer
}

@@ -163,12 +163,12 @@ bool ShenandoahBarrierSetC2::satb_can_remove_pre_barrier(GraphKit* kit, PhaseTra
// Make sure that we are looking at the same allocation site.
// The alloc variable is guaranteed to not be null here from earlier check.
if (alloc == st_alloc) {
// Check that the initialization is storing NULL so that no previous store
// Check that the initialization is storing null so that no previous store
// has been moved up and directly write a reference
Node* captured_store = st_init->find_captured_store(offset,
type2aelembytes(T_OBJECT),
phase);
if (captured_store == NULL || captured_store == st_init->zero_memory()) {
if (captured_store == nullptr || captured_store == st_init->zero_memory()) {
return true;
}
}
@@ -199,9 +199,9 @@ void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,

if (do_load) {
// We need to generate the load of the previous value
assert(adr != NULL, "where are loading from?");
assert(pre_val == NULL, "loaded already?");
assert(val_type != NULL, "need a type");
assert(adr != nullptr, "where are loading from?");
assert(pre_val == nullptr, "loaded already?");
assert(val_type != nullptr, "need a type");

if (ReduceInitialCardMarks
&& satb_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) {
@@ -210,7 +210,7 @@ void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,

} else {
// In this case both val_type and alias_idx are unused.
assert(pre_val != NULL, "must be loaded already");
assert(pre_val != nullptr, "must be loaded already");
// Nothing to be done if pre_val is null.
if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
@@ -255,7 +255,7 @@ void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,
pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
}

// if (pre_val != NULL)
// if (pre_val != nullptr)
__ if_then(pre_val, BoolTest::ne, kit->null()); {
Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

@@ -277,13 +277,13 @@ void ShenandoahBarrierSetC2::satb_write_barrier_pre(GraphKit* kit,
const TypeFunc *tf = ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type();
__ make_leaf_call(tf, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", pre_val, tls);
} __ end_if(); // (!index)
} __ end_if(); // (pre_val != NULL)
} __ end_if(); // (pre_val != nullptr)
} __ end_if(); // (!marking)

// Final sync IdealKit and GraphKit.
kit->final_sync(ideal);

if (ShenandoahSATBBarrier && adr != NULL) {
if (ShenandoahSATBBarrier && adr != nullptr) {
Node* c = kit->control();
Node* call = c->in(1)->in(1)->in(1)->in(0);
assert(is_shenandoah_wb_pre_call(call), "shenandoah_wb_pre call expected");
@@ -370,7 +370,7 @@ void ShenandoahBarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, N

// If offset is a constant, is it java_lang_ref_Reference::_reference_offset?
const TypeX* otype = offset->find_intptr_t_type();
if (otype != NULL && otype->is_con() &&
if (otype != nullptr && otype->is_con() &&
otype->get_con() != java_lang_ref_Reference::referent_offset()) {
// Constant offset but not the reference_offset so just return
return;
@@ -378,14 +378,14 @@ void ShenandoahBarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, N

// We only need to generate the runtime guards for instances.
const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
if (btype != NULL) {
if (btype != nullptr) {
if (btype->isa_aryptr()) {
// Array type so nothing to do
return;
}

const TypeInstPtr* itype = btype->isa_instptr();
if (itype != NULL) {
if (itype != nullptr) {
// Can the klass of base_oop be statically determined to be
// _not_ a sub-class of Reference and _not_ Object?
ciKlass* klass = itype->instance_klass();
@@ -424,7 +424,7 @@ void ShenandoahBarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, N
__ sync_kit(kit);

Node* one = __ ConI(1);
// is_instof == 0 if base_oop == NULL
// is_instof == 0 if base_oop == nullptr
__ if_then(is_instof, BoolTest::eq, one, unlikely); {

// Update graphKit from IdeakKit.
@@ -432,7 +432,7 @@ void ShenandoahBarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, N

// Use the pre-barrier to record the value in the referent field
satb_write_barrier_pre(kit, false /* do_load */,
NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */,
pre_val /* pre_val */,
T_OBJECT);
if (need_mem_bar) {
@@ -512,7 +512,7 @@ Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue&
value = shenandoah_iu_barrier(kit, value);
val.set_node(value);
shenandoah_write_barrier_pre(kit, true /* do_load */, /*kit->control(),*/ access.base(), adr, adr_idx, val.node(),
static_cast<const TypeOopPtr*>(val.type()), NULL /* pre_val */, access.type());
static_cast<const TypeOopPtr*>(val.type()), nullptr /* pre_val */, access.type());
} else {
assert(access.is_opt_access(), "only for optimization passes");
assert(((decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0 || !ShenandoahSATBBarrier) && (decorators & C2_ARRAY_COPY) != 0, "unexpected caller of this code");
@@ -539,7 +539,7 @@ Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val

// 2: apply LRB if needed
if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
load = new ShenandoahLoadReferenceBarrierNode(NULL, load, decorators);
load = new ShenandoahLoadReferenceBarrierNode(nullptr, load, decorators);
if (access.is_parse_access()) {
load = static_cast<C2ParseAccess &>(access).kit()->gvn().transform(load);
} else {
@@ -579,7 +579,7 @@ Node* ShenandoahBarrierSetC2::load_at_resolved(C2Access& access, const Type* val
if (on_weak_ref) {
// Use the pre-barrier to record the value in the referent field
satb_write_barrier_pre(kit, false /* do_load */,
NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
nullptr /* obj */, nullptr /* adr */, max_juint /* alias_idx */, nullptr /* val */, nullptr /* val_type */,
load /* pre_val */, T_OBJECT);
// Add memory barrier to prevent commoning reads from this field
// across safepoint since GC can change its value.
@@ -600,14 +600,14 @@ Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess
if (access.is_oop()) {
new_val = shenandoah_iu_barrier(kit, new_val);
shenandoah_write_barrier_pre(kit, false /* do_load */,
NULL, NULL, max_juint, NULL, NULL,
nullptr, nullptr, max_juint, nullptr, nullptr,
expected_val /* pre_val */, T_OBJECT);

MemNode::MemOrd mo = access.mem_node_mo();
Node* mem = access.memory();
Node* adr = access.addr().node();
const TypePtr* adr_type = access.addr().type();
Node* load_store = NULL;
Node* load_store = nullptr;

#ifdef _LP64
if (adr->bottom_type()->is_ptr_to_narrowoop()) {
@@ -636,7 +636,7 @@ Node* ShenandoahBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess
load_store = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type()));
}
#endif
load_store = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, load_store, access.decorators()));
load_store = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(nullptr, load_store, access.decorators()));
return load_store;
}
return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
@@ -648,13 +648,13 @@ Node* ShenandoahBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAcces
if (access.is_oop()) {
new_val = shenandoah_iu_barrier(kit, new_val);
shenandoah_write_barrier_pre(kit, false /* do_load */,
NULL, NULL, max_juint, NULL, NULL,
nullptr, nullptr, max_juint, nullptr, nullptr,
expected_val /* pre_val */, T_OBJECT);
DecoratorSet decorators = access.decorators();
MemNode::MemOrd mo = access.mem_node_mo();
Node* mem = access.memory();
bool is_weak_cas = (decorators & C2_WEAK_CMPXCHG) != 0;
Node* load_store = NULL;
Node* load_store = nullptr;
Node* adr = access.addr().node();
#ifdef _LP64
if (adr->bottom_type()->is_ptr_to_narrowoop()) {
@@ -704,9 +704,9 @@ Node* ShenandoahBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& acces
}
Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type);
if (access.is_oop()) {
result = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, result, access.decorators()));
result = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(nullptr, result, access.decorators()));
shenandoah_write_barrier_pre(kit, false /* do_load */,
NULL, NULL, max_juint, NULL, NULL,
nullptr, nullptr, max_juint, nullptr, nullptr,
result /* pre_val */, T_OBJECT);
}
return result;
@@ -724,7 +724,7 @@ bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
return false;
}
CallLeafNode *call = node->as_CallLeaf();
if (call->_name == NULL) {
if (call->_name == nullptr) {
return false;
}

@@ -734,7 +734,7 @@ bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const {
}

Node* ShenandoahBarrierSetC2::step_over_gc_barrier(Node* c) const {
if (c == NULL) {
if (c == nullptr) {
return c;
}
if (c->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
@@ -783,7 +783,7 @@ bool ShenandoahBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_couple

bool ShenandoahBarrierSetC2::clone_needs_barrier(Node* src, PhaseGVN& gvn) {
const TypeOopPtr* src_type = gvn.type(src)->is_oopptr();
if (src_type->isa_instptr() != NULL) {
if (src_type->isa_instptr() != nullptr) {
ciInstanceKlass* ik = src_type->is_instptr()->instance_klass();
if ((src_type->klass_is_exact() || !ik->has_subklass()) && !ik->has_injected_fields()) {
if (ik->has_object_fields()) {
@@ -832,7 +832,7 @@ void ShenandoahBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCo
Node* gc_state_addr = phase->transform_later(new AddPNode(phase->C->top(), thread, offset));

uint gc_state_idx = Compile::AliasIdxRaw;
const TypePtr* gc_state_adr_type = NULL; // debug-mode-only argument
const TypePtr* gc_state_adr_type = nullptr; // debug-mode-only argument
debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));

Node* gc_state = phase->transform_later(new LoadBNode(ctrl, mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered));
@@ -873,7 +873,7 @@ void ShenandoahBarrierSetC2::clone_at_expansion(PhaseMacroExpand* phase, ArrayCo
const char* name = "arraycopy";
call = phase->make_leaf_call(ctrl, mem,
OptoRuntime::fast_arraycopy_Type(),
phase->basictype2arraycopy(T_LONG, NULL, NULL, true, name, true),
phase->basictype2arraycopy(T_LONG, nullptr, nullptr, true, name, true),
name, TypeRawPtr::BOTTOM,
src, dest, length
LP64_ONLY(COMMA phase->top()));
@@ -988,7 +988,7 @@ void ShenandoahBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase p
worklist.push(compile->root());
while (worklist.size() > 0) {
Node *x = worklist.pop();
if (x == NULL || x == compile->top()) continue;
if (x == nullptr || x == compile->top()) continue;
if (visited.member(x)) {
continue;
} else {
@@ -1028,7 +1028,7 @@ void ShenandoahBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase p
if_ctrl = if_ctrl->in(0)->in(0);
}
}
assert(load_ctrl != NULL && if_ctrl == load_ctrl, "controls must match");
assert(load_ctrl != nullptr && if_ctrl == load_ctrl, "controls must match");
}
}
}
@@ -1057,7 +1057,7 @@ Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_resh
Node* in1 = n->in(1);
Node* in2 = n->in(2);

// If one input is NULL, then step over the strong LRB barriers on the other input
// If one input is null, then step over the strong LRB barriers on the other input
if (in1->bottom_type() == TypePtr::NULL_PTR &&
!((in2->Opcode() == Op_ShenandoahLoadReferenceBarrier) &&
!ShenandoahBarrierSet::is_strong_access(((ShenandoahLoadReferenceBarrierNode*)in2)->decorators()))) {
@@ -1081,7 +1081,7 @@ Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_resh
} else if (can_reshape &&
n->Opcode() == Op_If &&
ShenandoahBarrierC2Support::is_heap_stable_test(n) &&
n->in(0) != NULL &&
n->in(0) != nullptr &&
n->outcnt() == 2) {
Node* dom = n->in(0);
Node* prev_dom = n;
@@ -1091,23 +1091,23 @@ Node* ShenandoahBarrierSetC2::ideal_node(PhaseGVN* phase, Node* n, bool can_resh
while (dom->Opcode() != op || // Not same opcode?
!ShenandoahBarrierC2Support::is_heap_stable_test(dom) || // Not same input 1?
prev_dom->in(0) != dom) { // One path of test does not dominate?
if (dist < 0) return NULL;
if (dist < 0) return nullptr;

dist--;
prev_dom = dom;
dom = IfNode::up_one_dom(dom);
if (!dom) return NULL;
if (!dom) return nullptr;
}

// Check that we did not follow a loop back to ourselves
if (n == dom) {
return NULL;
return nullptr;
}

return n->as_If()->dominated_by(prev_dom, phase->is_IterGVN());
}

return NULL;
return nullptr;
}

bool ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(Node* n) {
@@ -1205,7 +1205,7 @@ bool ShenandoahBarrierSetC2::escape_add_final_edges(ConnectionGraph* conn_graph,
case Op_ShenandoahCompareAndExchangeP:
case Op_ShenandoahCompareAndExchangeN: {
Node *adr = n->in(MemNode::Address);
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, adr, nullptr);
// fallthrough
}
case Op_ShenandoahCompareAndSwapP:
@@ -1214,10 +1214,10 @@ bool ShenandoahBarrierSetC2::escape_add_final_edges(ConnectionGraph* conn_graph,
case Op_ShenandoahWeakCompareAndSwapN:
return conn_graph->add_final_edges_unsafe_access(n, opcode);
case Op_ShenandoahIUBarrier:
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL);
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
return true;
case Op_ShenandoahLoadReferenceBarrier:
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), NULL);
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), nullptr);
return true;
default:
// Nothing
@@ -158,7 +158,7 @@ bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, Vecto

while (true) {
if (in->bottom_type() == TypePtr::NULL_PTR) {
if (trace) {tty->print_cr("NULL");}
if (trace) {tty->print_cr("null");}
} else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
if (trace) {tty->print_cr("Non oop");}
} else {
@@ -265,10 +265,10 @@ bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, Vecto
}

void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
if (n1 != NULL) {
if (n1 != nullptr) {
n1->dump(+10);
}
if (n2 != NULL) {
if (n2 != nullptr) {
n2->dump(+10);
}
fatal("%s", msg);
@@ -462,7 +462,7 @@ void ShenandoahBarrierC2Support::verify(RootNode* root) {
};

if (call->is_call_to_arraycopystub()) {
Node* dest = NULL;
Node* dest = nullptr;
const TypeTuple* args = n->as_Call()->_tf->domain();
for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
if (args->field_at(i)->isa_ptr()) {
@@ -597,7 +597,7 @@ void ShenandoahBarrierC2Support::verify(RootNode* root) {
}
}
for (uint j = 1; j < stop; j++) {
if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
if (n->in(j) != nullptr && n->in(j)->bottom_type()->make_ptr() &&
n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
uint k = 0;
for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
@@ -608,7 +608,7 @@ void ShenandoahBarrierC2Support::verify(RootNode* root) {
}
} else {
for (uint j = 1; j < stop; j++) {
if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
if (n->in(j) != nullptr && n->in(j)->bottom_type()->make_ptr() &&
n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
fatal("%s not covered", n->Name());
}
@@ -618,7 +618,7 @@ void ShenandoahBarrierC2Support::verify(RootNode* root) {

if (n->is_SafePoint()) {
SafePointNode* sfpt = n->as_SafePoint();
if (verify_no_useless_barrier && sfpt->jvms() != NULL) {
if (verify_no_useless_barrier && sfpt->jvms() != nullptr) {
for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
phis.clear();
@@ -667,7 +667,7 @@ bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node*
}
}
for (uint i = 0; i < m->req(); i++) {
if (m->in(i) != NULL && phase->ctrl_or_self(m->in(i)) == c) {
if (m->in(i) != nullptr && phase->ctrl_or_self(m->in(i)) == c) {
wq.push(m->in(i));
}
}
@@ -684,7 +684,7 @@ bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Nod
}

Node* next_mem(Node* mem, int alias) {
Node* res = NULL;
Node* res = nullptr;
if (mem->is_Proj()) {
res = mem->in(0);
} else if (mem->is_SafePoint() || mem->is_MemBar()) {
@@ -706,7 +706,7 @@ Node* next_mem(Node* mem, int alias) {
}

Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
Node* iffproj = NULL;
Node* iffproj = nullptr;
while (c != dom) {
Node* next = phase->idom(c);
assert(next->unique_ctrl_out_or_null() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
@@ -743,13 +743,13 @@ Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one
}
} else if (c->is_Proj()) {
if (c->is_IfProj()) {
if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != NULL) {
if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != nullptr) {
// continue;
} else {
if (!allow_one_proj) {
return NodeSentinel;
}
if (iffproj == NULL) {
if (iffproj == nullptr) {
iffproj = c;
} else {
return NodeSentinel;
@@ -778,7 +778,7 @@ Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node
while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
mem = next_mem(mem, alias);
if (wq.test_set(mem->_idx)) {
return NULL;
return nullptr;
}
mem_ctrl = phase->ctrl_or_self(mem);
}
@@ -790,11 +790,11 @@ Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node
}

Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
Node* mem = NULL;
Node* mem = nullptr;
Node* c = ctrl;
do {
if (c->is_Region()) {
for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == NULL; i++) {
for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == nullptr; i++) {
Node* u = c->fast_out(i);
if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
if (u->adr_type() == TypePtr::BOTTOM) {
@@ -803,12 +803,12 @@ Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* ph
}
}
} else {
if (c->is_Call() && c->as_Call()->adr_type() != NULL) {
if (c->is_Call() && c->as_Call()->adr_type() != nullptr) {
CallProjections projs;
c->as_Call()->extract_projections(&projs, true, false);
if (projs.fallthrough_memproj != NULL) {
if (projs.fallthrough_memproj != nullptr) {
if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
if (projs.catchall_memproj == NULL) {
if (projs.catchall_memproj == nullptr) {
mem = projs.fallthrough_memproj;
} else {
if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
@@ -821,7 +821,7 @@ Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* ph
}
} else {
Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
if (proj != NULL &&
if (proj != nullptr &&
proj->adr_type() == TypePtr::BOTTOM) {
mem = proj;
}
@@ -833,15 +833,15 @@ Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* ph
u->bottom_type() == Type::MEMORY &&
u->adr_type() == TypePtr::BOTTOM) {
assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
assert(mem == NULL, "only one proj");
assert(mem == nullptr, "only one proj");
mem = u;
}
}
assert(!c->is_Call() || c->as_Call()->adr_type() != NULL || mem == NULL, "no mem projection expected");
assert(!c->is_Call() || c->as_Call()->adr_type() != nullptr || mem == nullptr, "no mem projection expected");
}
}
c = phase->idom(c);
} while (mem == NULL);
} while (mem == nullptr);
return mem;
}

@@ -874,7 +874,7 @@ void ShenandoahBarrierC2Support::test_gc_state(Node*& ctrl, Node* raw_mem, Node*
Node* gc_state_offset = igvn.MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
Node* gc_state_addr = new AddPNode(phase->C->top(), thread, gc_state_offset);
Node* gc_state = new LoadBNode(old_ctrl, raw_mem, gc_state_addr,
DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(NULL),
DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
TypeInt::BYTE, MemNode::unordered);
Node* gc_state_and = new AndINode(gc_state, igvn.intcon(flags));
Node* gc_state_cmp = new CmpINode(gc_state_and, igvn.zerocon(T_INT));
@@ -940,7 +940,7 @@ void ShenandoahBarrierC2Support::test_in_cset(Node*& ctrl, Node*& not_cset_ctrl,
Node* cset_load_ptr = new CastX2PNode(cset_load_addr);

Node* cset_load = new LoadBNode(old_ctrl, raw_mem, cset_load_ptr,
DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(NULL),
DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
TypeInt::BYTE, MemNode::unordered);
Node* cset_cmp = new CmpINode(cset_load, igvn.zerocon(T_INT));
Node* cset_bool = new BoolNode(cset_cmp, BoolTest::ne);
@@ -971,8 +971,8 @@ void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* lo
IdealLoopTree*loop = phase->get_loop(ctrl);
const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr();

address calladdr = NULL;
const char* name = NULL;
address calladdr = nullptr;
const char* name = nullptr;
bool is_strong = ShenandoahBarrierSet::is_strong_access(decorators);
bool is_weak = ShenandoahBarrierSet::is_weak_access(decorators);
bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
@@ -1041,7 +1041,7 @@ void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const Mem
Node *n = uses_to_ignore.at(next);
for (uint i = 0; i < n->req(); i++) {
Node* in = n->in(i);
if (in != NULL && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
if (in != nullptr && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
uses_to_ignore.push(in);
}
}
@@ -1076,14 +1076,14 @@ void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const Mem
}

static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
Node* region = NULL;
Node* region = nullptr;
while (c != ctrl) {
if (c->is_Region()) {
region = c;
}
c = phase->idom(c);
}
assert(region != NULL, "");
assert(region != nullptr, "");
Node* phi = new PhiNode(region, n->bottom_type());
for (uint j = 1; j < region->req(); j++) {
Node* in = region->in(j);
@@ -1125,14 +1125,14 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
Node* ctrl = phase->get_ctrl(lrb);
Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);

CallStaticJavaNode* unc = NULL;
Node* unc_ctrl = NULL;
CallStaticJavaNode* unc = nullptr;
Node* unc_ctrl = nullptr;
Node* uncasted_val = val;

for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
Node* u = lrb->fast_out(i);
if (u->Opcode() == Op_CastPP &&
u->in(0) != NULL &&
u->in(0) != nullptr &&
phase->is_dominator(u->in(0), ctrl)) {
const Type* u_t = phase->igvn().type(u);

@@ -1153,7 +1153,7 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
}

Node* branch = no_branches(ctrl, u->in(0), false, phase);
assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch");
assert(branch == nullptr || branch == NodeSentinel, "was not looking for a branch");
if (branch == NodeSentinel) {
continue;
}
@@ -1184,7 +1184,7 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
if (idx < n->req()) {
Node* in = n->in(idx);
stack.set_index(idx+1);
if (in != NULL) {
if (in != nullptr) {
if (phase->has_ctrl(in)) {
if (phase->is_dominator(call, phase->get_ctrl(in))) {
#ifdef ASSERT
@@ -1337,15 +1337,15 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
Node* orig_ctrl = ctrl;

Node* raw_mem = fixer.find_mem(ctrl, lrb);
Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, nullptr);

IdealLoopTree *loop = phase->get_loop(ctrl);

Node* heap_stable_ctrl = NULL;
Node* null_ctrl = NULL;
Node* heap_stable_ctrl = nullptr;
Node* null_ctrl = nullptr;

assert(val->bottom_type()->make_oopptr(), "need oop");
assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant");
assert(val->bottom_type()->make_oopptr()->const_oop() == nullptr, "expect non-constant");

enum { _heap_stable = 1, _evac_path, _not_cset, PATH_LIMIT };
Node* region = new RegionNode(PATH_LIMIT);
@@ -1363,14 +1363,14 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
region->init_req(_heap_stable, heap_stable_ctrl);
val_phi->init_req(_heap_stable, val);

// Test for in-cset, unless it's a native-LRB. Native LRBs need to return NULL
// Test for in-cset, unless it's a native-LRB. Native LRBs need to return null
// even for non-cset objects to prevent resurrection of such objects.
|
||||
// Wires !in_cset(obj) to slot 2 of region and phis
|
||||
Node* not_cset_ctrl = NULL;
|
||||
Node* not_cset_ctrl = nullptr;
|
||||
if (ShenandoahBarrierSet::is_strong_access(lrb->decorators())) {
|
||||
test_in_cset(ctrl, not_cset_ctrl, val, raw_mem, phase);
|
||||
}
|
||||
if (not_cset_ctrl != NULL) {
|
||||
if (not_cset_ctrl != nullptr) {
|
||||
region->init_req(_not_cset, not_cset_ctrl);
|
||||
val_phi->init_req(_not_cset, val);
|
||||
} else {
|
||||
@@ -1382,7 +1382,7 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
|
||||
// Make the unconditional resolve for fwdptr.
|
||||
|
||||
// Call lrb-stub and wire up that path in slots 4
|
||||
Node* result_mem = NULL;
|
||||
Node* result_mem = nullptr;
|
||||
|
||||
Node* addr;
|
||||
if (ShenandoahSelfFixing) {
|
||||
@@ -1469,9 +1469,9 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
|
||||
IdealLoopTree* loop = phase->get_loop(ctrl);
|
||||
Node* raw_mem = fixer.find_mem(ctrl, barrier);
|
||||
Node* init_raw_mem = raw_mem;
|
||||
Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
|
||||
Node* heap_stable_ctrl = NULL;
|
||||
Node* null_ctrl = NULL;
|
||||
Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, nullptr);
|
||||
Node* heap_stable_ctrl = nullptr;
|
||||
Node* null_ctrl = nullptr;
|
||||
uint last = phase->C->unique();
|
||||
|
||||
enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
|
||||
@@ -1488,9 +1488,9 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
|
||||
phi->init_req(_heap_stable, raw_mem);
|
||||
|
||||
// Null path
|
||||
Node* reg2_ctrl = NULL;
|
||||
Node* reg2_ctrl = nullptr;
|
||||
test_null(ctrl, pre_val, null_ctrl, phase);
|
||||
if (null_ctrl != NULL) {
|
||||
if (null_ctrl != nullptr) {
|
||||
reg2_ctrl = null_ctrl->in(0);
|
||||
region2->init_req(_null_path, null_ctrl);
|
||||
phi2->init_req(_null_path, raw_mem);
|
||||
@@ -1518,7 +1518,7 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
|
||||
Node* index_test = new BoolNode(index_cmp, BoolTest::ne);
|
||||
phase->register_new_node(index_test, ctrl);
|
||||
IfNode* queue_full_iff = new IfNode(ctrl, index_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
|
||||
if (reg2_ctrl == NULL) reg2_ctrl = queue_full_iff;
|
||||
if (reg2_ctrl == nullptr) reg2_ctrl = queue_full_iff;
|
||||
phase->register_control(queue_full_iff, loop, ctrl);
|
||||
Node* not_full = new IfTrueNode(queue_full_iff);
|
||||
phase->register_control(not_full, loop, queue_full_iff);
|
||||
@@ -1598,7 +1598,7 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
|
||||
|
||||
Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) {
|
||||
if (visited.test_set(in->_idx)) {
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
switch (in->Opcode()) {
|
||||
case Op_Proj:
|
||||
@@ -1625,17 +1625,17 @@ Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet
|
||||
Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue));
|
||||
Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse));
|
||||
// Handle unambiguous cases: single address reported on both branches.
|
||||
if (t != NULL && f == NULL) return t;
|
||||
if (t == NULL && f != NULL) return f;
|
||||
if (t != NULL && t == f) return t;
|
||||
if (t != nullptr && f == nullptr) return t;
|
||||
if (t == nullptr && f != nullptr) return f;
|
||||
if (t != nullptr && t == f) return t;
|
||||
// Ambiguity.
|
||||
return phase->igvn().zerocon(T_OBJECT);
|
||||
}
|
||||
case Op_Phi: {
|
||||
Node* addr = NULL;
|
||||
Node* addr = nullptr;
|
||||
for (uint i = 1; i < in->req(); i++) {
|
||||
Node* addr1 = get_load_addr(phase, visited, in->in(i));
|
||||
if (addr == NULL) {
|
||||
if (addr == nullptr) {
|
||||
addr = addr1;
|
||||
}
|
||||
if (addr != addr1) {
|
||||
@@ -1677,7 +1677,7 @@ void ShenandoahBarrierC2Support::move_gc_state_test_out_of_loop(IfNode* iff, Pha
|
||||
|
||||
assert(is_gc_state_load(load), "broken");
|
||||
if (!phase->is_dominator(load->in(0), entry_c)) {
|
||||
Node* mem_ctrl = NULL;
|
||||
Node* mem_ctrl = nullptr;
|
||||
Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase);
|
||||
load = load->clone();
|
||||
load->set_req(MemNode::Memory, mem);
|
||||
@@ -1771,7 +1771,7 @@ void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoo
|
||||
IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) {
|
||||
// Find first invariant test that doesn't exit the loop
|
||||
LoopNode *head = loop->_head->as_Loop();
|
||||
IfNode* unswitch_iff = NULL;
|
||||
IfNode* unswitch_iff = nullptr;
|
||||
Node* n = head->in(LoopNode::LoopBackControl);
|
||||
int loop_has_sfpts = -1;
|
||||
while (n != head) {
|
||||
@@ -1846,14 +1846,14 @@ void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, No
|
||||
Node* n = heap_stable_tests.at(i);
|
||||
IdealLoopTree* loop = phase->get_loop(n);
|
||||
if (loop != phase->ltree_root() &&
|
||||
loop->_child == NULL &&
|
||||
loop->_child == nullptr &&
|
||||
!loop->_irreducible) {
|
||||
Node* head = loop->_head;
|
||||
if (head->is_Loop() &&
|
||||
(!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
|
||||
!seen.test_set(head->_idx)) {
|
||||
IfNode* iff = find_unswitching_candidate(loop, phase);
|
||||
if (iff != NULL) {
|
||||
if (iff != nullptr) {
|
||||
Node* bol = iff->in(1);
|
||||
if (head->as_Loop()->is_strip_mined()) {
|
||||
head->as_Loop()->verify_strip_mined(0);
|
||||
@@ -1880,12 +1880,12 @@ void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, No
|
||||
}
|
||||
}
|
||||
|
||||
ShenandoahIUBarrierNode::ShenandoahIUBarrierNode(Node* val) : Node(NULL, val) {
|
||||
ShenandoahIUBarrierNode::ShenandoahIUBarrierNode(Node* val) : Node(nullptr, val) {
|
||||
ShenandoahBarrierSetC2::bsc2()->state()->add_iu_barrier(this);
|
||||
}
|
||||
|
||||
const Type* ShenandoahIUBarrierNode::bottom_type() const {
|
||||
if (in(1) == NULL || in(1)->is_top()) {
|
||||
if (in(1) == nullptr || in(1)->is_top()) {
|
||||
return Type::TOP;
|
||||
}
|
||||
const Type* t = in(1)->bottom_type();
|
||||
@@ -1896,7 +1896,7 @@ const Type* ShenandoahIUBarrierNode::bottom_type() const {
|
||||
}
|
||||
|
||||
const Type* ShenandoahIUBarrierNode::Value(PhaseGVN* phase) const {
|
||||
if (in(1) == NULL) {
|
||||
if (in(1) == nullptr) {
|
||||
return Type::TOP;
|
||||
}
|
||||
const Type* t = phase->type(in(1));
|
||||
@@ -1910,11 +1910,11 @@ const Type* ShenandoahIUBarrierNode::Value(PhaseGVN* phase) const {
|
||||
}
|
||||
|
||||
int ShenandoahIUBarrierNode::needed(Node* n) {
|
||||
if (n == NULL ||
|
||||
if (n == nullptr ||
|
||||
n->is_Allocate() ||
|
||||
n->Opcode() == Op_ShenandoahIUBarrier ||
|
||||
n->bottom_type() == TypePtr::NULL_PTR ||
|
||||
(n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL)) {
|
||||
(n->bottom_type()->make_oopptr() != nullptr && n->bottom_type()->make_oopptr()->const_oop() != nullptr)) {
|
||||
return NotNeeded;
|
||||
}
|
||||
if (n->is_Phi() ||
|
||||
@@ -1926,11 +1926,11 @@ int ShenandoahIUBarrierNode::needed(Node* n) {
|
||||
|
||||
Node* ShenandoahIUBarrierNode::next(Node* n) {
|
||||
for (;;) {
|
||||
if (n == NULL) {
|
||||
if (n == nullptr) {
|
||||
return n;
|
||||
} else if (n->bottom_type() == TypePtr::NULL_PTR) {
|
||||
return n;
|
||||
} else if (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL) {
|
||||
} else if (n->bottom_type()->make_oopptr() != nullptr && n->bottom_type()->make_oopptr()->const_oop() != nullptr) {
|
||||
return n;
|
||||
} else if (n->is_ConstraintCast() ||
|
||||
n->Opcode() == Op_DecodeN ||
|
||||
@@ -1943,7 +1943,7 @@ Node* ShenandoahIUBarrierNode::next(Node* n) {
|
||||
}
|
||||
}
|
||||
ShouldNotReachHere();
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
Node* ShenandoahIUBarrierNode::Identity(PhaseGVN* phase) {
|
||||
@@ -1956,7 +1956,7 @@ Node* ShenandoahIUBarrierNode::Identity(PhaseGVN* phase) {
|
||||
if (cont == NotNeeded) {
|
||||
return in(1);
|
||||
} else if (cont == MaybeNeeded) {
|
||||
if (igvn == NULL) {
|
||||
if (igvn == nullptr) {
|
||||
phase->record_for_igvn(this);
|
||||
return this;
|
||||
} else {
|
||||
@@ -1968,7 +1968,7 @@ Node* ShenandoahIUBarrierNode::Identity(PhaseGVN* phase) {
|
||||
if (n->is_Phi()) {
|
||||
for (uint i = 1; i < n->req(); i++) {
|
||||
Node* m = n->in(i);
|
||||
if (m != NULL) {
|
||||
if (m != nullptr) {
|
||||
wq.push(m);
|
||||
}
|
||||
}
|
||||
@@ -1979,7 +1979,7 @@ Node* ShenandoahIUBarrierNode::Identity(PhaseGVN* phase) {
|
||||
m = n->in(CMoveNode::IfTrue);
|
||||
wq.push(m);
|
||||
}
|
||||
Node* orig_n = NULL;
|
||||
Node* orig_n = nullptr;
|
||||
do {
|
||||
if (wq_i >= wq.size()) {
|
||||
return in(1);
|
||||
@@ -2004,7 +2004,7 @@ Node* ShenandoahIUBarrierNode::Identity(PhaseGVN* phase) {
|
||||
static bool has_never_branch(Node* root) {
|
||||
for (uint i = 1; i < root->req(); i++) {
|
||||
Node* in = root->in(i);
|
||||
if (in != NULL && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->is_NeverBranch()) {
|
||||
if (in != nullptr && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->is_NeverBranch()) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@@ -2025,7 +2025,7 @@ void MemoryGraphFixer::collect_memory_nodes() {
|
||||
int opc = n->Opcode();
|
||||
uint i = stack.index();
|
||||
if (i < n->req()) {
|
||||
Node* mem = NULL;
|
||||
Node* mem = nullptr;
|
||||
if (opc == Op_Root) {
|
||||
Node* in = n->in(i);
|
||||
int in_opc = in->Opcode();
|
||||
@@ -2066,7 +2066,7 @@ void MemoryGraphFixer::collect_memory_nodes() {
|
||||
}
|
||||
i++;
|
||||
stack.set_index(i);
|
||||
if (mem == NULL) {
|
||||
if (mem == nullptr) {
|
||||
continue;
|
||||
}
|
||||
for (;;) {
|
||||
@@ -2119,7 +2119,7 @@ void MemoryGraphFixer::collect_memory_nodes() {
|
||||
if (trace) {
|
||||
for (int i = rpo_list.size() - 1; i >= 0; i--) {
|
||||
Node* c = rpo_list.at(i);
|
||||
if (_memory_nodes[c->_idx] != NULL) {
|
||||
if (_memory_nodes[c->_idx] != nullptr) {
|
||||
tty->print("X %d", c->_idx); _memory_nodes[c->_idx]->dump();
|
||||
}
|
||||
}
|
||||
@@ -2150,15 +2150,15 @@ void MemoryGraphFixer::collect_memory_nodes() {
|
||||
Node* prev_mem = _memory_nodes[c->_idx];
|
||||
if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
|
||||
Node* prev_region = regions[c->_idx];
|
||||
Node* unique = NULL;
|
||||
Node* unique = nullptr;
|
||||
for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
|
||||
Node* m = _memory_nodes[c->in(j)->_idx];
|
||||
assert(m != NULL || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
|
||||
if (m != NULL) {
|
||||
assert(m != nullptr || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
|
||||
if (m != nullptr) {
|
||||
if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
|
||||
assert(c->is_Loop() && j == LoopNode::LoopBackControl || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
|
||||
// continue
|
||||
} else if (unique == NULL) {
|
||||
} else if (unique == nullptr) {
|
||||
unique = m;
|
||||
} else if (m == unique) {
|
||||
// continue
|
||||
@@ -2167,30 +2167,30 @@ void MemoryGraphFixer::collect_memory_nodes() {
|
||||
}
|
||||
}
|
||||
}
|
||||
assert(unique != NULL, "empty phi???");
|
||||
assert(unique != nullptr, "empty phi???");
|
||||
if (unique != NodeSentinel) {
|
||||
if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c) {
|
||||
if (prev_region != nullptr && prev_region->is_Phi() && prev_region->in(0) == c) {
|
||||
dead_phis.push(prev_region);
|
||||
}
|
||||
regions.map(c->_idx, unique);
|
||||
} else {
|
||||
Node* phi = NULL;
|
||||
if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
|
||||
Node* phi = nullptr;
|
||||
if (prev_region != nullptr && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
|
||||
phi = prev_region;
|
||||
for (uint k = 1; k < c->req(); k++) {
|
||||
Node* m = _memory_nodes[c->in(k)->_idx];
|
||||
assert(m != NULL, "expect memory state");
|
||||
assert(m != nullptr, "expect memory state");
|
||||
phi->set_req(k, m);
|
||||
}
|
||||
} else {
|
||||
for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == NULL; j++) {
|
||||
for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == nullptr; j++) {
|
||||
Node* u = c->fast_out(j);
|
||||
if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
|
||||
(u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
|
||||
phi = u;
|
||||
for (uint k = 1; k < c->req() && phi != NULL; k++) {
|
||||
for (uint k = 1; k < c->req() && phi != nullptr; k++) {
|
||||
Node* m = _memory_nodes[c->in(k)->_idx];
|
||||
assert(m != NULL, "expect memory state");
|
||||
assert(m != nullptr, "expect memory state");
|
||||
if (u->in(k) != m) {
|
||||
phi = NodeSentinel;
|
||||
}
|
||||
@@ -2201,12 +2201,12 @@ void MemoryGraphFixer::collect_memory_nodes() {
|
||||
phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
|
||||
for (uint k = 1; k < c->req(); k++) {
|
||||
Node* m = _memory_nodes[c->in(k)->_idx];
|
||||
assert(m != NULL, "expect memory state");
|
||||
assert(m != nullptr, "expect memory state");
|
||||
phi->init_req(k, m);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (phi != NULL) {
|
||||
if (phi != nullptr) {
|
||||
regions.map(c->_idx, phi);
|
||||
} else {
|
||||
assert(c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
|
||||
@@ -2219,9 +2219,9 @@ void MemoryGraphFixer::collect_memory_nodes() {
|
||||
_memory_nodes.map(c->_idx, current_region);
|
||||
}
|
||||
}
|
||||
} else if (prev_mem == NULL || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
|
||||
} else if (prev_mem == nullptr || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
|
||||
Node* m = _memory_nodes[_phase->idom(c)->_idx];
|
||||
assert(m != NULL || c->Opcode() == Op_Halt, "expect memory state");
|
||||
assert(m != nullptr || c->Opcode() == Op_Halt, "expect memory state");
|
||||
if (m != prev_mem) {
|
||||
_memory_nodes.map(c->_idx, m);
|
||||
progress = true;
|
||||
@@ -2245,8 +2245,8 @@ void MemoryGraphFixer::collect_memory_nodes() {
|
||||
Node* c = rpo_list.at(i);
|
||||
if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
|
||||
Node* n = regions[c->_idx];
|
||||
assert(n != NULL || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
|
||||
if (n != NULL && n->is_Phi() && n->_idx >= last && n->in(0) == c) {
|
||||
assert(n != nullptr || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
|
||||
if (n != nullptr && n->is_Phi() && n->_idx >= last && n->in(0) == c) {
|
||||
_phase->register_new_node(n, c);
|
||||
}
|
||||
}
|
||||
@@ -2255,7 +2255,7 @@ void MemoryGraphFixer::collect_memory_nodes() {
|
||||
Node* c = rpo_list.at(i);
|
||||
if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
|
||||
Node* n = regions[c->_idx];
|
||||
assert(n != NULL || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
|
||||
assert(n != nullptr || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
|
||||
for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
|
||||
Node* u = c->fast_out(i);
|
||||
if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
|
||||
@@ -2274,26 +2274,26 @@ void MemoryGraphFixer::collect_memory_nodes() {
|
||||
}
|
||||
|
||||
Node* MemoryGraphFixer::collect_memory_for_infinite_loop(const Node* in) {
|
||||
Node* mem = NULL;
|
||||
Node* mem = nullptr;
|
||||
Node* head = in->in(0);
|
||||
assert(head->is_Region(), "unexpected infinite loop graph shape");
|
||||
|
||||
Node* phi_mem = NULL;
|
||||
Node* phi_mem = nullptr;
|
||||
for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
|
||||
Node* u = head->fast_out(j);
|
||||
if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
|
||||
if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
|
||||
assert(phi_mem == NULL || phi_mem->adr_type() == TypePtr::BOTTOM, "");
|
||||
assert(phi_mem == nullptr || phi_mem->adr_type() == TypePtr::BOTTOM, "");
|
||||
phi_mem = u;
|
||||
} else if (u->adr_type() == TypePtr::BOTTOM) {
|
||||
assert(phi_mem == NULL || _phase->C->get_alias_index(phi_mem->adr_type()) == _alias, "");
|
||||
if (phi_mem == NULL) {
|
||||
assert(phi_mem == nullptr || _phase->C->get_alias_index(phi_mem->adr_type()) == _alias, "");
|
||||
if (phi_mem == nullptr) {
|
||||
phi_mem = u;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (phi_mem == NULL) {
|
||||
if (phi_mem == nullptr) {
|
||||
ResourceMark rm;
|
||||
Node_Stack stack(0);
|
||||
stack.push(head, 1);
|
||||
@@ -2316,7 +2316,7 @@ Node* MemoryGraphFixer::collect_memory_for_infinite_loop(const Node* in) {
|
||||
if (m->is_MergeMem()) {
|
||||
m = m->as_MergeMem()->memory_at(_alias);
|
||||
}
|
||||
assert(mem == NULL || mem == m, "several memory states");
|
||||
assert(mem == nullptr || mem == m, "several memory states");
|
||||
mem = m;
|
||||
break;
|
||||
} else {
|
||||
@@ -2327,7 +2327,7 @@ Node* MemoryGraphFixer::collect_memory_for_infinite_loop(const Node* in) {
|
||||
}
|
||||
}
|
||||
} while (stack.size() > 0);
|
||||
assert(mem != NULL, "should have found safepoint");
|
||||
assert(mem != nullptr, "should have found safepoint");
|
||||
} else {
|
||||
mem = phi_mem;
|
||||
}
|
||||
@@ -2336,12 +2336,12 @@ Node* MemoryGraphFixer::collect_memory_for_infinite_loop(const Node* in) {
|
||||
|
||||
Node* MemoryGraphFixer::get_ctrl(Node* n) const {
|
||||
Node* c = _phase->get_ctrl(n);
|
||||
if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Call()) {
|
||||
if (n->is_Proj() && n->in(0) != nullptr && n->in(0)->is_Call()) {
|
||||
assert(c == n->in(0), "");
|
||||
CallNode* call = c->as_Call();
|
||||
CallProjections projs;
|
||||
call->extract_projections(&projs, true, false);
|
||||
if (projs.catchall_memproj != NULL) {
|
||||
if (projs.catchall_memproj != nullptr) {
|
||||
if (projs.fallthrough_memproj == n) {
|
||||
c = projs.fallthrough_catchproj;
|
||||
} else {
|
||||
@@ -2363,11 +2363,11 @@ Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
|
||||
}
|
||||
|
||||
bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
|
||||
return m != NULL && get_ctrl(m) == c;
|
||||
return m != nullptr && get_ctrl(m) == c;
|
||||
}
|
||||
|
||||
Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
|
||||
assert(n == NULL || _phase->ctrl_or_self(n) == ctrl, "");
|
||||
assert(n == nullptr || _phase->ctrl_or_self(n) == ctrl, "");
|
||||
assert(!ctrl->is_Call() || ctrl == n, "projection expected");
|
||||
#ifdef ASSERT
|
||||
if ((ctrl->is_Proj() && ctrl->in(0)->is_Call()) ||
|
||||
@@ -2386,11 +2386,11 @@ Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
|
||||
Node* mem = _memory_nodes[ctrl->_idx];
|
||||
Node* c = ctrl;
|
||||
while (!mem_is_valid(mem, c) &&
|
||||
(!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
|
||||
(!c->is_CatchProj() || mem == nullptr || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
|
||||
c = _phase->idom(c);
|
||||
mem = _memory_nodes[c->_idx];
|
||||
}
|
||||
if (n != NULL && mem_is_valid(mem, c)) {
|
||||
if (n != nullptr && mem_is_valid(mem, c)) {
|
||||
while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
|
||||
mem = next_mem(mem, _alias);
|
||||
}
|
||||
@@ -2402,7 +2402,7 @@ Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
|
||||
c = _phase->idom(c);
|
||||
mem = _memory_nodes[c->_idx];
|
||||
} while (!mem_is_valid(mem, c) &&
|
||||
(!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
|
||||
(!c->is_CatchProj() || mem == nullptr || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
|
||||
}
|
||||
}
|
||||
assert(mem->bottom_type() == Type::MEMORY, "");
|
||||
@@ -2428,7 +2428,7 @@ void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_
|
||||
GrowableArray<Node*> phis;
|
||||
if (mem_for_ctrl != mem) {
|
||||
Node* old = mem_for_ctrl;
|
||||
Node* prev = NULL;
|
||||
Node* prev = nullptr;
|
||||
while (old != mem) {
|
||||
prev = old;
|
||||
if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
|
||||
@@ -2441,7 +2441,7 @@ void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
}
|
||||
assert(prev != NULL, "");
|
||||
assert(prev != nullptr, "");
|
||||
if (new_ctrl != ctrl) {
|
||||
_memory_nodes.map(ctrl->_idx, mem);
|
||||
_memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
|
||||
@@ -2464,7 +2464,7 @@ void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_
|
||||
!has_mem_phi(u) &&
|
||||
u->unique_ctrl_out()->Opcode() != Op_Halt) {
|
||||
DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
|
||||
DEBUG_ONLY(if (trace && m != NULL) { tty->print("ZZZ mem"); m->dump(); });
|
||||
DEBUG_ONLY(if (trace && m != nullptr) { tty->print("ZZZ mem"); m->dump(); });
|
||||
|
||||
if (!mem_is_valid(m, u) || !m->is_Phi()) {
|
||||
bool push = true;
|
||||
@@ -2484,7 +2484,7 @@ void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_
|
||||
DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
|
||||
for (;;) {
|
||||
assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
|
||||
Node* next = NULL;
|
||||
Node* next = nullptr;
|
||||
if (m->is_Proj()) {
|
||||
next = m->in(0);
|
||||
} else {
|
||||
@@ -2531,14 +2531,14 @@ void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_
|
||||
Node* r = n->in(0);
|
||||
DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
|
||||
for (uint j = 1; j < n->req(); j++) {
|
||||
Node* m = find_mem(r->in(j), NULL);
|
||||
Node* m = find_mem(r->in(j), nullptr);
|
||||
_phase->igvn().replace_input_of(n, j, m);
|
||||
DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
|
||||
}
|
||||
}
|
||||
}
|
||||
uint last = _phase->C->unique();
|
||||
MergeMemNode* mm = NULL;
|
||||
MergeMemNode* mm = nullptr;
|
||||
int alias = _alias;
|
||||
DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
|
||||
// Process loads first to not miss an anti-dependency: if the memory
|
||||
@@ -2570,7 +2570,7 @@ void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_
|
||||
} else if (u->is_MergeMem()) {
|
||||
MergeMemNode* u_mm = u->as_MergeMem();
|
||||
if (u_mm->memory_at(alias) == mem) {
|
||||
MergeMemNode* newmm = NULL;
|
||||
MergeMemNode* newmm = nullptr;
|
||||
for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
|
||||
Node* uu = u->fast_out(j);
|
||||
assert(!uu->is_MergeMem(), "chain of MergeMems?");
|
||||
@@ -2580,7 +2580,7 @@ void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_
|
||||
int nb = 0;
|
||||
for (uint k = 1; k < uu->req(); k++) {
|
||||
if (uu->in(k) == u) {
|
||||
Node* m = find_mem(region->in(k), NULL);
|
||||
Node* m = find_mem(region->in(k), nullptr);
|
||||
if (m != mem) {
|
||||
DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
|
||||
newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
|
||||
@@ -2615,7 +2615,7 @@ void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_
|
||||
bool replaced = false;
|
||||
for (uint j = 1; j < u->req(); j++) {
|
||||
if (u->in(j) == mem) {
|
||||
Node* m = find_mem(region->in(j), NULL);
|
||||
Node* m = find_mem(region->in(j), nullptr);
|
||||
Node* nnew = m;
|
||||
if (m != mem) {
|
||||
if (u->adr_type() == TypePtr::BOTTOM) {
|
||||
@@ -2633,8 +2633,8 @@ void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_
|
||||
}
|
||||
}
|
||||
} else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
|
||||
u->adr_type() == NULL) {
|
||||
assert(u->adr_type() != NULL ||
|
||||
u->adr_type() == nullptr) {
|
||||
assert(u->adr_type() != nullptr ||
|
||||
u->Opcode() == Op_Rethrow ||
|
||||
u->Opcode() == Op_Return ||
|
||||
u->Opcode() == Op_SafePoint ||
|
||||
@@ -2690,7 +2690,7 @@ MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, No
|
||||
}
|
||||
|
||||
MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
|
||||
MergeMemNode* newmm = NULL;
|
||||
MergeMemNode* newmm = nullptr;
|
||||
MergeMemNode* u_mm = u->as_MergeMem();
|
||||
Node* c = _phase->get_ctrl(u);
|
||||
if (_phase->is_dominator(c, rep_ctrl)) {
|
||||
@@ -2750,7 +2750,7 @@ bool MemoryGraphFixer::should_process_phi(Node* phi) const {
|
||||
|
||||
void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
|
||||
uint last = _phase-> C->unique();
|
||||
MergeMemNode* mm = NULL;
|
||||
MergeMemNode* mm = nullptr;
|
||||
assert(mem->bottom_type() == Type::MEMORY, "");
|
||||
for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
|
||||
Node* u = mem->out(i);
|
||||
@@ -2758,7 +2758,7 @@ void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_p
|
||||
if (u->is_MergeMem()) {
|
||||
MergeMemNode* u_mm = u->as_MergeMem();
|
||||
if (u_mm->memory_at(_alias) == mem) {
|
||||
MergeMemNode* newmm = NULL;
|
||||
MergeMemNode* newmm = nullptr;
|
||||
for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
|
||||
Node* uu = u->fast_out(j);
|
||||
assert(!uu->is_MergeMem(), "chain of MergeMems?");
|
||||
@@ -2768,7 +2768,7 @@ void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_p
|
||||
int nb = 0;
|
||||
for (uint k = 1; k < uu->req(); k++) {
|
||||
if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
|
||||
if (newmm == NULL) {
|
||||
if (newmm == nullptr) {
|
||||
newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
|
||||
}
|
||||
if (newmm != u) {
|
||||
@@ -2784,7 +2784,7 @@ void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_p
|
||||
}
|
||||
} else {
|
||||
if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
|
||||
if (newmm == NULL) {
|
||||
if (newmm == nullptr) {
|
||||
newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
|
||||
}
|
||||
if (newmm != u) {
|
||||
@@ -2804,7 +2804,7 @@ void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_p
|
||||
if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
|
||||
Node* nnew = rep_proj;
|
||||
if (u->adr_type() == TypePtr::BOTTOM) {
|
||||
if (mm == NULL) {
|
||||
if (mm == nullptr) {
|
||||
mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
|
||||
}
|
||||
nnew = mm;
|
||||
@@ -2819,8 +2819,8 @@ void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_p
|
||||
|
||||
}
|
||||
} else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
|
||||
u->adr_type() == NULL) {
|
||||
assert(u->adr_type() != NULL ||
|
||||
u->adr_type() == nullptr) {
|
||||
assert(u->adr_type() != nullptr ||
|
||||
u->Opcode() == Op_Rethrow ||
|
||||
u->Opcode() == Op_Return ||
|
||||
u->Opcode() == Op_SafePoint ||
|
||||
@@ -2828,7 +2828,7 @@ void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_p
|
||||
(u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
|
||||
u->Opcode() == Op_CallLeaf, "%s", u->Name());
|
||||
if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
|
||||
if (mm == NULL) {
|
||||
if (mm == nullptr) {
|
||||
mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
|
||||
}
|
||||
_phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
|
||||
@@ -2873,7 +2873,7 @@ bool ShenandoahLoadReferenceBarrierNode::cmp( const Node &n ) const {
|
||||
}
|
||||
|
||||
const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
|
||||
if (in(ValueIn) == NULL || in(ValueIn)->is_top()) {
|
||||
if (in(ValueIn) == nullptr || in(ValueIn)->is_top()) {
|
||||
return Type::TOP;
|
||||
}
|
||||
const Type* t = in(ValueIn)->bottom_type();
|
||||
@@ -2918,7 +2918,7 @@ bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n)
|
||||
}
|
||||
|
||||
bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) {
|
||||
if (n == NULL) return false;
|
||||
if (n == nullptr) return false;
|
||||
if (visited.member(n)) {
|
||||
return false; // Been there.
|
||||
}
|
||||
@@ -2941,7 +2941,7 @@ bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Nod
|
||||
// tty->print_cr("optimize barrier on null");
|
||||
return false;
|
||||
}
|
||||
if (type->make_oopptr() && type->make_oopptr()->const_oop() != NULL) {
|
||||
if (type->make_oopptr() && type->make_oopptr()->const_oop() != nullptr) {
|
||||
// tty->print_cr("optimize barrier on constant");
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -49,7 +49,7 @@ private:
|
||||
};
|
||||
|
||||
static bool verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used);
|
||||
static void report_verify_failure(const char* msg, Node* n1 = NULL, Node* n2 = NULL);
|
||||
static void report_verify_failure(const char* msg, Node* n1 = nullptr, Node* n2 = nullptr);
|
||||
#endif
|
||||
static Node* dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase);
|
||||
static Node* no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase);
|
||||
@@ -142,10 +142,10 @@ public:
|
||||
: CompareAndSwapPNode(c, mem, adr, val, ex, mem_ord) { }
|
||||
|
||||
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
|
||||
if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypePtr::NULL_PTR) {
|
||||
if (in(ExpectedIn) != nullptr && phase->type(in(ExpectedIn)) == TypePtr::NULL_PTR) {
|
||||
return new CompareAndSwapPNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), in(ExpectedIn), order());
|
||||
}
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
virtual int Opcode() const;
|
||||
@@ -157,10 +157,10 @@ public:
|
||||
: CompareAndSwapNNode(c, mem, adr, val, ex, mem_ord) { }
|
||||
|
||||
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
|
||||
if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypeNarrowOop::NULL_PTR) {
|
||||
if (in(ExpectedIn) != nullptr && phase->type(in(ExpectedIn)) == TypeNarrowOop::NULL_PTR) {
|
||||
return new CompareAndSwapNNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), in(ExpectedIn), order());
|
||||
}
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
virtual int Opcode() const;
|
||||
@@ -172,10 +172,10 @@ public:
|
||||
: WeakCompareAndSwapPNode(c, mem, adr, val, ex, mem_ord) { }
|
||||
|
||||
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
|
||||
if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypePtr::NULL_PTR) {
|
||||
if (in(ExpectedIn) != nullptr && phase->type(in(ExpectedIn)) == TypePtr::NULL_PTR) {
|
||||
return new WeakCompareAndSwapPNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), in(ExpectedIn), order());
|
||||
}
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
virtual int Opcode() const;
|
||||
@@ -187,10 +187,10 @@ public:
|
||||
: WeakCompareAndSwapNNode(c, mem, adr, val, ex, mem_ord) { }
|
||||
|
||||
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
|
||||
if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypeNarrowOop::NULL_PTR) {
|
||||
if (in(ExpectedIn) != nullptr && phase->type(in(ExpectedIn)) == TypeNarrowOop::NULL_PTR) {
|
||||
return new WeakCompareAndSwapNNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), in(ExpectedIn), order());
|
||||
}
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
virtual int Opcode() const;
|
||||
@@ -202,10 +202,10 @@ public:
|
||||
: CompareAndExchangePNode(c, mem, adr, val, ex, at, t, mem_ord) { }
|
||||
|
||||
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
|
||||
if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypePtr::NULL_PTR) {
|
||||
if (in(ExpectedIn) != nullptr && phase->type(in(ExpectedIn)) == TypePtr::NULL_PTR) {
|
||||
return new CompareAndExchangePNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), in(ExpectedIn), adr_type(), bottom_type(), order());
|
||||
}
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
virtual int Opcode() const;
|
||||
@@ -217,10 +217,10 @@ public:
|
||||
: CompareAndExchangeNNode(c, mem, adr, val, ex, at, t, mem_ord) { }
|
||||
|
||||
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape) {
|
||||
if (in(ExpectedIn) != NULL && phase->type(in(ExpectedIn)) == TypeNarrowOop::NULL_PTR) {
|
||||
if (in(ExpectedIn) != nullptr && phase->type(in(ExpectedIn)) == TypeNarrowOop::NULL_PTR) {
|
||||
return new CompareAndExchangeNNode(in(MemNode::Control), in(MemNode::Memory), in(MemNode::Address), in(MemNode::ValueIn), in(ExpectedIn), adr_type(), bottom_type(), order());
|
||||
}
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
virtual int Opcode() const;
|
||||
|
||||
@@ -43,7 +43,7 @@ int ShenandoahHeuristics::compare_by_garbage(RegionData a, RegionData b) {
|
||||
}
|
||||
|
||||
ShenandoahHeuristics::ShenandoahHeuristics() :
|
||||
_region_data(NULL),
|
||||
_region_data(nullptr),
|
||||
_degenerated_cycles_in_a_row(0),
|
||||
_successful_cycles_in_a_row(0),
|
||||
_cycle_start(os::elapsedTime()),
|
||||
|
||||
@@ -65,7 +65,7 @@ void ShenandoahIUMode::initialize_flags() const {
|
||||
}
|
||||
|
||||
ShenandoahHeuristics* ShenandoahIUMode::initialize_heuristics() const {
|
||||
if (ShenandoahGCHeuristics == NULL) {
|
||||
if (ShenandoahGCHeuristics == nullptr) {
|
||||
vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option (null)");
|
||||
}
|
||||
if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
|
||||
@@ -78,5 +78,5 @@ ShenandoahHeuristics* ShenandoahIUMode::initialize_heuristics() const {
|
||||
return new ShenandoahCompactHeuristics();
|
||||
}
|
||||
vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
@@ -56,7 +56,7 @@ void ShenandoahPassiveMode::initialize_flags() const {
|
||||
// No barriers are required to run.
|
||||
}
|
||||
ShenandoahHeuristics* ShenandoahPassiveMode::initialize_heuristics() const {
|
||||
if (ShenandoahGCHeuristics == NULL) {
|
||||
if (ShenandoahGCHeuristics == nullptr) {
|
||||
vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option (null)");
|
||||
}
|
||||
return new ShenandoahPassiveHeuristics();
|
||||
|
||||
@@ -53,7 +53,7 @@ void ShenandoahSATBMode::initialize_flags() const {
|
||||
}
|
||||
|
||||
ShenandoahHeuristics* ShenandoahSATBMode::initialize_heuristics() const {
|
||||
if (ShenandoahGCHeuristics == NULL) {
|
||||
if (ShenandoahGCHeuristics == nullptr) {
|
||||
vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option (null)");
|
||||
}
|
||||
if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
|
||||
@@ -66,5 +66,5 @@ ShenandoahHeuristics* ShenandoahSATBMode::initialize_heuristics() const {
|
||||
return new ShenandoahCompactHeuristics();
|
||||
}
|
||||
vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
|
||||
return NULL;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
@@ -40,7 +40,7 @@ void print_raw_memory(ShenandoahMessageBuffer &msg, void* loc) {
|
||||
if (!heap->is_in(loc)) return;
|
||||
|
||||
ShenandoahHeapRegion* r = heap->heap_region_containing(loc);
|
||||
if (r != NULL && r->is_committed()) {
|
||||
if (r != nullptr && r->is_committed()) {
|
||||
address start = MAX2((address) r->bottom(), (address) loc - 32);
|
||||
address end = MIN2((address) r->end(), (address) loc + 128);
|
||||
if (start >= end) return;
|
||||
@@ -98,7 +98,7 @@ void ShenandoahAsserts::print_obj_safe(ShenandoahMessageBuffer& msg, void* loc)
|
||||
msg.append(" " PTR_FORMAT " - safe print, no details\n", p2i(loc));
|
||||
if (heap->is_in(loc)) {
|
||||
ShenandoahHeapRegion* r = heap->heap_region_containing(loc);
|
||||
if (r != NULL) {
|
||||
if (r != nullptr) {
|
||||
stringStream ss;
|
||||
r->print_on(&ss);
|
||||
msg.append(" region: %s", ss.freeze());
|
||||
@@ -113,12 +113,12 @@ void ShenandoahAsserts::print_failure(SafeLevel level, oop obj, void* interior_l
|
||||
ShenandoahHeap* heap = ShenandoahHeap::heap();
|
||||
ResourceMark rm;
|
||||
|
||||
bool loc_in_heap = (loc != NULL && heap->is_in(loc));
|
||||
bool loc_in_heap = (loc != nullptr && heap->is_in(loc));
|
||||
|
||||
ShenandoahMessageBuffer msg("%s; %s\n\n", phase, label);
|
||||
|
||||
msg.append("Referenced from:\n");
|
||||
if (interior_loc != NULL) {
|
||||
if (interior_loc != nullptr) {
|
||||
msg.append(" interior location: " PTR_FORMAT "\n", p2i(interior_loc));
|
||||
if (loc_in_heap) {
|
||||
print_obj(msg, loc);
|
||||
@@ -170,7 +170,7 @@ void ShenandoahAsserts::assert_in_heap(void* interior_loc, oop obj, const char *
|
||||
ShenandoahHeap* heap = ShenandoahHeap::heap();
|
||||
|
||||
if (!heap->is_in(obj)) {
|
||||
print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_in_heap failed",
|
||||
print_failure(_safe_unknown, obj, interior_loc, nullptr, "Shenandoah assert_in_heap failed",
|
||||
"oop must point to a heap address",
|
||||
file, line);
|
||||
}
|
||||
@@ -179,8 +179,8 @@ void ShenandoahAsserts::assert_in_heap(void* interior_loc, oop obj, const char *
|
||||
void ShenandoahAsserts::assert_in_heap_or_null(void* interior_loc, oop obj, const char *file, int line) {
|
||||
ShenandoahHeap* heap = ShenandoahHeap::heap();
|
||||
|
||||
if (obj != NULL && !heap->is_in(obj)) {
|
||||
print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_in_heap_or_null failed",
|
||||
if (obj != nullptr && !heap->is_in(obj)) {
|
||||
print_failure(_safe_unknown, obj, interior_loc, nullptr, "Shenandoah assert_in_heap_or_null failed",
|
||||
"oop must point to a heap address",
|
||||
file, line);
|
||||
}
|
||||
@@ -192,20 +192,20 @@ void ShenandoahAsserts::assert_correct(void* interior_loc, oop obj, const char*
|
||||
// Step 1. Check that obj is correct.
|
||||
// After this step, it is safe to call heap_region_containing().
|
||||
if (!heap->is_in(obj)) {
|
||||
print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
|
||||
print_failure(_safe_unknown, obj, interior_loc, nullptr, "Shenandoah assert_correct failed",
|
||||
"oop must point to a heap address",
|
||||
file, line);
|
||||
}
|
||||
|
||||
Klass* obj_klass = obj->klass_or_null();
|
||||
if (obj_klass == NULL) {
|
||||
print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
|
||||
"Object klass pointer should not be NULL",
|
||||
if (obj_klass == nullptr) {
|
||||
print_failure(_safe_unknown, obj, interior_loc, nullptr, "Shenandoah assert_correct failed",
|
||||
"Object klass pointer should not be null",
|
||||
file,line);
|
||||
}
|
||||
|
||||
if (!Metaspace::contains(obj_klass)) {
|
||||
print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
|
||||
print_failure(_safe_unknown, obj, interior_loc, nullptr, "Shenandoah assert_correct failed",
|
||||
"Object klass pointer must go to metaspace",
|
||||
file,line);
|
||||
}
|
||||
@@ -217,27 +217,27 @@ void ShenandoahAsserts::assert_correct(void* interior_loc, oop obj, const char*
|
||||
// tries fwdptr manipulation when Full GC is running. The only exception is using the fwdptr
|
||||
// that still points to the object itself.
|
||||
if (heap->is_full_gc_move_in_progress()) {
|
||||
print_failure(_safe_oop, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
|
||||
print_failure(_safe_oop, obj, interior_loc, nullptr, "Shenandoah assert_correct failed",
|
||||
"Non-trivial forwarding pointer during Full GC moves, probable bug.",
|
||||
file, line);
|
||||
}
|
||||
|
||||
// Step 2. Check that forwardee is correct
|
||||
if (!heap->is_in(fwd)) {
|
||||
print_failure(_safe_oop, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
|
||||
print_failure(_safe_oop, obj, interior_loc, nullptr, "Shenandoah assert_correct failed",
|
||||
"Forwardee must point to a heap address",
|
||||
file, line);
|
||||
}
|
||||
|
||||
if (obj_klass != fwd->klass()) {
|
||||
print_failure(_safe_oop, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
|
||||
print_failure(_safe_oop, obj, interior_loc, nullptr, "Shenandoah assert_correct failed",
|
||||
"Forwardee klass disagrees with object class",
|
||||
file, line);
|
||||
}
|
||||
|
||||
// Step 3. Check that forwardee points to correct region
|
||||
if (heap->heap_region_index_containing(fwd) == heap->heap_region_index_containing(obj)) {
|
||||
print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
|
||||
print_failure(_safe_all, obj, interior_loc, nullptr, "Shenandoah assert_correct failed",
|
||||
"Non-trivial forwardee should in another region",
|
||||
file, line);
|
||||
}
|
||||
@@ -245,7 +245,7 @@ void ShenandoahAsserts::assert_correct(void* interior_loc, oop obj, const char*
|
||||
// Step 4. Check for multiple forwardings
|
||||
oop fwd2 = ShenandoahForwarding::get_forwardee_raw_unchecked(fwd);
|
||||
if (fwd != fwd2) {
|
||||
print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_correct failed",
|
||||
print_failure(_safe_all, obj, interior_loc, nullptr, "Shenandoah assert_correct failed",
|
||||
"Multiple forwardings",
|
||||
file, line);
|
||||
}
|
||||
@@ -258,7 +258,7 @@ void ShenandoahAsserts::assert_in_correct_region(void* interior_loc, oop obj, co
|
||||
ShenandoahHeap* heap = ShenandoahHeap::heap();
|
||||
ShenandoahHeapRegion* r = heap->heap_region_containing(obj);
|
||||
if (!r->is_active()) {
|
||||
print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_in_correct_region failed",
|
||||
print_failure(_safe_unknown, obj, interior_loc, nullptr, "Shenandoah assert_in_correct_region failed",
|
||||
"Object must reside in active region",
|
||||
file, line);
|
||||
}
|
||||
@@ -270,12 +270,12 @@ void ShenandoahAsserts::assert_in_correct_region(void* interior_loc, oop obj, co
|
||||
for (size_t i = idx; i < idx + num_regions; i++) {
|
||||
ShenandoahHeapRegion* chain_reg = heap->get_region(i);
|
||||
if (i == idx && !chain_reg->is_humongous_start()) {
|
||||
print_failure(_safe_unknown, obj, interior_loc, NULL, "Shenandoah assert_in_correct_region failed",
|
||||
print_failure(_safe_unknown, obj, interior_loc, nullptr, "Shenandoah assert_in_correct_region failed",
|
||||
"Object must reside in humongous start",
|
||||
file, line);
|
||||
}
|
||||
if (i != idx && !chain_reg->is_humongous_continuation()) {
|
||||
print_failure(_safe_oop, obj, interior_loc, NULL, "Shenandoah assert_in_correct_region failed",
|
||||
print_failure(_safe_oop, obj, interior_loc, nullptr, "Shenandoah assert_in_correct_region failed",
|
||||
"Humongous continuation should be of proper size",
|
||||
file, line);
|
||||
}
|
||||
@@ -288,7 +288,7 @@ void ShenandoahAsserts::assert_forwarded(void* interior_loc, oop obj, const char
|
||||
oop fwd = ShenandoahForwarding::get_forwardee_raw_unchecked(obj);
|
||||
|
||||
if (obj == fwd) {
|
||||
print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_forwarded failed",
|
||||
print_failure(_safe_all, obj, interior_loc, nullptr, "Shenandoah assert_forwarded failed",
|
||||
"Object should be forwarded",
|
||||
file, line);
|
||||
}
|
||||
@@ -299,7 +299,7 @@ void ShenandoahAsserts::assert_not_forwarded(void* interior_loc, oop obj, const
|
||||
oop fwd = ShenandoahForwarding::get_forwardee_raw_unchecked(obj);
|
||||
|
||||
if (obj != fwd) {
|
||||
print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_not_forwarded failed",
|
||||
print_failure(_safe_all, obj, interior_loc, nullptr, "Shenandoah assert_not_forwarded failed",
|
||||
"Object should not be forwarded",
|
||||
file, line);
|
||||
}
|
||||
@@ -310,7 +310,7 @@ void ShenandoahAsserts::assert_marked(void *interior_loc, oop obj, const char *f
|
||||
|
||||
ShenandoahHeap* heap = ShenandoahHeap::heap();
|
||||
if (!heap->marking_context()->is_marked(obj)) {
|
||||
print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_marked failed",
|
||||
print_failure(_safe_all, obj, interior_loc, nullptr, "Shenandoah assert_marked failed",
|
||||
"Object should be marked",
|
||||
file, line);
|
||||
}
|
||||
@@ -321,7 +321,7 @@ void ShenandoahAsserts::assert_marked_weak(void *interior_loc, oop obj, const ch
|
||||
|
||||
ShenandoahHeap* heap = ShenandoahHeap::heap();
|
||||
if (!heap->marking_context()->is_marked_weak(obj)) {
|
||||
print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_marked_weak failed",
|
||||
print_failure(_safe_all, obj, interior_loc, nullptr, "Shenandoah assert_marked_weak failed",
|
||||
"Object should be marked weakly",
|
||||
file, line);
|
||||
}
|
||||
@@ -332,7 +332,7 @@ void ShenandoahAsserts::assert_marked_strong(void *interior_loc, oop obj, const
|
||||
|
||||
ShenandoahHeap* heap = ShenandoahHeap::heap();
|
||||
if (!heap->marking_context()->is_marked_strong(obj)) {
|
||||
print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_marked_strong failed",
|
||||
print_failure(_safe_all, obj, interior_loc, nullptr, "Shenandoah assert_marked_strong failed",
|
||||
"Object should be marked strongly",
|
||||
file, line);
|
||||
}
|
||||
@@ -343,7 +343,7 @@ void ShenandoahAsserts::assert_in_cset(void* interior_loc, oop obj, const char*
|
||||
|
||||
ShenandoahHeap* heap = ShenandoahHeap::heap();
|
||||
if (!heap->in_collection_set(obj)) {
|
||||
print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_in_cset failed",
|
||||
print_failure(_safe_all, obj, interior_loc, nullptr, "Shenandoah assert_in_cset failed",
|
||||
"Object should be in collection set",
|
||||
file, line);
|
||||
}
|
||||
@@ -354,7 +354,7 @@ void ShenandoahAsserts::assert_not_in_cset(void* interior_loc, oop obj, const ch
|
||||
|
||||
ShenandoahHeap* heap = ShenandoahHeap::heap();
|
||||
if (heap->in_collection_set(obj)) {
|
||||
print_failure(_safe_all, obj, interior_loc, NULL, "Shenandoah assert_not_in_cset failed",
|
||||
print_failure(_safe_all, obj, interior_loc, nullptr, "Shenandoah assert_not_in_cset failed",
|
||||
"Object should not be in collection set",
|
||||
file, line);
|
||||
}
|
||||
@@ -363,7 +363,7 @@ void ShenandoahAsserts::assert_not_in_cset(void* interior_loc, oop obj, const ch
|
||||
void ShenandoahAsserts::assert_not_in_cset_loc(void* interior_loc, const char* file, int line) {
|
||||
ShenandoahHeap* heap = ShenandoahHeap::heap();
|
||||
if (heap->in_collection_set_loc(interior_loc)) {
|
||||
print_failure(_safe_unknown, NULL, interior_loc, NULL, "Shenandoah assert_not_in_cset_loc failed",
|
||||
print_failure(_safe_unknown, nullptr, interior_loc, nullptr, "Shenandoah assert_not_in_cset_loc failed",
|
||||
"Interior location should not be in collection set",
|
||||
file, line);
|
||||
}
|
||||
|
||||
@@ -45,7 +45,7 @@ ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) :
|
||||
BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(),
|
||||
make_barrier_set_c1<ShenandoahBarrierSetC1>(),
|
||||
make_barrier_set_c2<ShenandoahBarrierSetC2>(),
|
||||
ShenandoahNMethodBarrier ? new ShenandoahBarrierSetNMethod(heap) : NULL,
|
||||
ShenandoahNMethodBarrier ? new ShenandoahBarrierSetNMethod(heap) : nullptr,
|
||||
new ShenandoahBarrierSetStackChunk(),
|
||||
BarrierSet::FakeRtti(BarrierSet::ShenandoahBarrierSet)),
|
||||
_heap(heap),
|
||||
@@ -103,7 +103,7 @@ void ShenandoahBarrierSet::on_thread_attach(Thread *thread) {
|
||||
ShenandoahThreadLocalData::initialize_gclab(thread);
|
||||
|
||||
BarrierSetNMethod* bs_nm = barrier_set_nmethod();
|
||||
if (bs_nm != NULL) {
|
||||
if (bs_nm != nullptr) {
|
||||
thread->set_nmethod_disarmed_guard_value(bs_nm->disarmed_guard_value());
|
||||
}
|
||||
|
||||
@@ -120,7 +120,7 @@ void ShenandoahBarrierSet::on_thread_detach(Thread *thread) {
|
||||
_satb_mark_queue_set.flush_queue(queue);
|
||||
if (thread->is_Java_thread()) {
|
||||
PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
|
||||
if (gclab != NULL) {
|
||||
if (gclab != nullptr) {
|
||||
gclab->retire();
|
||||
}
|
||||
|
||||
|
||||
@@ -43,7 +43,7 @@ inline oop ShenandoahBarrierSet::resolve_forwarded_not_null(oop p) {
|
||||
}
|
||||
|
||||
inline oop ShenandoahBarrierSet::resolve_forwarded(oop p) {
|
||||
if (p != NULL) {
|
||||
if (p != nullptr) {
|
||||
return resolve_forwarded_not_null(p);
|
||||
} else {
|
||||
return p;
|
||||
@@ -68,7 +68,7 @@ inline oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj, T* load
fwd = _heap->evacuate_object(obj, t);
}

if (load_addr != NULL && fwd != obj) {
if (load_addr != nullptr && fwd != obj) {
// Since we are here and we know the load address, update the reference.
ShenandoahHeap::atomic_update_oop(fwd, load_addr, obj);
}
@@ -81,8 +81,8 @@ inline oop ShenandoahBarrierSet::load_reference_barrier(oop obj) {
return obj;
}
if (_heap->has_forwarded_objects() &&
_heap->in_collection_set(obj)) { // Subsumes NULL-check
assert(obj != NULL, "cset check must have subsumed NULL-check");
_heap->in_collection_set(obj)) { // Subsumes null-check
assert(obj != nullptr, "cset check must have subsumed null-check");
oop fwd = resolve_forwarded_not_null(obj);
if (obj == fwd && _heap->is_evacuation_in_progress()) {
Thread* t = Thread::current();
@@ -96,22 +96,22 @@ inline oop ShenandoahBarrierSet::load_reference_barrier(oop obj) {

template <class T>
inline oop ShenandoahBarrierSet::load_reference_barrier(DecoratorSet decorators, oop obj, T* load_addr) {
if (obj == NULL) {
return NULL;
if (obj == nullptr) {
return nullptr;
}

// Prevent resurrection of unreachable phantom (i.e. weak-native) references.
if ((decorators & ON_PHANTOM_OOP_REF) != 0 &&
_heap->is_concurrent_weak_root_in_progress() &&
!_heap->marking_context()->is_marked(obj)) {
return NULL;
return nullptr;
}

// Prevent resurrection of unreachable weak references.
if ((decorators & ON_WEAK_OOP_REF) != 0 &&
_heap->is_concurrent_weak_root_in_progress() &&
!_heap->marking_context()->is_marked_strong(obj)) {
return NULL;
return nullptr;
}

// Prevent resurrection of unreachable objects that are visited during
@@ -123,7 +123,7 @@ inline oop ShenandoahBarrierSet::load_reference_barrier(DecoratorSet decorators,
}

oop fwd = load_reference_barrier(obj);
if (ShenandoahSelfFixing && load_addr != NULL && fwd != obj) {
if (ShenandoahSelfFixing && load_addr != nullptr && fwd != obj) {
// Since we are here and we know the load address, update the reference.
ShenandoahHeap::atomic_update_oop(fwd, load_addr, obj);
}
@@ -132,7 +132,7 @@ inline oop ShenandoahBarrierSet::load_reference_barrier(DecoratorSet decorators,
}

inline void ShenandoahBarrierSet::enqueue(oop obj) {
assert(obj != NULL, "checked by caller");
assert(obj != nullptr, "checked by caller");
assert(_satb_mark_queue_set.is_active(), "only get here when SATB active");

// Filter marked objects before hitting the SATB queues. The same predicate would
@@ -159,13 +159,13 @@ inline void ShenandoahBarrierSet::satb_barrier(T *field) {
}

inline void ShenandoahBarrierSet::satb_enqueue(oop value) {
if (value != NULL && ShenandoahSATBBarrier && _heap->is_concurrent_mark_in_progress()) {
if (value != nullptr && ShenandoahSATBBarrier && _heap->is_concurrent_mark_in_progress()) {
enqueue(value);
}
}

inline void ShenandoahBarrierSet::iu_barrier(oop obj) {
if (ShenandoahIUBarrier && obj != NULL && _heap->is_concurrent_mark_in_progress()) {
if (ShenandoahIUBarrier && obj != nullptr && _heap->is_concurrent_mark_in_progress()) {
enqueue(obj);
}
}
@@ -242,8 +242,8 @@ inline oop ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_loa
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_store_common(T* addr, oop value) {
shenandoah_assert_marked_if(NULL, value, !CompressedOops::is_null(value) && ShenandoahHeap::heap()->is_evacuation_in_progress());
shenandoah_assert_not_in_cset_if(addr, value, value != NULL && !ShenandoahHeap::heap()->cancelled_gc());
shenandoah_assert_marked_if(nullptr, value, !CompressedOops::is_null(value) && ShenandoahHeap::heap()->is_evacuation_in_progress());
shenandoah_assert_not_in_cset_if(addr, value, value != nullptr && !ShenandoahHeap::heap()->cancelled_gc());
ShenandoahBarrierSet* const bs = ShenandoahBarrierSet::barrier_set();
bs->iu_barrier(value);
bs->satb_barrier<decorators>(addr);
@@ -260,7 +260,7 @@ template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline void ShenandoahBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_store_in_heap(T* addr, oop value) {
shenandoah_assert_not_in_cset_loc_except(addr, ShenandoahHeap::heap()->cancelled_gc());
shenandoah_assert_not_forwarded_except (addr, value, value == NULL || ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahHeap::heap()->is_concurrent_mark_in_progress());
shenandoah_assert_not_forwarded_except (addr, value, value == nullptr || ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahHeap::heap()->is_concurrent_mark_in_progress());

oop_store_common(addr, value);
}

@@ -102,7 +102,7 @@ void ShenandoahBarrierSet::clone_update(oop obj) {

void ShenandoahBarrierSet::clone_barrier(oop obj) {
assert(ShenandoahCloneBarrier, "only get here with clone barriers enabled");
shenandoah_assert_correct(NULL, obj);
shenandoah_assert_correct(nullptr, obj);

int gc_state = _heap->gc_state();
if ((gc_state & ShenandoahHeap::MARKING) != 0) {

@@ -37,7 +37,7 @@

bool ShenandoahBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
ShenandoahReentrantLock* lock = ShenandoahNMethod::lock_for_nmethod(nm);
assert(lock != NULL, "Must be");
assert(lock != nullptr, "Must be");
ShenandoahReentrantLocker locker(lock);

if (!is_armed(nm)) {

@@ -47,7 +47,7 @@ bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
return false;
}
obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
shenandoah_assert_not_forwarded_if(NULL, obj, ShenandoahHeap::heap()->is_concurrent_mark_in_progress());
shenandoah_assert_not_forwarded_if(nullptr, obj, ShenandoahHeap::heap()->is_concurrent_mark_in_progress());
return _mark_context->is_marked(obj);
}

@@ -59,7 +59,7 @@ bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
if (CompressedOops::is_null(obj)) {
return false;
}
shenandoah_assert_not_forwarded(NULL, obj);
shenandoah_assert_not_forwarded(nullptr, obj);
return _mark_context->is_marked(obj);
}

@@ -111,7 +111,7 @@ void ShenandoahUpdateRefsClosure::do_oop(narrowOop* p) { do_oop_work(p); }

template <bool concurrent, bool stable_thread>
ShenandoahEvacuateUpdateRootClosureBase<concurrent, stable_thread>::ShenandoahEvacuateUpdateRootClosureBase() :
_heap(ShenandoahHeap::heap()), _thread(stable_thread ? Thread::current() : NULL) {
_heap(ShenandoahHeap::heap()), _thread(stable_thread ? Thread::current() : nullptr) {
}

template <bool concurrent, bool stable_thread>
@@ -199,7 +199,7 @@ ShenandoahCodeBlobAndDisarmClosure::ShenandoahCodeBlobAndDisarmClosure(OopClosur

void ShenandoahCodeBlobAndDisarmClosure::do_code_blob(CodeBlob* cb) {
nmethod* const nm = cb->as_nmethod_or_null();
if (nm != NULL) {
if (nm != nullptr) {
assert(!ShenandoahNMethod::gc_data(nm)->is_unregistered(), "Should not be here");
CodeBlobToOopClosure::do_code_blob(cb);
_bs->disarm(nm);

@@ -83,7 +83,7 @@ void ShenandoahParallelCodeHeapIterator::parallel_blobs_do(CodeBlobClosure* f) {
int count = 0;
bool process_block = true;

for (CodeBlob *cb = CodeCache::first_blob(_heap); cb != NULL; cb = CodeCache::next_blob(_heap, cb)) {
for (CodeBlob *cb = CodeCache::first_blob(_heap); cb != nullptr; cb = CodeCache::next_blob(_heap, cb)) {
int current = count++;
if ((current & stride_mask) == 0) {
process_block = (current >= _claimed_idx) &&
@@ -119,7 +119,7 @@ void ShenandoahCodeRoots::unregister_nmethod(nmethod* nm) {
}

void ShenandoahCodeRoots::arm_nmethods() {
assert(BarrierSet::barrier_set()->barrier_set_nmethod() != NULL, "Sanity");
assert(BarrierSet::barrier_set()->barrier_set_nmethod() != nullptr, "Sanity");
BarrierSet::barrier_set()->barrier_set_nmethod()->arm_all_nmethods();
}

@@ -285,7 +285,7 @@ void ShenandoahCodeRoots::purge() {

ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() :
_par_iterator(CodeCache::heaps()),
_table_snapshot(NULL) {
_table_snapshot(nullptr) {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
MutexLocker locker(CodeCache_lock, Mutex::_no_safepoint_check_flag);
_table_snapshot = ShenandoahCodeRoots::table()->snapshot_for_iteration();
@@ -294,12 +294,12 @@ ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() :
ShenandoahCodeRootsIterator::~ShenandoahCodeRootsIterator() {
MonitorLocker locker(CodeCache_lock, Mutex::_no_safepoint_check_flag);
ShenandoahCodeRoots::table()->finish_iteration(_table_snapshot);
_table_snapshot = NULL;
_table_snapshot = nullptr;
locker.notify_all();
}

void ShenandoahCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure *f) {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
assert(_table_snapshot != NULL, "Sanity");
assert(_table_snapshot != nullptr, "Sanity");
_table_snapshot->parallel_blobs_do(f);
}

@@ -46,8 +46,8 @@ ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedS
_current_index(0) {

// The collection set map is reserved to cover the entire heap *and* zero addresses.
// This is needed to accept in-cset checks for both heap oops and NULLs, freeing
// high-performance code from checking for NULL first.
// This is needed to accept in-cset checks for both heap oops and nulls, freeing
// high-performance code from checking for null first.
//
// Since heap_base can be far away, committing the entire map would waste memory.
// Therefore, we only commit the parts that are needed to operate: the heap view,
@@ -131,7 +131,7 @@ ShenandoahHeapRegion* ShenandoahCollectionSet::claim_next() {
}
}
}
return NULL;
return nullptr;
}

ShenandoahHeapRegion* ShenandoahCollectionSet::next() {
@@ -146,7 +146,7 @@ ShenandoahHeapRegion* ShenandoahCollectionSet::next() {
}
}

return NULL;
return nullptr;
}

void ShenandoahCollectionSet::print_on(outputStream* out) const {

@@ -41,12 +41,12 @@ bool ShenandoahCollectionSet::is_in(ShenandoahHeapRegion* r) const {
}

bool ShenandoahCollectionSet::is_in(oop p) const {
shenandoah_assert_in_heap_or_null(NULL, p);
shenandoah_assert_in_heap_or_null(nullptr, p);
return is_in_loc(cast_from_oop<void*>(p));
}

bool ShenandoahCollectionSet::is_in_loc(void* p) const {
assert(p == NULL || _heap->is_in(p), "Must be in the heap");
assert(p == nullptr || _heap->is_in(p), "Must be in the heap");
uintx index = ((uintx) p) >> _region_size_bytes_shift;
// no need to subtract the bottom of the heap from p,
// _biased_cset_map is biased

@@ -734,7 +734,7 @@ public:
}
};

// This task not only evacuates/updates marked weak roots, but also "NULL"
// This task not only evacuates/updates marked weak roots, but also "null"
// dead weak roots.
class ShenandoahConcurrentWeakRootsEvacUpdateTask : public WorkerTask {
private:
@@ -782,7 +782,7 @@ public:
// cleanup the weak oops in CLD and determinate nmethod's unloading state, so that we
// can cleanup immediate garbage sooner.
if (ShenandoahHeap::heap()->unload_classes()) {
// Applies ShenandoahIsCLDAlive closure to CLDs, native barrier will either NULL the
// Applies ShenandoahIsCLDAlive closure to CLDs, native barrier will either null the
// CLD's holder or evacuate it.
{
ShenandoahIsCLDAliveClosure is_cld_alive;
@@ -953,7 +953,7 @@ void ShenandoahUpdateThreadClosure::do_thread(Thread* thread) {
if (thread->is_Java_thread()) {
JavaThread* jt = JavaThread::cast(thread);
ResourceMark rm;
jt->oops_do(&_cl, NULL);
jt->oops_do(&_cl, nullptr);
}
}


@@ -60,7 +60,7 @@ public:
ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
ShenandoahReferenceProcessor* rp = heap->ref_processor();
assert(rp != NULL, "need reference processor");
assert(rp != nullptr, "need reference processor");
StringDedup::Requests requests;
_cm->mark_loop(worker_id, _terminator, rp,
true /*cancellable*/,
@@ -84,9 +84,9 @@ public:
// Transfer any partial buffer to the qset for completed buffer processing.
_satb_qset.flush_queue(ShenandoahThreadLocalData::satb_mark_queue(thread));
if (thread->is_Java_thread()) {
if (_cl != NULL) {
if (_cl != nullptr) {
ResourceMark rm;
thread->oops_do(_cl, NULL);
thread->oops_do(_cl, nullptr);
}
}
}
@@ -121,7 +121,7 @@ public:

ShenandoahMarkRefsClosure mark_cl(q, rp);
ShenandoahSATBAndRemarkThreadsClosure tc(satb_mq_set,
ShenandoahIUBarrier ? &mark_cl : NULL);
ShenandoahIUBarrier ? &mark_cl : nullptr);
Threads::possibly_parallel_threads_do(true /* is_par */, &tc);
}
_cm->mark_loop(worker_id, _terminator, rp,

@@ -150,7 +150,7 @@ void ShenandoahDegenGC::op_degenerated() {
heap->collection_set()->clear_current_index();

ShenandoahHeapRegion* r;
while ((r = heap->collection_set()->next()) != NULL) {
while ((r = heap->collection_set()->next()) != nullptr) {
if (r->is_pinned()) {
heap->cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
op_degenerated_fail();

@@ -32,19 +32,19 @@
#include "runtime/javaThread.hpp"

inline oop ShenandoahForwarding::get_forwardee_raw(oop obj) {
shenandoah_assert_in_heap(NULL, obj);
shenandoah_assert_in_heap(nullptr, obj);
return get_forwardee_raw_unchecked(obj);
}

inline oop ShenandoahForwarding::get_forwardee_raw_unchecked(oop obj) {
// JVMTI and JFR code use mark words for marking objects for their needs.
// On this path, we can encounter the "marked" object, but with NULL
// On this path, we can encounter the "marked" object, but with null
// fwdptr. That object is still not forwarded, and we need to return
// the object itself.
markWord mark = obj->mark();
if (mark.is_marked()) {
HeapWord* fwdptr = (HeapWord*) mark.clear_lock_bits().to_pointer();
if (fwdptr != NULL) {
if (fwdptr != nullptr) {
return cast_to_oop(fwdptr);
}
}
@@ -52,14 +52,14 @@ inline oop ShenandoahForwarding::get_forwardee_raw_unchecked(oop obj) {
}

inline oop ShenandoahForwarding::get_forwardee_mutator(oop obj) {
// Same as above, but mutator thread cannot ever see NULL forwardee.
shenandoah_assert_correct(NULL, obj);
// Same as above, but mutator thread cannot ever see null forwardee.
shenandoah_assert_correct(nullptr, obj);
assert(Thread::current()->is_Java_thread(), "Must be a mutator thread");

markWord mark = obj->mark();
if (mark.is_marked()) {
HeapWord* fwdptr = (HeapWord*) mark.clear_lock_bits().to_pointer();
assert(fwdptr != NULL, "Forwarding pointer is never null here");
assert(fwdptr != nullptr, "Forwarding pointer is never null here");
return cast_to_oop(fwdptr);
} else {
return obj;
@@ -67,7 +67,7 @@ inline oop ShenandoahForwarding::get_forwardee_mutator(oop obj) {
}

inline oop ShenandoahForwarding::get_forwardee(oop obj) {
shenandoah_assert_correct(NULL, obj);
shenandoah_assert_correct(nullptr, obj);
return get_forwardee_raw_unchecked(obj);
}


@@ -82,7 +82,7 @@ HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool&
for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
if (is_mutator_free(idx)) {
HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
if (result != NULL) {
if (result != nullptr) {
return result;
}
}
@@ -100,7 +100,7 @@ HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool&
size_t idx = c - 1;
if (is_collector_free(idx)) {
HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
if (result != NULL) {
if (result != nullptr) {
return result;
}
}
@@ -108,7 +108,7 @@ HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool&

// No dice. Can we borrow space from mutator view?
if (!ShenandoahEvacReserveOverflow) {
return NULL;
return nullptr;
}

// Try to steal the empty region from the mutator view
@@ -119,7 +119,7 @@ HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool&
if (can_allocate_from(r)) {
flip_to_gc(r);
HeapWord *result = try_allocate_in(r, req, in_new_region);
if (result != NULL) {
if (result != nullptr) {
return result;
}
}
@@ -136,7 +136,7 @@ HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool&
ShouldNotReachHere();
}

return NULL;
return nullptr;
}

HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
@@ -144,14 +144,14 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah

if (_heap->is_concurrent_weak_root_in_progress() &&
r->is_trash()) {
return NULL;
return nullptr;
}

try_recycle_trashed(r);

in_new_region = r->is_empty();

HeapWord* result = NULL;
HeapWord* result = nullptr;
size_t size = req.size();

if (ShenandoahElasticTLAB && req.is_lab_alloc()) {
@@ -161,13 +161,13 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
}
if (size >= req.min_size()) {
result = r->allocate(size, req.type());
assert (result != NULL, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
}
} else {
result = r->allocate(size, req.type());
}

if (result != NULL) {
if (result != nullptr) {
// Allocation successful, bump stats:
if (req.is_mutator_alloc()) {
increase_used(size * HeapWordSize);
@@ -181,7 +181,7 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
}
}

if (result == NULL || has_no_alloc_capacity(r)) {
if (result == nullptr || has_no_alloc_capacity(r)) {
// Region cannot afford this or future allocations. Retire it.
//
// While this seems a bit harsh, especially in the case when this large allocation does not
@@ -250,7 +250,7 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {

// No regions left to satisfy allocation, bye.
if (num > mutator_count()) {
return NULL;
return nullptr;
}

// Find the continuous interval of $num regions, starting from $beg and ending in $end,
@@ -262,7 +262,7 @@ HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
while (true) {
if (end >= _max) {
// Hit the end, goodbye
return NULL;
return nullptr;
}

// If regions are not adjacent, then current [beg; end] is useless, and we may fast-forward.
@@ -549,10 +549,10 @@ HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_
in_new_region = false;
assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
req.size(), ShenandoahHeapRegion::humongous_threshold_words());
return NULL;
return nullptr;
default:
ShouldNotReachHere();
return NULL;
return nullptr;
}
} else {
return allocate_single(req, in_new_region);

@@ -314,7 +314,7 @@ public:
_empty_regions(empty_regions),
_empty_regions_pos(0),
_to_region(to_region),
_from_region(NULL),
_from_region(nullptr),
_compact_point(to_region->bottom()) {}

void set_from_region(ShenandoahHeapRegion* from_region) {
@@ -322,7 +322,7 @@ public:
}

void finish_region() {
assert(_to_region != NULL, "should not happen");
assert(_to_region != nullptr, "should not happen");
_to_region->set_new_top(_compact_point);
}

@@ -335,7 +335,7 @@ public:
}

void do_object(oop p) {
assert(_from_region != NULL, "must set before work");
assert(_from_region != nullptr, "must set before work");
assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
assert(!_heap->complete_marking_context()->allocated_after_mark_start(p), "must be truly marked");

@@ -354,14 +354,14 @@ public:
}

assert(new_to_region != _to_region, "must not reuse same to-region");
assert(new_to_region != NULL, "must not be NULL");
assert(new_to_region != nullptr, "must not be null");
_to_region = new_to_region;
_compact_point = _to_region->bottom();
}

// Object fits into current region, record new location:
assert(_compact_point + obj_size <= _to_region->end(), "must fit");
shenandoah_assert_not_forwarded(NULL, p);
shenandoah_assert_not_forwarded(nullptr, p);
_preserved_marks->push_if_necessary(p, p->mark());
p->forward_to(cast_to_oop(_compact_point));
_compact_point += obj_size;
@@ -399,7 +399,7 @@ public:
ShenandoahHeapRegionSetIterator it(slice);
ShenandoahHeapRegion* from_region = it.next();
// No work?
if (from_region == NULL) {
if (from_region == nullptr) {
return;
}

@@ -411,7 +411,7 @@ public:

ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);

while (from_region != NULL) {
while (from_region != nullptr) {
assert(is_candidate_region(from_region), "Sanity");

cl.set_from_region(from_region);
@@ -665,7 +665,7 @@ void ShenandoahFullGC::distribute_slices(ShenandoahHeapRegionSet** worker_slices
for (size_t wid = 0; wid < n_workers; wid++) {
ShenandoahHeapRegionSetIterator it(worker_slices[wid]);
ShenandoahHeapRegion* r = it.next();
while (r != NULL) {
while (r != nullptr) {
size_t idx = r->index();
assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), "Sanity: " SIZE_FORMAT, idx);
assert(!map.at(idx), "No region distributed twice: " SIZE_FORMAT, idx);
@@ -779,7 +779,7 @@ public:
ShenandoahParallelWorkerSession worker_session(worker_id);
ShenandoahAdjustPointersObjectClosure obj_cl;
ShenandoahHeapRegion* r = _regions.next();
while (r != NULL) {
while (r != nullptr) {
if (!r->is_humongous_continuation() && r->has_live()) {
_heap->marked_object_iterate(r, &obj_cl);
}
@@ -872,7 +872,7 @@ public:

ShenandoahCompactObjectsClosure cl(worker_id);
ShenandoahHeapRegion* r = slice.next();
while (r != NULL) {
while (r != nullptr) {
assert(!r->is_humongous(), "must not get humongous regions here");
if (r->has_live()) {
_heap->marked_object_iterate(r, &cl);
@@ -1017,7 +1017,7 @@ public:
ShenandoahHeapRegion* region = _regions.next();
ShenandoahHeap* heap = ShenandoahHeap::heap();
ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
while (region != NULL) {
while (region != nullptr) {
if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
ctx->clear_bitmap(region);
}

@@ -102,7 +102,7 @@ public:

virtual void work(uint worker_id) {
ShenandoahHeapRegion* r = _regions.next();
while (r != NULL) {
while (r != nullptr) {
if (r->is_committed()) {
os::pretouch_memory(r->bottom(), r->end(), _page_size);
}
@@ -126,7 +126,7 @@ public:

virtual void work(uint worker_id) {
ShenandoahHeapRegion* r = _regions.next();
while (r != NULL) {
while (r != nullptr) {
size_t start = r->index() * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
size_t end = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size);
@@ -311,7 +311,7 @@ jint ShenandoahHeap::initialize() {
}
}

if (_collection_set == NULL) {
if (_collection_set == nullptr) {
ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size());
_collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
}
@@ -397,7 +397,7 @@ jint ShenandoahHeap::initialize() {
_pacer = new ShenandoahPacer(this);
_pacer->setup_for_idle();
} else {
_pacer = NULL;
_pacer = nullptr;
}

_control_thread = new ShenandoahControlThread();
@@ -408,7 +408,7 @@ jint ShenandoahHeap::initialize() {
}

void ShenandoahHeap::initialize_mode() {
if (ShenandoahGCMode != NULL) {
if (ShenandoahGCMode != nullptr) {
if (strcmp(ShenandoahGCMode, "satb") == 0) {
_gc_mode = new ShenandoahSATBMode();
} else if (strcmp(ShenandoahGCMode, "iu") == 0) {
@@ -435,7 +435,7 @@ void ShenandoahHeap::initialize_mode() {
}

void ShenandoahHeap::initialize_heuristics() {
assert(_gc_mode != NULL, "Must be initialized");
assert(_gc_mode != nullptr, "Must be initialized");
_heuristics = _gc_mode->initialize_heuristics();

if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
@@ -462,36 +462,36 @@ ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
_committed(0),
_bytes_allocated_since_gc_start(0),
_max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
_workers(NULL),
_safepoint_workers(NULL),
_workers(nullptr),
_safepoint_workers(nullptr),
_heap_region_special(false),
_num_regions(0),
_regions(NULL),
_regions(nullptr),
_update_refs_iterator(this),
_control_thread(NULL),
_control_thread(nullptr),
_shenandoah_policy(policy),
_gc_mode(NULL),
_heuristics(NULL),
_free_set(NULL),
_pacer(NULL),
_verifier(NULL),
_phase_timings(NULL),
_monitoring_support(NULL),
_memory_pool(NULL),
_gc_mode(nullptr),
_heuristics(nullptr),
_free_set(nullptr),
_pacer(nullptr),
_verifier(nullptr),
_phase_timings(nullptr),
_monitoring_support(nullptr),
_memory_pool(nullptr),
_stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
_cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
_gc_timer(new ConcurrentGCTimer()),
_soft_ref_policy(),
_log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
_ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))),
_marking_context(NULL),
_marking_context(nullptr),
_bitmap_size(0),
_bitmap_regions_per_slice(0),
_bitmap_bytes_per_slice(0),
_bitmap_region_special(false),
_aux_bitmap_region_special(false),
_liveness_cache(NULL),
_collection_set(NULL)
_liveness_cache(nullptr),
_collection_set(nullptr)
{
// Initialize GC mode early, so we can adjust barrier support
initialize_mode();
@@ -499,7 +499,7 @@ ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :

_max_workers = MAX2(_max_workers, 1U);
_workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
if (_workers == NULL) {
if (_workers == nullptr) {
vm_exit_during_initialization("Failed necessary allocation.");
} else {
_workers->initialize_workers();
@@ -528,7 +528,7 @@ public:
ShenandoahHeapRegion* region = _regions.next();
ShenandoahHeap* heap = ShenandoahHeap::heap();
ShenandoahMarkingContext* const ctx = heap->marking_context();
while (region != NULL) {
while (region != nullptr) {
if (heap->is_bitmap_slice_committed(region)) {
ctx->clear_bitmap(region);
}
@@ -583,11 +583,11 @@ void ShenandoahHeap::print_on(outputStream* st) const {

ShenandoahCollectionSet* cset = collection_set();
st->print_cr("Collection set:");
if (cset != NULL) {
if (cset != nullptr) {
st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
st->print_cr(" - map (biased): " PTR_FORMAT, p2i(cset->biased_map_address()));
} else {
st->print_cr(" (NULL)");
st->print_cr(" (null)");
}

st->cr();
@@ -601,7 +601,7 @@ void ShenandoahHeap::print_on(outputStream* st) const {
class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
public:
void do_thread(Thread* thread) {
assert(thread != NULL, "Sanity");
assert(thread != nullptr, "Sanity");
assert(thread->is_Worker_thread(), "Only worker thread expected");
ShenandoahThreadLocalData::initialize_gclab(thread);
}
@@ -617,7 +617,7 @@ void ShenandoahHeap::post_initialize() {
// gclab can not be initialized early during VM startup, as it can not determinate its max_size.
// Now, we will let WorkerThreads to initialize gclab when new worker is created.
_workers->set_initialize_gclab();
if (_safepoint_workers != NULL) {
if (_safepoint_workers != nullptr) {
_safepoint_workers->threads_do(&init_gclabs);
_safepoint_workers->set_initialize_gclab();
}
@@ -760,7 +760,7 @@ HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size)
if (new_size < size) {
// New size still does not fit the object. Fall back to shared allocation.
// This avoids retiring perfectly good GCLABs, when we encounter a large object.
return NULL;
return nullptr;
}

// Retire current GCLAB, and allocate a new one.
@@ -769,8 +769,8 @@ HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size)

size_t actual_size = 0;
HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
if (gclab_buf == NULL) {
return NULL;
if (gclab_buf == nullptr) {
return nullptr;
}

assert (size <= actual_size, "allocation should fit");
@@ -797,7 +797,7 @@ HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
size_t* actual_size) {
ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
HeapWord* res = allocate_memory(req);
if (res != NULL) {
if (res != nullptr) {
*actual_size = req.actual_size();
} else {
*actual_size = 0;
@@ -810,7 +810,7 @@ HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
size_t* actual_size) {
ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
HeapWord* res = allocate_memory(req);
if (res != NULL) {
if (res != nullptr) {
*actual_size = req.actual_size();
} else {
*actual_size = 0;
@@ -821,7 +821,7 @@ HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
intptr_t pacer_epoch = 0;
bool in_new_region = false;
HeapWord* result = NULL;
HeapWord* result = nullptr;

if (req.is_mutator_alloc()) {
if (ShenandoahPacing) {
@@ -845,13 +845,13 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {

size_t tries = 0;

while (result == NULL && _progress_last_gc.is_set()) {
while (result == nullptr && _progress_last_gc.is_set()) {
tries++;
control_thread()->handle_alloc_failure(req);
result = allocate_memory_under_lock(req, in_new_region);
}

while (result == NULL && tries <= ShenandoahFullGCThreshold) {
while (result == nullptr && tries <= ShenandoahFullGCThreshold) {
tries++;
control_thread()->handle_alloc_failure(req);
result = allocate_memory_under_lock(req, in_new_region);
@@ -868,7 +868,7 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
control_thread()->notify_heap_changed();
}

if (result != NULL) {
if (result != nullptr) {
size_t requested = req.size();
size_t actual = req.actual_size();

@@ -917,7 +917,7 @@ MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* lo

// Expand and retry allocation
result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
if (result != NULL) {
if (result != nullptr) {
return result;
}

@@ -926,18 +926,18 @@ MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* lo

// Retry allocation
result = loader_data->metaspace_non_null()->allocate(size, mdtype);
if (result != NULL) {
if (result != nullptr) {
return result;
}

// Expand and retry allocation
result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
if (result != NULL) {
if (result != nullptr) {
return result;
}

// Out of memory
return NULL;
return nullptr;
}

class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
@@ -949,7 +949,7 @@ public:
_heap(heap), _thread(Thread::current()) {}

void do_object(oop p) {
shenandoah_assert_marked(NULL, p);
shenandoah_assert_marked(nullptr, p);
if (!p->is_forwarded()) {
_heap->evacuate_object(p, _thread);
}
@@ -988,7 +988,7 @@ private:
void do_work() {
ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
ShenandoahHeapRegion* r;
while ((r =_cs->claim_next()) != NULL) {
while ((r =_cs->claim_next()) != nullptr) {
assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
_sh->marked_object_iterate(r, &cl);

@@ -1014,7 +1014,7 @@ void ShenandoahHeap::trash_cset_regions() {
ShenandoahCollectionSet* set = collection_set();
ShenandoahHeapRegion* r;
set->clear_current_index();
while ((r = set->next()) != NULL) {
while ((r = set->next()) != nullptr) {
r->make_trash();
}
collection_set()->clear();
@@ -1059,7 +1059,7 @@ public:
ShenandoahCheckCleanGCLABClosure() {}
void do_thread(Thread* thread) {
PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
}
};
@@ -1071,7 +1071,7 @@ public:
ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
void do_thread(Thread* thread) {
PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
gclab->retire();
if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
ShenandoahThreadLocalData::set_gclab_size(thread, 0);
@@ -1128,7 +1128,7 @@ void ShenandoahHeap::gclabs_retire(bool resize) {
}
workers()->threads_do(&cl);

if (safepoint_workers() != NULL) {
if (safepoint_workers() != nullptr) {
safepoint_workers()->threads_do(&cl);
}
}
@@ -1159,10 +1159,10 @@ void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {

HeapWord* ShenandoahHeap::block_start(const void* addr) const {
ShenandoahHeapRegion* r = heap_region_containing(addr);
if (r != NULL) {
if (r != nullptr) {
return r->block_start(addr);
}
return NULL;
return nullptr;
}

bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
@@ -1183,7 +1183,7 @@ void ShenandoahHeap::prepare_for_verify() {
void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
tcl->do_thread(_control_thread);
workers()->threads_do(tcl);
if (_safepoint_workers != NULL) {
if (_safepoint_workers != nullptr) {
_safepoint_workers->threads_do(tcl);
}
if (ShenandoahStringDedup::is_enabled()) {
@@ -1321,7 +1321,7 @@ void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_sta
// This populates the work stack with initial objects
// It is important to relinquish the associated locks before diving
// into heap dumper
uint n_workers = safepoint_workers() != NULL ? safepoint_workers()->active_workers() : 1;
uint n_workers = safepoint_workers() != nullptr ? safepoint_workers()->active_workers() : 1;
ShenandoahHeapIterationRootScanner rp(n_workers);
rp.roots_do(oops);
}
@@ -1400,16 +1400,16 @@ public:
// Reclaim bitmap
_heap->reclaim_aux_bitmap_for_iteration();
// Reclaim queue for workers
if (_task_queues!= NULL) {
if (_task_queues!= nullptr) {
for (uint i = 0; i < _num_workers; ++i) {
ShenandoahObjToScanQueue* q = _task_queues->queue(i);
if (q != NULL) {
if (q != nullptr) {
delete q;
_task_queues->register_queue(i, NULL);
_task_queues->register_queue(i, nullptr);
}
}
delete _task_queues;
_task_queues = NULL;
_task_queues = nullptr;
}
}

@@ -1449,10 +1449,10 @@ private:
uint worker_id,
ShenandoahObjToScanQueueSet* queue_set) {
assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
assert(queue_set != NULL, "task queue must not be NULL");
assert(queue_set != nullptr, "task queue must not be null");

ShenandoahObjToScanQueue* q = queue_set->queue(worker_id);
assert(q != NULL, "object iterate queue must not be NULL");
assert(q != nullptr, "object iterate queue must not be null");

ShenandoahMarkTask t;
ShenandoahObjectIterateParScanClosure oops(_aux_bit_map, q);
@@ -1475,7 +1475,7 @@ ParallelObjectIteratorImpl* ShenandoahHeap::parallel_object_iterator(uint worker

// Keep alive an object that was loaded with AS_NO_KEEPALIVE.
void ShenandoahHeap::keep_alive(oop obj) {
if (is_concurrent_mark_in_progress() && (obj != NULL)) {
if (is_concurrent_mark_in_progress() && (obj != nullptr)) {
ShenandoahBarrierSet::barrier_set()->enqueue(obj);
}
}
@@ -1872,7 +1872,7 @@ bool ShenandoahHeap::unload_classes() const {

address ShenandoahHeap::in_cset_fast_test_addr() {
ShenandoahHeap* heap = ShenandoahHeap::heap();
assert(heap->collection_set() != NULL, "Sanity");
assert(heap->collection_set() != nullptr, "Sanity");
return (address) heap->collection_set()->biased_map_address();
}

@@ -1923,7 +1923,7 @@ void ShenandoahHeap::pin_object(JavaThread* thr, oop o) {

void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
ShenandoahHeapRegion* r = heap_region_containing(o);
assert(r != NULL, "Sanity");
assert(r != nullptr, "Sanity");
assert(r->pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", r->index());
r->record_unpin();
}
@@ -2005,7 +2005,7 @@ void ShenandoahHeap::assert_gc_workers(uint nworkers) {

ShenandoahVerifier* ShenandoahHeap::verifier() {
guarantee(ShenandoahVerify, "Should be enabled");
assert (_verifier != NULL, "sanity");
assert (_verifier != nullptr, "sanity");
return _verifier;
}

@@ -2038,7 +2038,7 @@ private:
T cl;
ShenandoahHeapRegion* r = _regions->next();
ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
while (r != NULL) {
while (r != nullptr) {
HeapWord* update_watermark = r->get_update_watermark();
assert (update_watermark >= r->bottom(), "sanity");
if (r->is_active() && !r->is_cset()) {
@@ -2283,7 +2283,7 @@ char ShenandoahHeap::gc_state() const {

ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
#ifdef ASSERT
assert(_liveness_cache != NULL, "sanity");
assert(_liveness_cache != nullptr, "sanity");
assert(worker_id < _max_workers, "sanity");
for (uint i = 0; i < num_regions(); i++) {
assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
@@ -2294,7 +2294,7 @@ ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {

void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
assert(worker_id < _max_workers, "sanity");
assert(_liveness_cache != NULL, "sanity");
assert(_liveness_cache != nullptr, "sanity");
ShenandoahLiveData* ld = _liveness_cache[worker_id];
for (uint i = 0; i < num_regions(); i++) {
ShenandoahLiveData live = ld[i];

@@ -93,7 +93,7 @@ public:
// Reset iterator to default state
void reset();

// Returns next region, or NULL if there are no more regions.
// Returns next region, or null if there are no more regions.
// This is multi-thread-safe.
inline ShenandoahHeapRegion* next();


@@ -57,7 +57,7 @@ inline ShenandoahHeap* ShenandoahHeap::heap() {

inline ShenandoahHeapRegion* ShenandoahRegionIterator::next() {
size_t new_index = Atomic::add(&_index, (size_t) 1, memory_order_relaxed);
// get_region() provides the bounds-check and returns NULL on OOB.
// get_region() provides the bounds-check and returns null on OOB.
return _heap->get_region(new_index - 1);
}

@@ -219,8 +219,8 @@ inline bool ShenandoahHeap::atomic_update_oop_check(oop update, narrowOop* addr,
return CompressedOops::decode(Atomic::cmpxchg(addr, c, u, memory_order_release)) == compare;
}

// The memory ordering discussion above does not apply for methods that store NULLs:
// then, there is no transitive reads in mutator (as we see NULLs), and we can do
// The memory ordering discussion above does not apply for methods that store nulls:
// then, there is no transitive reads in mutator (as we see nulls), and we can do
// relaxed memory ordering there.

inline void ShenandoahHeap::atomic_clear_oop(oop* addr, oop compare) {
@@ -274,14 +274,14 @@ inline HeapWord* ShenandoahHeap::allocate_from_gclab(Thread* thread, size_t size
assert(UseTLAB, "TLABs should be enabled");

PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
if (gclab == NULL) {
if (gclab == nullptr) {
assert(!thread->is_Java_thread() && !thread->is_Worker_thread(),
"Performance: thread should have GCLAB: %s", thread->name());
// No GCLABs in this thread, fallback to shared allocation
return NULL;
return nullptr;
}
HeapWord* obj = gclab->allocate(size);
if (obj != NULL) {
if (obj != nullptr) {
return obj;
}
// Otherwise...
@@ -302,18 +302,18 @@ inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
assert(!heap_region_containing(p)->is_humongous(), "never evacuate humongous objects");

bool alloc_from_gclab = true;
HeapWord* copy = NULL;
HeapWord* copy = nullptr;

#ifdef ASSERT
if (ShenandoahOOMDuringEvacALot &&
(os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
copy = NULL;
copy = nullptr;
} else {
#endif
if (UseTLAB) {
copy = allocate_from_gclab(thread, size);
}
if (copy == NULL) {
if (copy == nullptr) {
ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size);
copy = allocate_memory(req);
alloc_from_gclab = false;
@@ -322,7 +322,7 @@ inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
}
#endif

if (copy == NULL) {
if (copy == nullptr) {
control_thread()->handle_alloc_failure_evac(size);

_oom_evac_handler.handle_out_of_memory_during_evacuation();
@@ -340,7 +340,7 @@ inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
if (result == copy_val) {
// Successfully evacuated. Our copy is now the public one!
shenandoah_assert_correct(NULL, copy_val);
shenandoah_assert_correct(nullptr, copy_val);
return copy_val;
} else {
// Failed to evacuate. We need to deal with the object that is left behind. Since this
@@ -358,9 +358,9 @@ inline oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
} else {
fill_with_object(copy, size);
shenandoah_assert_correct(NULL, copy_val);
shenandoah_assert_correct(nullptr, copy_val);
}
shenandoah_assert_correct(NULL, result);
shenandoah_assert_correct(nullptr, result);
return result;
}
}
@@ -371,12 +371,12 @@ inline bool ShenandoahHeap::requires_marking(const void* entry) const {
}

inline bool ShenandoahHeap::in_collection_set(oop p) const {
assert(collection_set() != NULL, "Sanity");
assert(collection_set() != nullptr, "Sanity");
return collection_set()->is_in(p);
}

inline bool ShenandoahHeap::in_collection_set_loc(void* p) const {
assert(collection_set() != NULL, "Sanity");
assert(collection_set() != nullptr, "Sanity");
return collection_set()->is_in_loc(p);
}

@@ -565,7 +565,7 @@ inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx)
if (region_idx < _num_regions) {
return _regions[region_idx];
} else {
return NULL;
return nullptr;
}
}


@@ -60,7 +60,7 @@ ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool c
_index(index),
_bottom(start),
_end(start + RegionSizeWords),
_new_top(NULL),
_new_top(nullptr),
_empty_time(os::elapsedTime()),
_state(committed ? _empty_committed : _empty_uncommitted),
_top(start),
@@ -455,7 +455,7 @@ HeapWord* ShenandoahHeapRegion::block_start(const void* p) const {
last = cur;
cur += cast_to_oop(cur)->size();
}
shenandoah_assert_correct(NULL, cast_to_oop(last));
shenandoah_assert_correct(nullptr, cast_to_oop(last));
return last;
}
}

@@ -334,7 +334,7 @@ public:
return _index;
}

// Allocation (return NULL if full)
// Allocation (return null if full)
inline HeapWord* allocate(size_t word_size, ShenandoahAllocRequest::Type type);

inline void clear_live_data();

@@ -48,7 +48,7 @@ HeapWord* ShenandoahHeapRegion::allocate(size_t size, ShenandoahAllocRequest::Ty

return obj;
} else {
return NULL;
return nullptr;
}
}


@@ -70,7 +70,7 @@ ShenandoahHeapRegionCounters::ShenandoahHeapRegionCounters() :
}

ShenandoahHeapRegionCounters::~ShenandoahHeapRegionCounters() {
if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
if (_name_space != nullptr) FREE_C_HEAP_ARRAY(char, _name_space);
}

void ShenandoahHeapRegionCounters::update() {

@@ -77,7 +77,7 @@ ShenandoahHeapRegion* ShenandoahHeapRegionSetIterator::next() {
return _heap->get_region(index);
}
}
return NULL;
return nullptr;
}

void ShenandoahHeapRegionSet::print_on(outputStream* out) const {

@@ -44,7 +44,7 @@ void ShenandoahSimpleLock::unlock() {
}

ShenandoahReentrantLock::ShenandoahReentrantLock() :
ShenandoahSimpleLock(), _owner(NULL), _count(0) {
ShenandoahSimpleLock(), _owner(nullptr), _count(0) {
assert(os::mutex_init_done(), "Too early!");
}

@@ -71,7 +71,7 @@ void ShenandoahReentrantLock::unlock() {
_count--;

if (_count == 0) {
Atomic::store(&_owner, (Thread*)NULL);
Atomic::store(&_owner, (Thread*)nullptr);
ShenandoahSimpleLock::unlock();
}
}

@@ -41,7 +41,7 @@ private:
shenandoah_padding(2);

public:
ShenandoahLock() : _state(unlocked), _owner(NULL) {};
ShenandoahLock() : _state(unlocked), _owner(nullptr) {};

void lock() {
#ifdef ASSERT
@@ -50,7 +50,7 @@ public:
Thread::SpinAcquire(&_state, "Shenandoah Heap Lock");
#ifdef ASSERT
assert(_state == locked, "must be locked");
assert(_owner == NULL, "must not be owned");
assert(_owner == nullptr, "must not be owned");
_owner = Thread::current();
#endif
}
@@ -58,7 +58,7 @@ public:
void unlock() {
#ifdef ASSERT
assert (_owner == Thread::current(), "sanity");
_owner = NULL;
_owner = nullptr;
#endif
Thread::SpinRelease(&_state);
}
@@ -78,13 +78,13 @@ private:
ShenandoahLock* const _lock;
public:
ShenandoahLocker(ShenandoahLock* lock) : _lock(lock) {
if (_lock != NULL) {
if (_lock != nullptr) {
_lock->lock();
}
}

~ShenandoahLocker() {
if (_lock != NULL) {
if (_lock != nullptr) {
_lock->unlock();
}
}
@@ -123,13 +123,13 @@ private:
public:
ShenandoahReentrantLocker(ShenandoahReentrantLock* lock) :
_lock(lock) {
if (_lock != NULL) {
if (_lock != nullptr) {
_lock->lock();
}
}

~ShenandoahReentrantLocker() {
if (_lock != NULL) {
if (_lock != nullptr) {
assert(_lock->owned_by_self(), "Must be owner");
_lock->unlock();
}

@@ -139,7 +139,7 @@ void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint w
"Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

q = queues->claim_next();
while (q != NULL) {
while (q != nullptr) {
if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
return;
}

@@ -60,9 +60,9 @@ template <class T, StringDedupMode STRING_DEDUP>
void ShenandoahMark::do_task(ShenandoahObjToScanQueue* q, T* cl, ShenandoahLiveData* live_data, StringDedup::Requests* const req, ShenandoahMarkTask* task) {
oop obj = task->obj();

shenandoah_assert_not_forwarded(NULL, obj);
shenandoah_assert_marked(NULL, obj);
shenandoah_assert_not_in_cset_except(NULL, obj, ShenandoahHeap::heap()->cancelled_gc());
shenandoah_assert_not_forwarded(nullptr, obj);
shenandoah_assert_marked(nullptr, obj);
shenandoah_assert_not_in_cset_except(nullptr, obj, ShenandoahHeap::heap()->cancelled_gc());

// Are we in weak subgraph scan?
bool weak = task->is_weak();
@@ -121,7 +121,7 @@ inline void ShenandoahMark::count_liveness(ShenandoahLiveData* live_data, oop ob
live_data[region_idx] = (ShenandoahLiveData) new_val;
}
} else {
shenandoah_assert_in_correct_region(NULL, obj);
shenandoah_assert_in_correct_region(nullptr, obj);
size_t num_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);

for (size_t i = region_idx; i < region_idx + num_regions; i++) {

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Red Hat, Inc. and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -45,7 +45,7 @@ size_t ShenandoahMarkBitMap::mark_distance() {

HeapWord* ShenandoahMarkBitMap::get_next_marked_addr(const HeapWord* addr,
const HeapWord* limit) const {
assert(limit != NULL, "limit must not be NULL");
assert(limit != nullptr, "limit must not be null");
// Round addr up to a possible object boundary to be safe.
size_t const addr_offset = address_to_index(align_up(addr, HeapWordSize << LogMinObjAlignment));
size_t const limit_offset = address_to_index(limit);

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, Red Hat, Inc. and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -160,8 +160,8 @@ public:
inline bool is_marked_weak(HeapWord* addr) const;

// Return the address corresponding to the next marked bit at or after
// "addr", and before "limit", if "limit" is non-NULL. If there is no
// such bit, returns "limit" if that is non-NULL, or else "endWord()".
// "addr", and before "limit", if "limit" is non-null. If there is no
// such bit, returns "limit" if that is non-null, or else "endWord()".
HeapWord* get_next_marked_addr(const HeapWord* addr,
const HeapWord* limit) const;


@@ -57,8 +57,8 @@ public:
};

ShenandoahMonitoringSupport::ShenandoahMonitoringSupport(ShenandoahHeap* heap) :
_partial_counters(NULL),
_full_counters(NULL)
_partial_counters(nullptr),
_full_counters(nullptr)
{
// Collection counters do not fit Shenandoah very well.
// We record partial cycles as "young", and full cycles (including full STW GC) as "old".

@@ -32,7 +32,7 @@
#include "runtime/continuation.hpp"

ShenandoahNMethod::ShenandoahNMethod(nmethod* nm, GrowableArray<oop*>& oops, bool non_immediate_oops) :
_nm(nm), _oops(NULL), _oops_count(0), _unregistered(false) {
_nm(nm), _oops(nullptr), _oops_count(0), _unregistered(false) {

if (!oops.is_empty()) {
_oops_count = oops.length();
@@ -47,7 +47,7 @@ ShenandoahNMethod::ShenandoahNMethod(nmethod* nm, GrowableArray<oop*>& oops, boo
}

ShenandoahNMethod::~ShenandoahNMethod() {
if (_oops != NULL) {
if (_oops != nullptr) {
FREE_C_HEAP_ARRAY(oop*, _oops);
}
}
@@ -92,9 +92,9 @@ void ShenandoahNMethod::update() {

detect_reloc_oops(nm(), oops, non_immediate_oops);
if (oops.length() != _oops_count) {
if (_oops != NULL) {
if (_oops != nullptr) {
FREE_C_HEAP_ARRAY(oop*, _oops);
_oops = NULL;
_oops = nullptr;
}

_oops_count = oops.length();
@@ -129,14 +129,14 @@ void ShenandoahNMethod::detect_reloc_oops(nmethod* nm, GrowableArray<oop*>& oops
}

oop value = r->oop_value();
if (value != NULL) {
if (value != nullptr) {
oop* addr = r->oop_addr();
shenandoah_assert_correct(addr, value);
shenandoah_assert_not_in_cset_except(addr, value, ShenandoahHeap::heap()->cancelled_gc());
shenandoah_assert_not_forwarded(addr, value);
// Non-NULL immediate oop found. NULL oops can safely be
// Non-null immediate oop found. null oops can safely be
// ignored since the method will be re-registered if they
// are later patched to be non-NULL.
// are later patched to be non-null.
oops.push(addr);
}
}
@@ -153,7 +153,7 @@ ShenandoahNMethod* ShenandoahNMethod::for_nmethod(nmethod* nm) {

void ShenandoahNMethod::heal_nmethod(nmethod* nm) {
ShenandoahNMethod* data = gc_data(nm);
assert(data != NULL, "Sanity");
assert(data != nullptr, "Sanity");
assert(data->lock()->owned_by_self(), "Must hold the lock");

ShenandoahHeap* const heap = ShenandoahHeap::heap();
@@ -178,7 +178,7 @@ void ShenandoahNMethod::assert_correct() {
oop *loc = _oops[c];
assert(_nm->code_contains((address) loc) || _nm->oops_contains(loc), "nmethod should contain the oop*");
oop o = RawAccess<>::oop_load(loc);
shenandoah_assert_correct_except(loc, o, o == NULL || heap->is_full_gc_move_in_progress());
shenandoah_assert_correct_except(loc, o, o == nullptr || heap->is_full_gc_move_in_progress());
}

oop* const begin = _nm->oops_begin();
@@ -186,7 +186,7 @@ void ShenandoahNMethod::assert_correct() {
for (oop* p = begin; p < end; p++) {
if (*p != Universe::non_oop_word()) {
oop o = RawAccess<>::oop_load(p);
shenandoah_assert_correct_except(p, o, o == NULL || heap->is_full_gc_move_in_progress());
shenandoah_assert_correct_except(p, o, o == nullptr || heap->is_full_gc_move_in_progress());
}
}
}
@@ -263,7 +263,7 @@ ShenandoahNMethodTable::ShenandoahNMethodTable() :
}

ShenandoahNMethodTable::~ShenandoahNMethodTable() {
assert(_list != NULL, "Sanity");
assert(_list != nullptr, "Sanity");
_list->release();
}

@@ -273,7 +273,7 @@ void ShenandoahNMethodTable::register_nmethod(nmethod* nm) {

ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);

if (data != NULL) {
if (data != nullptr) {
assert(contain(nm), "Must have been registered");
assert(nm == data->nm(), "Must be same nmethod");
// Prevent updating a nmethod while concurrent iteration is in progress.
@@ -284,7 +284,7 @@ void ShenandoahNMethodTable::register_nmethod(nmethod* nm) {
// For a new nmethod, we can safely append it to the list, because
// concurrent iteration will not touch it.
data = ShenandoahNMethod::for_nmethod(nm);
assert(data != NULL, "Sanity");
assert(data != nullptr, "Sanity");
ShenandoahNMethod::attach_gc_data(nm, data);
ShenandoahLocker locker(&_lock);
log_register_nmethod(nm);
@@ -298,14 +298,14 @@ void ShenandoahNMethodTable::unregister_nmethod(nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);

ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);
assert(data != NULL, "Sanity");
assert(data != nullptr, "Sanity");
log_unregister_nmethod(nm);
ShenandoahLocker locker(&_lock);
assert(contain(nm), "Must have been registered");

int idx = index_of(nm);
assert(idx >= 0 && idx < _index, "Invalid index");
ShenandoahNMethod::attach_gc_data(nm, NULL);
ShenandoahNMethod::attach_gc_data(nm, nullptr);
remove(idx);
}

@@ -376,7 +376,7 @@ ShenandoahNMethodTableSnapshot* ShenandoahNMethodTable::snapshot_for_iteration()
void ShenandoahNMethodTable::finish_iteration(ShenandoahNMethodTableSnapshot* snapshot) {
assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
assert(iteration_in_progress(), "Why we here?");
assert(snapshot != NULL, "No snapshot");
assert(snapshot != nullptr, "No snapshot");
_itr_cnt--;

delete snapshot;
@@ -429,7 +429,7 @@ ShenandoahNMethodList::ShenandoahNMethodList(int size) :
}

ShenandoahNMethodList::~ShenandoahNMethodList() {
assert(_list != NULL, "Sanity");
assert(_list != nullptr, "Sanity");
assert(_ref_count == 0, "Must be");
FREE_C_HEAP_ARRAY(ShenandoahNMethod*, _list);
}
@@ -478,7 +478,7 @@ void ShenandoahNMethodTableSnapshot::parallel_blobs_do(CodeBlobClosure *f) {

for (size_t idx = start; idx < end; idx++) {
ShenandoahNMethod* nmr = list[idx];
assert(nmr != NULL, "Sanity");
assert(nmr != nullptr, "Sanity");
if (nmr->is_unregistered()) {
continue;
}
@@ -502,7 +502,7 @@ void ShenandoahNMethodTableSnapshot::concurrent_nmethods_do(NMethodClosure* cl)

for (size_t idx = start; idx < end; idx++) {
ShenandoahNMethod* data = list[idx];
assert(data != NULL, "Should not be NULL");
assert(data != nullptr, "Should not be null");
if (!data->is_unregistered()) {
cl->do_nmethod(data->nm());
}
@@ -511,7 +511,7 @@ void ShenandoahNMethodTableSnapshot::concurrent_nmethods_do(NMethodClosure* cl)
}

ShenandoahConcurrentNMethodIterator::ShenandoahConcurrentNMethodIterator(ShenandoahNMethodTable* table) :
_table(table), _table_snapshot(NULL) {
_table(table), _table_snapshot(nullptr) {
}

void ShenandoahConcurrentNMethodIterator::nmethods_do_begin() {
@@ -520,7 +520,7 @@ void ShenandoahConcurrentNMethodIterator::nmethods_do_begin() {
}

void ShenandoahConcurrentNMethodIterator::nmethods_do(NMethodClosure* cl) {
assert(_table_snapshot != NULL, "Must first call nmethod_do_begin()");
assert(_table_snapshot != nullptr, "Must first call nmethod_do_begin()");
_table_snapshot->concurrent_nmethods_do(cl);
}
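For orientation, the hunks above follow a snapshot-and-iterate pattern: registration and unregistration happen under a lock, iteration first takes a snapshot, and workers then claim chunks of the snapshot while skipping entries that were unregistered in the meantime. A minimal standalone sketch of that pattern (simplified stand-in types, not the HotSpot sources):

#include <algorithm>
#include <atomic>
#include <cstddef>
#include <mutex>
#include <vector>

struct Method {                              // stand-in for an nmethod plus its GC data
  std::atomic<bool> unregistered{false};
};

class MethodTableSnapshot {
public:
  explicit MethodTableSnapshot(std::vector<Method*> list)
      : _list(std::move(list)), _claimed(0) {}

  // Called concurrently by several workers; each claims a fixed-size chunk.
  template <typename Closure>
  void parallel_do(Closure cl) {
    const std::size_t stride = 256;
    std::size_t start;
    while ((start = _claimed.fetch_add(stride)) < _list.size()) {
      std::size_t end = std::min(start + stride, _list.size());
      for (std::size_t i = start; i < end; i++) {
        Method* m = _list[i];
        if (m->unregistered.load()) continue;   // unregistered after the snapshot: skip
        cl(m);
      }
    }
  }

private:
  std::vector<Method*>     _list;
  std::atomic<std::size_t> _claimed;
};

class MethodTable {
public:
  void register_method(Method* m) {
    std::lock_guard<std::mutex> g(_lock);     // stands in for the table lock
    _list.push_back(m);
  }
  void unregister_method(Method* m) {
    std::lock_guard<std::mutex> g(_lock);
    m->unregistered.store(true);              // snapshot iterators will skip it
  }
  MethodTableSnapshot snapshot_for_iteration() {
    std::lock_guard<std::mutex> g(_lock);
    return MethodTableSnapshot(_list);        // copy; the real code ref-counts the array instead
  }
private:
  std::mutex _lock;
  std::vector<Method*> _list;
};

The real table reference-counts its backing array so a snapshot stays valid while the table keeps growing; the sketch sidesteps that by copying the vector, which is enough to show the claim-a-chunk iteration and the is_unregistered() skip.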

@@ -80,9 +80,9 @@ void ShenandoahNMethod::heal_nmethod_metadata(ShenandoahNMethod* nmethod_data) {

void ShenandoahNMethod::disarm_nmethod(nmethod* nm) {
BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod();
assert(bs != NULL || !ShenandoahNMethodBarrier,
assert(bs != nullptr || !ShenandoahNMethodBarrier,
"Must have nmethod barrier for concurrent GC");
if (bs != NULL && bs->is_armed(nm)) {
if (bs != nullptr && bs->is_armed(nm)) {
bs->disarm(nm);
}
}
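The disarm_nmethod() change above sits on top of nmethod entry barriers. As a rough sketch only (the guard/epoch scheme below is an assumption used for illustration, not the actual BarrierSetNMethod implementation), the idea is a per-method guard word compared against a global arm value; disarming stores the current value so subsequent entries skip the slow path:

#include <atomic>

// Global "armed" value; bumping it re-arms every compiled method at once.
static std::atomic<int> g_arm_value{1};

struct CompiledMethod {
  std::atomic<int> guard{0};   // per-method guard word checked on entry
};

static bool is_armed(const CompiledMethod* m) {
  return m->guard.load(std::memory_order_acquire) !=
         g_arm_value.load(std::memory_order_relaxed);
}

static void disarm(CompiledMethod* m) {
  // Analogue of bs->disarm(nm): record the current arm value so the
  // method's entry check passes until the next global re-arm.
  m->guard.store(g_arm_value.load(std::memory_order_relaxed),
                 std::memory_order_release);
}

static void on_method_entry(CompiledMethod* m) {
  if (is_armed(m)) {
    // Slow path would heal the method's oops/metadata here, then disarm
    // so later entries stay on the fast path.
    disarm(m);
  }
}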

@@ -30,14 +30,14 @@
HdrSeq::HdrSeq() {
_hdr = NEW_C_HEAP_ARRAY(int*, MagBuckets, mtInternal);
for (int c = 0; c < MagBuckets; c++) {
_hdr[c] = NULL;
_hdr[c] = nullptr;
}
}

HdrSeq::~HdrSeq() {
for (int c = 0; c < MagBuckets; c++) {
int* sub = _hdr[c];
if (sub != NULL) {
if (sub != nullptr) {
FREE_C_HEAP_ARRAY(int, sub);
}
}
@@ -93,7 +93,7 @@ void HdrSeq::add(double val) {
}

int* b = _hdr[bucket];
if (b == NULL) {
if (b == nullptr) {
b = NEW_C_HEAP_ARRAY(int, ValBuckets, mtInternal);
for (int c = 0; c < ValBuckets; c++) {
b[c] = 0;
@@ -108,7 +108,7 @@ double HdrSeq::percentile(double level) const {
int target = MAX2(1, (int) (level * num() / 100));
int cnt = 0;
for (int mag = 0; mag < MagBuckets; mag++) {
if (_hdr[mag] != NULL) {
if (_hdr[mag] != nullptr) {
for (int val = 0; val < ValBuckets; val++) {
cnt += _hdr[mag][val];
if (cnt >= target) {
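HdrSeq above is a two-level histogram: an array of magnitude buckets, each lazily allocated and holding fixed-resolution value counters, with percentile() walking the buckets in increasing order until the target rank is reached. A compilable standalone sketch of that shape (bucket counts, ranges and the exact bucketing formulas are simplified assumptions, not the HotSpot constants):

#include <cmath>
#include <cstdio>

// Two-level histogram: a small array of "magnitude" rows, each allocated
// lazily and holding fixed-resolution counters for values of that magnitude.
class HdrSketch {
  static const int MagBuckets = 20;    // decades covered, starting at MagMin
  static const int ValBuckets = 512;   // resolution within one decade
  static const int MagMin     = -9;    // smallest decade tracked (1e-9)

  int* _hdr[MagBuckets];
  long _num;

public:
  HdrSketch() : _num(0) {
    for (int c = 0; c < MagBuckets; c++) _hdr[c] = nullptr;   // rows allocated on demand
  }
  ~HdrSketch() {
    for (int c = 0; c < MagBuckets; c++) delete[] _hdr[c];
  }

  void add(double v) {
    if (v <= 0) return;
    int mag = (int)std::floor(std::log10(v)) - MagMin;
    if (mag < 0) mag = 0;
    if (mag >= MagBuckets) mag = MagBuckets - 1;

    int* row = _hdr[mag];
    if (row == nullptr) {                      // lazy allocation, as in HdrSeq::add
      row = new int[ValBuckets]();
      _hdr[mag] = row;
    }
    double lo = std::pow(10.0, mag + MagMin);
    int val = (int)((v - lo) / (lo * 9.0) * (ValBuckets - 1));
    if (val < 0) val = 0;
    if (val >= ValBuckets) val = ValBuckets - 1;
    row[val]++;
    _num++;
  }

  // Walk buckets in increasing value order until the target rank is reached.
  double percentile(double level) const {
    long target = (long)(level * _num / 100);
    if (target < 1) target = 1;
    long cnt = 0;
    for (int mag = 0; mag < MagBuckets; mag++) {
      if (_hdr[mag] == nullptr) continue;
      for (int val = 0; val < ValBuckets; val++) {
        cnt += _hdr[mag][val];
        if (cnt >= target) {
          double lo = std::pow(10.0, mag + MagMin);
          return lo + lo * 9.0 * val / (ValBuckets - 1);
        }
      }
    }
    return 0.0;
  }
};

int main() {
  HdrSketch s;
  for (int i = 1; i <= 1000; i++) s.add(i * 0.001);   // samples from 1ms to 1s
  std::printf("p50 ~ %.3f, p99 ~ %.3f\n", s.percentile(50), s.percentile(99));
  return 0;
}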

@@ -56,7 +56,7 @@ ShenandoahPhaseTimings::ShenandoahPhaseTimings(uint max_workers) :
// Initialize everything to sane defaults
for (uint i = 0; i < _num_phases; i++) {
#define SHENANDOAH_WORKER_DATA_NULL(type, title) \
_worker_data[i] = NULL;
_worker_data[i] = nullptr;
SHENANDOAH_PAR_PHASE_DO(,, SHENANDOAH_WORKER_DATA_NULL)
#undef SHENANDOAH_WORKER_DATA_NULL
_cycle_data[i] = uninitialized();
@@ -69,14 +69,14 @@ ShenandoahPhaseTimings::ShenandoahPhaseTimings(uint max_workers) :
if (is_worker_phase(Phase(i))) {
int c = 0;
#define SHENANDOAH_WORKER_DATA_INIT(type, title) \
if (c++ != 0) _worker_data[i + c] = new ShenandoahWorkerData(NULL, title, _max_workers);
if (c++ != 0) _worker_data[i + c] = new ShenandoahWorkerData(nullptr, title, _max_workers);
SHENANDOAH_PAR_PHASE_DO(,, SHENANDOAH_WORKER_DATA_INIT)
#undef SHENANDOAH_WORKER_DATA_INIT
}
}

_policy = ShenandoahHeap::heap()->shenandoah_policy();
assert(_policy != NULL, "Can not be NULL");
assert(_policy != nullptr, "Can not be null");
}

ShenandoahPhaseTimings::Phase ShenandoahPhaseTimings::worker_par_phase(Phase phase, ParPhase par_phase) {
@@ -89,7 +89,7 @@ ShenandoahPhaseTimings::Phase ShenandoahPhaseTimings::worker_par_phase(Phase pha
ShenandoahWorkerData* ShenandoahPhaseTimings::worker_data(Phase phase, ParPhase par_phase) {
Phase p = worker_par_phase(phase, par_phase);
ShenandoahWorkerData* wd = _worker_data[p];
assert(wd != NULL, "Counter initialized: %s", phase_name(p));
assert(wd != nullptr, "Counter initialized: %s", phase_name(p));
return wd;
}

@@ -219,7 +219,7 @@ void ShenandoahPhaseTimings::flush_cycle_to_global() {
_global_data[i].add(_cycle_data[i]);
_cycle_data[i] = uninitialized();
}
if (_worker_data[i] != NULL) {
if (_worker_data[i] != nullptr) {
_worker_data[i]->reset();
}
}
@@ -243,7 +243,7 @@ void ShenandoahPhaseTimings::print_cycle_on(outputStream* out) const {
}
}

if (_worker_data[i] != NULL) {
if (_worker_data[i] != nullptr) {
out->print(", workers (us): ");
for (uint c = 0; c < _max_workers; c++) {
double tv = _worker_data[i]->get(c);

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Red Hat, Inc. and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -53,7 +53,7 @@ static const char* reference_type_name(ReferenceType type) {

default:
ShouldNotReachHere();
return NULL;
return nullptr;
}
}

@@ -71,7 +71,7 @@ void set_oop_field<narrowOop>(narrowOop* field, oop value) {
}

static oop lrb(oop obj) {
if (obj != NULL && ShenandoahHeap::heap()->marking_context()->is_marked(obj)) {
if (obj != nullptr && ShenandoahHeap::heap()->marking_context()->is_marked(obj)) {
return ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);
} else {
return obj;
@@ -120,7 +120,7 @@ void reference_set_discovered<narrowOop>(oop reference, oop discovered) {
template<typename T>
static bool reference_cas_discovered(oop reference, oop discovered) {
T* addr = reinterpret_cast<T *>(java_lang_ref_Reference::discovered_addr_raw(reference));
return ShenandoahHeap::atomic_update_oop_check(discovered, addr, NULL);
return ShenandoahHeap::atomic_update_oop_check(discovered, addr, nullptr);
}

template <typename T>
@@ -144,15 +144,15 @@ static void soft_reference_update_clock() {
}

ShenandoahRefProcThreadLocal::ShenandoahRefProcThreadLocal() :
_discovered_list(NULL),
_discovered_list(nullptr),
_encountered_count(),
_discovered_count(),
_enqueued_count() {
}

void ShenandoahRefProcThreadLocal::reset() {
_discovered_list = NULL;
_mark_closure = NULL;
_discovered_list = nullptr;
_mark_closure = nullptr;
for (uint i = 0; i < reference_type_count; i++) {
_encountered_count[i] = 0;
_discovered_count[i] = 0;
@@ -186,9 +186,9 @@ void ShenandoahRefProcThreadLocal::set_discovered_list_head<oop>(oop head) {
}

ShenandoahReferenceProcessor::ShenandoahReferenceProcessor(uint max_workers) :
_soft_reference_policy(NULL),
_soft_reference_policy(nullptr),
_ref_proc_thread_locals(NEW_C_HEAP_ARRAY(ShenandoahRefProcThreadLocal, max_workers, mtGC)),
_pending_list(NULL),
_pending_list(nullptr),
_pending_list_tail(&_pending_list),
_iterate_discovered_list_id(0U),
_stats() {
@@ -227,11 +227,11 @@ bool ShenandoahReferenceProcessor::is_inactive(oop reference, oop referent, Refe
if (type == REF_FINAL) {
// A FinalReference is inactive if its next field is non-null. An application can't
// call enqueue() or clear() on a FinalReference.
return reference_next<T>(reference) != NULL;
return reference_next<T>(reference) != nullptr;
} else {
// A non-FinalReference is inactive if the referent is null. The referent can only
// be null if the application called Reference.enqueue() or Reference.clear().
return referent == NULL;
return referent == nullptr;
}
}
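Reduced to a standalone predicate over simplified stand-in fields (not the VM's oop accessors), the rule the comments above spell out looks like this:

struct RefState {
  const void* referent;   // cleared (null) once the reference is enqueued/cleared
  const void* next;       // non-null (self-looped) once a FinalReference is inactive
  bool        is_final;
};

// FinalReference: inactive once 'next' is non-null; it cannot be cleared or
// enqueued by the application, so the referent itself is not consulted.
// Every other reference type: inactive once the referent has been cleared.
static bool is_inactive(const RefState& r) {
  return r.is_final ? (r.next != nullptr) : (r.referent == nullptr);
}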

@@ -248,7 +248,7 @@ bool ShenandoahReferenceProcessor::is_softly_live(oop reference, ReferenceType t
// Ask SoftReference policy
const jlong clock = java_lang_ref_SoftReference::clock();
assert(clock != 0, "Clock not initialized");
assert(_soft_reference_policy != NULL, "Policy not initialized");
assert(_soft_reference_policy != nullptr, "Policy not initialized");
return !_soft_reference_policy->should_clear_reference(reference, clock);
}

@@ -279,7 +279,7 @@ bool ShenandoahReferenceProcessor::should_discover(oop reference, ReferenceType
template <typename T>
bool ShenandoahReferenceProcessor::should_drop(oop reference, ReferenceType type) const {
const oop referent = reference_referent<T>(reference);
if (referent == NULL) {
if (referent == nullptr) {
// Reference has been cleared, by a call to Reference.enqueue()
// or Reference.clear() from the application, which means we
// should drop the reference.
@@ -302,7 +302,7 @@ void ShenandoahReferenceProcessor::make_inactive(oop reference, ReferenceType ty
// to finalize(). A FinalReference is instead made inactive by self-looping the
// next field. An application can't call FinalReference.enqueue(), so there is
// no race to worry about when setting the next field.
assert(reference_next<T>(reference) == NULL, "Already inactive");
assert(reference_next<T>(reference) == nullptr, "Already inactive");
assert(ShenandoahHeap::heap()->marking_context()->is_marked(reference_referent<T>(reference)), "only make inactive final refs with alive referents");
reference_set_next(reference, reference);
} else {
@@ -318,7 +318,7 @@ bool ShenandoahReferenceProcessor::discover(oop reference, ReferenceType type, u
return false;
}

if (reference_discovered<T>(reference) != NULL) {
if (reference_discovered<T>(reference) != nullptr) {
// Already discovered. This can happen if the reference is marked finalizable first, and then strong,
// in which case it will be seen 2x by marking.
log_trace(gc,ref)("Reference already discovered: " PTR_FORMAT, p2i(reference));
@@ -340,9 +340,9 @@ bool ShenandoahReferenceProcessor::discover(oop reference, ReferenceType type, u
// Add reference to discovered list
ShenandoahRefProcThreadLocal& refproc_data = _ref_proc_thread_locals[worker_id];
oop discovered_head = refproc_data.discovered_list_head<T>();
if (discovered_head == NULL) {
if (discovered_head == nullptr) {
// Self-loop tail of list. We distinguish discovered from not-discovered references by looking at their
// discovered field: if it is NULL, then it is not-yet discovered, otherwise it is discovered
// discovered field: if it is null, then it is not-yet discovered, otherwise it is discovered
discovered_head = reference;
}
if (reference_cas_discovered<T>(reference, discovered_head)) {
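A standalone sketch of the discovery step above, with std::atomic standing in for the VM's oop CAS helper (simplified types; not the HotSpot code): a null discovered field means not-yet-discovered, an empty per-worker list is started by self-looping its first element, and discovery only succeeds for the thread whose CAS installs the link over null:

#include <atomic>

struct Ref {
  std::atomic<Ref*> discovered{nullptr};   // null => not yet discovered
};

struct WorkerDiscoveredList {
  Ref* head = nullptr;   // per-worker list head; links run through 'discovered'
};

// Returns true if this worker discovered 'r'; false if it was already
// discovered, possibly by a racing worker that won the CAS.
static bool discover(Ref* r, WorkerDiscoveredList& wl) {
  if (r->discovered.load(std::memory_order_acquire) != nullptr) {
    return false;                          // already on somebody's list
  }
  Ref* link = wl.head;
  if (link == nullptr) {
    link = r;                              // empty list: self-loop marks the tail
  }
  Ref* expected = nullptr;
  if (!r->discovered.compare_exchange_strong(expected, link,
                                             std::memory_order_release,
                                             std::memory_order_acquire)) {
    return false;                          // lost the race
  }
  wl.head = r;                             // prepend: 'r' becomes the new head
  return true;
}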
@@ -377,13 +377,13 @@ oop ShenandoahReferenceProcessor::drop(oop reference, ReferenceType type) {

#ifdef ASSERT
oop referent = reference_referent<T>(reference);
assert(referent == NULL || ShenandoahHeap::heap()->marking_context()->is_marked(referent),
assert(referent == nullptr || ShenandoahHeap::heap()->marking_context()->is_marked(referent),
"only drop references with alive referents");
#endif

// Unlink and return next in list
oop next = reference_discovered<T>(reference);
reference_set_discovered<T>(reference, NULL);
reference_set_discovered<T>(reference, nullptr);
return next;
}

@@ -414,7 +414,7 @@ void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLoc
T* p = list;
while (true) {
const oop reference = lrb(CompressedOops::decode(*p));
if (reference == NULL) {
if (reference == nullptr) {
break;
}
log_trace(gc, ref)("Processing reference: " PTR_FORMAT, p2i(reference));
@@ -428,8 +428,8 @@ void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLoc

const oop discovered = lrb(reference_discovered<T>(reference));
if (reference == discovered) {
// Reset terminating self-loop to NULL
reference_set_discovered<T>(reference, oop(NULL));
// Reset terminating self-loop to null
reference_set_discovered<T>(reference, oop(nullptr));
break;
}
}
@@ -440,13 +440,13 @@ void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLoc
shenandoah_assert_not_in_cset_except(&head, head, ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier);
oop prev = Atomic::xchg(&_pending_list, head);
RawAccess<>::oop_store(p, prev);
if (prev == NULL) {
if (prev == nullptr) {
// First to prepend to list, record tail
_pending_list_tail = reinterpret_cast<void*>(p);
}

// Clear discovered list
set_oop_field(list, oop(NULL));
set_oop_field(list, oop(nullptr));
}
}
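The prepend above (Atomic::xchg on _pending_list, then storing the previous head into the local tail's link slot) can be sketched standalone with std::atomic::exchange; the names and types below are illustrative only:

#include <atomic>

struct Node {
  Node* next = nullptr;
};

static std::atomic<Node*> g_pending_head{nullptr};
static Node**             g_pending_tail = nullptr;   // link slot of the last node

// Publish a locally built chain, whose last link slot is 'tail_link',
// onto the shared pending list.
static void prepend_chain(Node* head, Node** tail_link) {
  // Swap our head in; whatever was there before becomes our tail's successor.
  Node* prev = g_pending_head.exchange(head, std::memory_order_acq_rel);
  *tail_link = prev;
  if (prev == nullptr) {
    // Only the first chain to arrive can observe a null previous head,
    // so the overall tail slot is recorded exactly once per batch.
    g_pending_tail = tail_link;
  }
}

This mirrors the shape of the code above: later batches splice in ahead of earlier ones, and enqueue_references() eventually resets both the head and the tail slot once the accumulated list has been enqueued.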

@@ -519,7 +519,7 @@ void ShenandoahReferenceProcessor::enqueue_references_locked() {
}

void ShenandoahReferenceProcessor::enqueue_references(bool concurrent) {
if (_pending_list == NULL) {
if (_pending_list == nullptr) {
// Nothing to enqueue
return;
}
@@ -538,7 +538,7 @@ void ShenandoahReferenceProcessor::enqueue_references(bool concurrent) {
}

// Reset internal pending list
_pending_list = NULL;
_pending_list = nullptr;
_pending_list_tail = &_pending_list;
}

@@ -547,7 +547,7 @@ void ShenandoahReferenceProcessor::clean_discovered_list(T* list) {
T discovered = *list;
while (!CompressedOops::is_null(discovered)) {
oop discovered_ref = CompressedOops::decode_not_null(discovered);
set_oop_field<T>(list, oop(NULL));
set_oop_field<T>(list, oop(nullptr));
list = reference_discovered_addr<T>(discovered_ref);
discovered = *list;
}
@@ -562,9 +562,9 @@ void ShenandoahReferenceProcessor::abandon_partial_discovery() {
clean_discovered_list<oop>(_ref_proc_thread_locals[index].discovered_list_addr<oop>());
}
}
if (_pending_list != NULL) {
if (_pending_list != nullptr) {
oop pending = _pending_list;
_pending_list = NULL;
_pending_list = nullptr;
if (UseCompressedOops) {
narrowOop* list = reference_discovered_addr<narrowOop>(pending);
clean_discovered_list<narrowOop>(list);

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2021, Red Hat, Inc. and/or its affiliates.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -72,7 +72,7 @@ typedef size_t Counters[reference_type_count];
* be processed (e.g. enqueued in its ReferenceQueue) by the Java ReferenceHandler thread.
*
* In order to prevent resurrection by Java threads calling Reference.get() concurrently while we are clearing
* referents, we employ a special barrier, the native LRB, which returns NULL when the referent is unreachable.
* referents, we employ a special barrier, the native LRB, which returns nullptr when the referent is unreachable.
*/

class ShenandoahRefProcThreadLocal : public CHeapObj<mtGC> {

@@ -128,7 +128,7 @@ ShenandoahConcurrentRootScanner::ShenandoahConcurrentRootScanner(uint n_workers,
_java_threads(phase, n_workers),
_vm_roots(phase),
_cld_roots(phase, n_workers, false /*heap iteration*/),
_codecache_snapshot(NULL),
_codecache_snapshot(nullptr),
_phase(phase) {
if (!ShenandoahHeap::heap()->unload_classes()) {
MutexLocker locker(CodeCache_lock, Mutex::_no_safepoint_check_flag);
@@ -218,7 +218,7 @@ void ShenandoahRootAdjuster::roots_do(uint worker_id, OopClosure* oops) {

// Process heavy-weight/fully parallel roots the last
_code_roots.code_blobs_do(adjust_code_closure, worker_id);
_thread_roots.oops_do(oops, NULL, worker_id);
_thread_roots.oops_do(oops, nullptr, worker_id);
}

ShenandoahHeapIterationRootScanner::ShenandoahHeapIterationRootScanner(uint n_workers) :
@@ -258,7 +258,7 @@ void ShenandoahHeapIterationRootScanner::roots_do(OopClosure* oops) {
// Must use _claim_other to avoid interfering with concurrent CLDG iteration
CLDToOopClosure clds(oops, ClassLoaderData::_claim_other);
ShenandoahMarkCodeBlobClosure code(oops);
ShenandoahParallelOopsDoThreadClosure tc_cl(oops, &code, NULL);
ShenandoahParallelOopsDoThreadClosure tc_cl(oops, &code, nullptr);

ResourceMark rm;

Some files were not shown because too many files have changed in this diff.