Mirror of https://github.com/JetBrains/JetBrainsRuntime.git (synced 2025-12-10 03:19:40 +01:00)
Compare commits
58 Commits
- d7aa349820
- 3b32f6a8ec
- 8f73357004
- 429158218b
- ef4cbec6fb
- e9216efefc
- e5196fc24d
- c98dffa186
- 7d7fc69355
- 42ab8fcfb9
- bf7d40d048
- 5ae32c4c86
- 56ce70c5df
- abc76c6b5b
- 9586817cea
- 38b877e941
- 8f487d26c0
- 500a3a2d0a
- a2f99fd88b
- 0582bd290d
- 3ff83ec49e
- 7c9c8ba363
- ca7b885873
- 92be7821f5
- bcf860703d
- d186dacdb7
- ef45c8154c
- cd9b1bc820
- fcb68ea22d
- eb256deb80
- 156187accc
- a377773fa7
- cae1fd3385
- eb8ee8bdc7
- 2103dc15cb
- 1c72b350e4
- 52338c94f6
- 91f12600d2
- 6c616c71ec
- e94ad551c6
- d735255919
- d024f58e61
- 026975a1aa
- 8adb052b46
- 9658cecde3
- b2e7cda6a0
- 65fda5c02a
- d1b788005b
- bb2611ad43
- e918a59b1d
- 28acca609b
- 029e3bf8f5
- 78158f30ae
- c793de989f
- 15178aa298
- fe3be498b8
- 62fde68708
- af87035b71
```diff
@@ -1,7 +1,7 @@
 [general]
 project=jdk
 jbs=JDK
-version=25
+version=26
 
 [checks]
 error=author,committer,reviewers,merge,issues,executable,symlink,message,hg-tag,whitespace,problemlists,copyright
```

```diff
@@ -28,7 +28,7 @@
 ################################################################################
 
 # Minimum supported versions
-JTREG_MINIMUM_VERSION=7.5.1
+JTREG_MINIMUM_VERSION=7.5.2
 GTEST_MINIMUM_VERSION=1.14.0
 
 ################################################################################
```

```diff
@@ -26,7 +26,7 @@
 # Versions and download locations for dependencies used by GitHub Actions (GHA)
 
 GTEST_VERSION=1.14.0
-JTREG_VERSION=7.5.1+1
+JTREG_VERSION=7.5.2+1
 
 LINUX_X64_BOOT_JDK_EXT=tar.gz
 LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk24/1f9ff9062db4449d8ca828c504ffae90/36/GPL/openjdk-24_linux-x64_bin.tar.gz
```
```diff
@@ -1174,9 +1174,9 @@ var getJibProfilesDependencies = function (input, common) {
         jtreg: {
             server: "jpg",
             product: "jtreg",
-            version: "7.5.1",
+            version: "7.5.2",
             build_number: "1",
-            file: "bundles/jtreg-7.5.1+1.zip",
+            file: "bundles/jtreg-7.5.2+1.zip",
             environment_name: "JT_HOME",
             environment_path: input.get("jtreg", "home_path") + "/bin",
             configure_args: "--with-jtreg=" + input.get("jtreg", "home_path"),
@@ -1192,8 +1192,8 @@ var getJibProfilesDependencies = function (input, common) {
             server: "jpg",
             product: "jcov",
             version: "3.0",
-            build_number: "1",
-            file: "bundles/jcov-3.0+1.zip",
+            build_number: "3",
+            file: "bundles/jcov-3.0+3.zip",
             environment_name: "JCOV_HOME",
         },
```
```diff
@@ -26,17 +26,17 @@
 # Default version, product, and vendor information to use,
 # unless overridden by configure
 
-DEFAULT_VERSION_FEATURE=25
+DEFAULT_VERSION_FEATURE=26
 DEFAULT_VERSION_INTERIM=0
 DEFAULT_VERSION_UPDATE=0
 DEFAULT_VERSION_PATCH=0
 DEFAULT_VERSION_EXTRA1=0
 DEFAULT_VERSION_EXTRA2=0
 DEFAULT_VERSION_EXTRA3=0
-DEFAULT_VERSION_DATE=2025-09-16
-DEFAULT_VERSION_CLASSFILE_MAJOR=69  # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
+DEFAULT_VERSION_DATE=2026-03-17
+DEFAULT_VERSION_CLASSFILE_MAJOR=70  # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
 DEFAULT_VERSION_CLASSFILE_MINOR=0
 DEFAULT_VERSION_DOCS_API_SINCE=11
-DEFAULT_ACCEPTABLE_BOOT_VERSIONS="24 25"
-DEFAULT_JDK_SOURCE_TARGET_VERSION=25
+DEFAULT_ACCEPTABLE_BOOT_VERSIONS="24 25 26"
+DEFAULT_JDK_SOURCE_TARGET_VERSION=26
 DEFAULT_PROMOTED_VERSION_PRE=ea
```
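A quick sanity check on the class-file bump: as the `$EXPR $DEFAULT_VERSION_FEATURE + 44` comment in the hunk above says, the class-file major version tracks the feature release with a fixed offset of 44, so JDK 26 class files carry major version 70. A minimal sketch of the relationship:

```cpp
// Class-file major version = feature release + 44
// (JDK 25 -> 69, JDK 26 -> 70), per the formula in the comment above.
constexpr int classfile_major(int feature) { return feature + 44; }

static_assert(classfile_major(25) == 69, "JDK 25");
static_assert(classfile_major(26) == 70, "JDK 26");
```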
```diff
@@ -46,6 +46,8 @@ CLDR_GEN_DONE := $(GENSRC_DIR)/_cldr-gensrc.marker
 TZ_DATA_DIR := $(MODULE_SRC)/share/data/tzdata
 ZONENAME_TEMPLATE := $(MODULE_SRC)/share/classes/java/time/format/ZoneName.java.template
 
+# The `-utf8` option is used even for US English, as some names
+# may contain non-ASCII characters, such as “Türkiye”.
 $(CLDR_GEN_DONE): $(wildcard $(CLDR_DATA_DIR)/dtd/*.dtd) \
     $(wildcard $(CLDR_DATA_DIR)/main/en*.xml) \
     $(wildcard $(CLDR_DATA_DIR)/supplemental/*.xml) \
@@ -61,7 +63,8 @@ $(CLDR_GEN_DONE): $(wildcard $(CLDR_DATA_DIR)/dtd/*.dtd) \
         -basemodule \
         -year $(COPYRIGHT_YEAR) \
         -zntempfile $(ZONENAME_TEMPLATE) \
-        -tzdatadir $(TZ_DATA_DIR))
+        -tzdatadir $(TZ_DATA_DIR) \
+        -utf8)
 	$(TOUCH) $@
 
 TARGETS += $(CLDR_GEN_DONE)
```

```diff
@@ -45,7 +45,8 @@ $(CLDR_GEN_DONE): $(wildcard $(CLDR_DATA_DIR)/dtd/*.dtd) \
         -baselocales "en-US" \
         -year $(COPYRIGHT_YEAR) \
         -o $(GENSRC_DIR) \
-        -tzdatadir $(TZ_DATA_DIR))
+        -tzdatadir $(TZ_DATA_DIR) \
+        -utf8)
 	$(TOUCH) $@
 
 TARGETS += $(CLDR_GEN_DONE)
```
```diff
@@ -187,22 +187,18 @@ public class HelloWorld {
             new Run("none", "Hello from Cupertino")
         }),
         new Paragraph("title", new Run[] {
-            new Run("none", "\u53F0\u5317\u554F\u5019\u60A8\u0021")
+            new Run("none", "台北問候您!")
         }),
         new Paragraph("title", new Run[] {
-            new Run("none", "\u0391\u03B8\u03B7\u03BD\u03B1\u03B9\u0020" // Greek
-                + "\u03B1\u03C3\u03C0\u03B1\u03B6\u03BF\u03BD"
-                + "\u03C4\u03B1\u03B9\u0020\u03C5\u03BC\u03B1"
-                + "\u03C2\u0021")
+            new Run("none", "Αθηναι ασπαζονται υμας!") // Greek
         }),
         new Paragraph("title", new Run[] {
-            new Run("none", "\u6771\u4eac\u304b\u3089\u4eca\u65e5\u306f")
+            new Run("none", "東京から今日は")
         }),
         new Paragraph("title", new Run[] {
-            new Run("none", "\u05e9\u05dc\u05d5\u05dd \u05de\u05d9\u05e8\u05d5"
-                + "\u05e9\u05dc\u05d9\u05dd")
+            new Run("none", "שלום מירושלים")
         }),
         new Paragraph("title", new Run[] {
-            new Run("none", "\u0633\u0644\u0627\u0645")
+            new Run("none", "سلام")
         }), };
 }
```
```diff
@@ -3921,6 +3921,10 @@ ins_attrib ins_alignment(4); // Required alignment attribute (must
                              // compute_padding() function must be
                              // provided for the instruction
 
+// Whether this node is expanded during code emission into a sequence of
+// instructions and the first instruction can perform an implicit null check.
+ins_attrib ins_is_late_expanded_null_check_candidate(false);
+
 //----------OPERANDS-----------------------------------------------------------
 // Operand definitions must precede instruction definitions for correct parsing
 // in the ADLC because operands constitute user defined types which are used in
```
```diff
@@ -106,6 +106,13 @@ instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
   match(Set dst (LoadP mem));
   predicate(UseZGC && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
   effect(TEMP dst, KILL cr);
+  // The main load is a candidate to implement implicit null checks, as long as
+  // legitimize_address() does not require a preceding lea instruction to
+  // materialize the memory operand. The absence of a preceding lea instruction
+  // is guaranteed for immLoffset8 memory operands, because these do not lead to
+  // out-of-range offsets (see definition of immLoffset8). Fortunately,
+  // immLoffset8 memory operands are the most common ones in practice.
+  ins_is_late_expanded_null_check_candidate(opnd_array(1)->opcode() == INDOFFL8);
 
   ins_cost(4 * INSN_COST);
 
@@ -117,7 +124,11 @@ instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
       // Fix up any out-of-range offsets.
       assert_different_registers(rscratch2, as_Register($mem$$base));
       assert_different_registers(rscratch2, $dst$$Register);
-      ref_addr = __ legitimize_address(ref_addr, 8, rscratch2);
+      int size = 8;
+      assert(!this->is_late_expanded_null_check_candidate() ||
+             !MacroAssembler::legitimize_address_requires_lea(ref_addr, size),
+             "an instruction that can be used for implicit null checking should emit the candidate memory access first");
+      ref_addr = __ legitimize_address(ref_addr, size, rscratch2);
     }
     __ ldr($dst$$Register, ref_addr);
     z_load_barrier(masm, this, ref_addr, $dst$$Register, rscratch1);
```
```diff
@@ -129,16 +129,21 @@ class MacroAssembler: public Assembler {
     a.lea(this, r);
   }
 
+  // Whether materializing the given address for a LDR/STR requires an
+  // additional lea instruction.
+  static bool legitimize_address_requires_lea(const Address &a, int size) {
+    return a.getMode() == Address::base_plus_offset &&
+           !Address::offset_ok_for_immed(a.offset(), exact_log2(size));
+  }
+
   /* Sometimes we get misaligned loads and stores, usually from Unsafe
      accesses, and these can exceed the offset range. */
   Address legitimize_address(const Address &a, int size, Register scratch) {
-    if (a.getMode() == Address::base_plus_offset) {
-      if (! Address::offset_ok_for_immed(a.offset(), exact_log2(size))) {
-        block_comment("legitimize_address {");
-        lea(scratch, a);
-        block_comment("} legitimize_address");
-        return Address(scratch);
-      }
+    if (legitimize_address_requires_lea(a, size)) {
+      block_comment("legitimize_address {");
+      lea(scratch, a);
+      block_comment("} legitimize_address");
+      return Address(scratch);
     }
     return a;
   }
```
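For context on `offset_ok_for_immed`: the AArch64 LDR/STR unsigned-offset form encodes the offset as a size-scaled 12-bit field, which is why an out-of-range offset forces the extra `lea`. A simplified sketch of that encodability test (an assumption about the encoding, not the actual HotSpot helper, which also accepts the signed 9-bit unscaled LDUR form):

```cpp
// Simplified: the scaled unsigned-offset LDR/STR form holds a 12-bit field,
// scaled by the access size (shift = log2(size)).
bool offset_fits_scaled_uimm12(long offset, int shift) {
  if (offset < 0) return false;                     // unsigned form only
  if (offset & ((1L << shift) - 1)) return false;   // must be size-aligned
  return (offset >> shift) < (1L << 12);            // scaled field is 12 bits
}
```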
```diff
@@ -141,6 +141,7 @@ instruct zLoadP(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0)
 %{
   match(Set dst (LoadP mem));
   effect(TEMP_DEF dst, KILL cr0);
+  ins_is_late_expanded_null_check_candidate(true);
   ins_cost(MEMORY_REF_COST);
 
   predicate((UseZGC && n->as_Load()->barrier_data() != 0)
@@ -160,6 +161,7 @@ instruct zLoadP_acq(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0)
 %{
   match(Set dst (LoadP mem));
   effect(TEMP_DEF dst, KILL cr0);
+  ins_is_late_expanded_null_check_candidate(true);
   ins_cost(3 * MEMORY_REF_COST);
 
   // Predicate on instruction order is implicitly present due to the predicate of the cheaper zLoadP operation
```

```diff
@@ -4036,6 +4036,10 @@ ins_attrib ins_field_cbuf_insts_offset(-1);
 ins_attrib ins_field_load_ic_hi_node(0);
 ins_attrib ins_field_load_ic_node(0);
 
+// Whether this node is expanded during code emission into a sequence of
+// instructions and the first instruction can perform an implicit null check.
+ins_attrib ins_is_late_expanded_null_check_candidate(false);
+
 //----------OPERANDS-----------------------------------------------------------
 // Operand definitions must precede instruction definitions for correct
 // parsing in the ADLC because operands constitute user defined types
```

```diff
@@ -96,6 +96,7 @@ instruct zLoadP(iRegPNoSp dst, memory mem, iRegPNoSp tmp, rFlagsReg cr)
   match(Set dst (LoadP mem));
   predicate(UseZGC && n->as_Load()->barrier_data() != 0);
   effect(TEMP dst, TEMP tmp, KILL cr);
+  ins_is_late_expanded_null_check_candidate(true);
 
   ins_cost(4 * DEFAULT_COST);
 
```

```diff
@@ -2619,6 +2619,10 @@ ins_attrib ins_alignment(4); // Required alignment attribute (must
                              // compute_padding() function must be
                              // provided for the instruction
 
+// Whether this node is expanded during code emission into a sequence of
+// instructions and the first instruction can perform an implicit null check.
+ins_attrib ins_is_late_expanded_null_check_candidate(false);
+
 //----------OPERANDS-----------------------------------------------------------
 // Operand definitions must precede instruction definitions for correct parsing
 // in the ADLC because operands constitute user defined types which are used in
```
```diff
@@ -410,7 +410,7 @@
 
 // C2I adapter frames:
 //
-// STACK (interpreted called from compiled, on entry to frame manager):
+// STACK (interpreted called from compiled, on entry to template interpreter):
 //
 //             [TOP_C2I_FRAME]
 //             [JIT_FRAME]
```

```diff
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016, 2023 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
```

```diff
@@ -414,7 +414,7 @@ constexpr FloatRegister Z_FARG2 = Z_F2;
 constexpr FloatRegister Z_FARG3 = Z_F4;
 constexpr FloatRegister Z_FARG4 = Z_F6;
 
-// Register declarations to be used in frame manager assembly code.
+// Register declarations to be used in template interpreter assembly code.
 // Use only non-volatile registers in order to keep values across C-calls.
 
 // Register to cache the integer value on top of the operand stack.
@@ -439,7 +439,7 @@ constexpr Register Z_bcp = Z_R13;
 // Bytecode which is dispatched (short lived!).
 constexpr Register Z_bytecode = Z_R14;
 
-// Temporary registers to be used within frame manager. We can use
+// Temporary registers to be used within template interpreter. We can use
 // the nonvolatile ones because the call stub has saved them.
 // Use only non-volatile registers in order to keep values across C-calls.
 constexpr Register Z_tmp_1 = Z_R10;
```

```diff
@@ -118,7 +118,7 @@ ExceptionBlob* OptoRuntime::generate_exception_blob() {
   __ z_lgr(Z_SP, saved_sp);
 
   // [Z_RET] isn't null was possible in hotspot5 but not in sapjvm6.
-  // C2I adapter extensions are now removed by a resize in the frame manager
+  // C2I adapter extensions are now removed by a resize in the template interpreter
   // (unwind_initial_activation_pending_exception).
 #ifdef ASSERT
   __ z_ltgr(handle_exception, handle_exception);
```

```diff
@@ -2139,7 +2139,7 @@ static address gen_c2i_adapter(MacroAssembler *masm,
   Register value = Z_R12;
 
   // Remember the senderSP so we can pop the interpreter arguments off of the stack.
-  // In addition, frame manager expects initial_caller_sp in Z_R10.
+  // In addition, template interpreter expects initial_caller_sp in Z_R10.
   __ z_lgr(sender_SP, Z_SP);
 
   // This should always fit in 14 bit immediate.
```

```diff
@@ -115,7 +115,7 @@ class StubGenerator: public StubCodeGenerator {
   //   [SP+176] - thread       : Thread*
   //
   address generate_call_stub(address& return_address) {
-    // Set up a new C frame, copy Java arguments, call frame manager
+    // Set up a new C frame, copy Java arguments, call template interpreter
     // or native_entry, and process result.
 
     StubGenStubId stub_id = StubGenStubId::call_stub_id;
@@ -272,10 +272,10 @@ class StubGenerator: public StubCodeGenerator {
 
     BLOCK_COMMENT("call {");
     {
-      // Call frame manager or native entry.
+      // Call template interpreter or native entry.
 
       //
-      // Register state on entry to frame manager / native entry:
+      // Register state on entry to template interpreter / native entry:
       //
       //   Z_ARG1 = r_top_of_arguments_addr - intptr_t *sender tos (prepushed)
       //   Lesp    = (SP) + copied_arguments_offset - 8
@@ -290,7 +290,7 @@ class StubGenerator: public StubCodeGenerator {
       __ z_lgr(Z_esp, r_top_of_arguments_addr);
 
       //
-      // Stack on entry to frame manager / native entry:
+      // Stack on entry to template interpreter / native entry:
       //
       //     F0      [TOP_IJAVA_FRAME_ABI]
       //             [outgoing Java arguments]
@@ -300,7 +300,7 @@ class StubGenerator: public StubCodeGenerator {
       //
 
       // Do a light-weight C-call here, r_new_arg_entry holds the address
-      // of the interpreter entry point (frame manager or native entry)
+      // of the interpreter entry point (template interpreter or native entry)
       // and save runtime-value of return_pc in return_address
       // (call by reference argument).
       return_address = __ call_stub(r_new_arg_entry);
@@ -309,11 +309,11 @@ class StubGenerator: public StubCodeGenerator {
 
     {
       BLOCK_COMMENT("restore registers {");
-      // Returned from frame manager or native entry.
+      // Returned from template interpreter or native entry.
       // Now pop frame, process result, and return to caller.
 
       //
-      // Stack on exit from frame manager / native entry:
+      // Stack on exit from template interpreter / native entry:
       //
       //     F0      [ABI]
       //             ...
@@ -330,7 +330,7 @@ class StubGenerator: public StubCodeGenerator {
       __ pop_frame();
 
       // Reload some volatile registers which we've spilled before the call
-      // to frame manager / native entry.
+      // to template interpreter / native entry.
       // Access all locals via frame pointer, because we know nothing about
       // the topmost frame's size.
       __ z_lg(r_arg_result_addr, result_address_offset, r_entryframe_fp);
```

```diff
@@ -1217,7 +1217,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
 
 // Various method entries
 
-// Math function, frame manager must set up an interpreter state, etc.
+// Math function, template interpreter must set up an interpreter state, etc.
 address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
 
   // Decide what to do: Use same platform specific instructions and runtime calls as compilers.
```
```diff
@@ -118,6 +118,10 @@ instruct zLoadP(rRegP dst, memory mem, rFlagsReg cr)
   predicate(UseZGC && n->as_Load()->barrier_data() != 0);
   match(Set dst (LoadP mem));
   effect(TEMP dst, KILL cr);
+  // The main load is a candidate to implement implicit null checks. The
+  // barrier's slow path includes an identical reload, which does not need to be
+  // registered in the exception table because it is dominated by the main one.
+  ins_is_late_expanded_null_check_candidate(true);
 
   ins_cost(125);
 
```
```diff
@@ -465,13 +465,19 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
       __ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dtan));
     }
   } else if (kind == Interpreter::java_lang_math_tanh) {
-    assert(StubRoutines::dtanh() != nullptr, "not initialized");
-    __ movdbl(xmm0, Address(rsp, wordSize));
-    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dtanh())));
+    if (StubRoutines::dtanh() != nullptr) {
+      __ movdbl(xmm0, Address(rsp, wordSize));
+      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dtanh())));
+    } else {
+      return nullptr; // Fallback to default implementation
+    }
   } else if (kind == Interpreter::java_lang_math_cbrt) {
-    assert(StubRoutines::dcbrt() != nullptr, "not initialized");
-    __ movdbl(xmm0, Address(rsp, wordSize));
-    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dcbrt())));
+    if (StubRoutines::dcbrt() != nullptr) {
+      __ movdbl(xmm0, Address(rsp, wordSize));
+      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dcbrt())));
+    } else {
+      return nullptr; // Fallback to default implementation
+    }
   } else if (kind == Interpreter::java_lang_math_abs) {
     assert(StubRoutines::x86::double_sign_mask() != nullptr, "not initialized");
     __ movdbl(xmm0, Address(rsp, wordSize));
```
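The pattern replacing the asserts: when a stub was not generated for the current CPU, the intrinsic entry now declines by returning nullptr and the interpreter falls back to the shared implementation. A hedged sketch of that contract (names illustrative, not the real HotSpot API):

```cpp
using address = unsigned char*;

// Illustrative only: returning nullptr from an entry generator means
// "no intrinsic entry here"; the caller then wires up the default entry.
address generate_math_entry_sketch(address stub, address (*emit_trampoline)(address)) {
  if (stub == nullptr) {
    return nullptr;  // fall back to the default (non-intrinsic) implementation
  }
  return emit_trampoline(stub);  // emit code that tail-calls the stub
}
```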
```diff
@@ -2055,6 +2055,10 @@ ins_attrib ins_alignment(1); // Required alignment attribute (must
                              // compute_padding() function must be
                              // provided for the instruction
 
+// Whether this node is expanded during code emission into a sequence of
+// instructions and the first instruction can perform an implicit null check.
+ins_attrib ins_is_late_expanded_null_check_candidate(false);
+
 //----------OPERANDS-----------------------------------------------------------
 // Operand definitions must precede instruction definitions for correct parsing
 // in the ADLC because operands constitute user defined types which are used in
@@ -10527,7 +10531,8 @@ instruct xorI_rReg_im1_ndd(rRegI dst, rRegI src, immI_M1 imm)
 // Xor Register with Immediate
 instruct xorI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
 %{
-  predicate(!UseAPX);
+  // Strict predicate check to make selection of xorI_rReg_im1 cost agnostic if immI src is -1.
+  predicate(!UseAPX && n->in(2)->bottom_type()->is_int()->get_con() != -1);
   match(Set dst (XorI dst src));
   effect(KILL cr);
   flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
@@ -10541,7 +10546,8 @@ instruct xorI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
 
 instruct xorI_rReg_rReg_imm_ndd(rRegI dst, rRegI src1, immI src2, rFlagsReg cr)
 %{
-  predicate(UseAPX);
+  // Strict predicate check to make selection of xorI_rReg_im1_ndd cost agnostic if immI src2 is -1.
+  predicate(UseAPX && n->in(2)->bottom_type()->is_int()->get_con() != -1);
   match(Set dst (XorI src1 src2));
   effect(KILL cr);
   flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
@@ -10559,6 +10565,7 @@ instruct xorI_rReg_mem_imm_ndd(rRegI dst, memory src1, immI src2, rFlagsReg cr)
   predicate(UseAPX);
   match(Set dst (XorI (LoadI src1) src2));
   effect(KILL cr);
+  ins_cost(150);
   flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
 
   format %{ "exorl $dst, $src1, $src2\t# int ndd" %}
@@ -11201,7 +11208,8 @@ instruct xorL_rReg_im1_ndd(rRegL dst,rRegL src, immL_M1 imm)
 // Xor Register with Immediate
 instruct xorL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
 %{
-  predicate(!UseAPX);
+  // Strict predicate check to make selection of xorL_rReg_im1 cost agnostic if immL32 src is -1.
+  predicate(!UseAPX && n->in(2)->bottom_type()->is_long()->get_con() != -1L);
   match(Set dst (XorL dst src));
   effect(KILL cr);
   flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
@@ -11215,7 +11223,8 @@ instruct xorL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
 
 instruct xorL_rReg_rReg_imm(rRegL dst, rRegL src1, immL32 src2, rFlagsReg cr)
 %{
-  predicate(UseAPX);
+  // Strict predicate check to make selection of xorL_rReg_im1_ndd cost agnostic if immL32 src2 is -1.
+  predicate(UseAPX && n->in(2)->bottom_type()->is_long()->get_con() != -1L);
   match(Set dst (XorL src1 src2));
   effect(KILL cr);
   flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
@@ -11234,6 +11243,7 @@ instruct xorL_rReg_mem_imm(rRegL dst, memory src1, immL32 src2, rFlagsReg cr)
   match(Set dst (XorL (LoadL src1) src2));
   effect(KILL cr);
   flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
+  ins_cost(150);
 
   format %{ "exorq $dst, $src1, $src2\t# long ndd" %}
   ins_encode %{
```
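The point of the stricter predicates: `x ^ -1` is bitwise NOT, for which the dedicated `xorI_rReg_im1`/`xorL_rReg_im1` rules can emit a plain `not`. Excluding the -1 immediate from the generic rules makes that selection independent of `ins_cost`. The identity the matcher relies on:

```cpp
#include <cstdint>

// x ^ -1 == ~x, so a -1-immediate XOR can always be emitted as a `not`.
static_assert((0x5AF0u ^ 0xFFFFFFFFu) == static_cast<uint32_t>(~0x5AF0u),
              "xor with -1 is bitwise not");
```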
```diff
@@ -1261,69 +1261,6 @@ void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
   // Nothing to do beyond of what os::print_cpu_info() does.
 }
 
-static char saved_jvm_path[MAXPATHLEN] = {0};
-
-// Find the full path to the current module, libjvm.so.
-void os::jvm_path(char *buf, jint buflen) {
-  // Error checking.
-  if (buflen < MAXPATHLEN) {
-    assert(false, "must use a large-enough buffer");
-    buf[0] = '\0';
-    return;
-  }
-  // Lazy resolve the path to current module.
-  if (saved_jvm_path[0] != 0) {
-    strcpy(buf, saved_jvm_path);
-    return;
-  }
-
-  Dl_info dlinfo;
-  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
-  assert(ret != 0, "cannot locate libjvm");
-  char* rp = os::realpath((char *)dlinfo.dli_fname, buf, buflen);
-  assert(rp != nullptr, "error in realpath(): maybe the 'path' argument is too long?");
-
-  // If executing unit tests we require JAVA_HOME to point to the real JDK.
-  if (Arguments::executing_unit_tests()) {
-    // Look for JAVA_HOME in the environment.
-    char* java_home_var = ::getenv("JAVA_HOME");
-    if (java_home_var != nullptr && java_home_var[0] != 0) {
-
-      // Check the current module name "libjvm.so".
-      const char* p = strrchr(buf, '/');
-      if (p == nullptr) {
-        return;
-      }
-      assert(strstr(p, "/libjvm") == p, "invalid library name");
-
-      stringStream ss(buf, buflen);
-      rp = os::realpath(java_home_var, buf, buflen);
-      if (rp == nullptr) {
-        return;
-      }
-
-      assert((int)strlen(buf) < buflen, "Ran out of buffer room");
-      ss.print("%s/lib", buf);
-
-      if (0 == access(buf, F_OK)) {
-        // Use current module name "libjvm.so"
-        ss.print("/%s/libjvm%s", Abstract_VM_Version::vm_variant(), JNI_LIB_SUFFIX);
-        assert(strcmp(buf + strlen(buf) - strlen(JNI_LIB_SUFFIX), JNI_LIB_SUFFIX) == 0,
-               "buf has been truncated");
-      } else {
-        // Go back to path of .so
-        rp = os::realpath((char *)dlinfo.dli_fname, buf, buflen);
-        if (rp == nullptr) {
-          return;
-        }
-      }
-    }
-  }
-
-  strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
-  saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 // Virtual Memory
 
```
```diff
@@ -154,7 +154,8 @@ julong os::Bsd::available_memory() {
   assert(kerr == KERN_SUCCESS,
          "host_statistics64 failed - check mach_host_self() and count");
   if (kerr == KERN_SUCCESS) {
-    available = vmstat.free_count * os::vm_page_size();
+    // free_count is just a lowerbound, other page categories can be freed too and make memory available
+    available = (vmstat.free_count + vmstat.inactive_count + vmstat.purgeable_count) * os::vm_page_size();
   }
 #endif
   return available;
```
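On macOS this widens the estimate from free pages alone to pages the kernel can readily reclaim. A sketch of the computation, assuming the standard Mach `vm_statistics64` field names:

```cpp
#include <cstdint>

// Pages that are free, inactive, or purgeable can all be handed back under
// memory pressure, so summing them is a less pessimistic "available" figure.
uint64_t available_bytes(uint64_t free_count, uint64_t inactive_count,
                         uint64_t purgeable_count, uint64_t page_size) {
  return (free_count + inactive_count + purgeable_count) * page_size;
}
```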
```diff
@@ -1482,83 +1483,6 @@ void os::print_memory_info(outputStream* st) {
   st->cr();
 }
 
-static char saved_jvm_path[MAXPATHLEN] = {0};
-
-// Find the full path to the current module, libjvm
-void os::jvm_path(char *buf, jint buflen) {
-  // Error checking.
-  if (buflen < MAXPATHLEN) {
-    assert(false, "must use a large-enough buffer");
-    buf[0] = '\0';
-    return;
-  }
-  // Lazy resolve the path to current module.
-  if (saved_jvm_path[0] != 0) {
-    strcpy(buf, saved_jvm_path);
-    return;
-  }
-
-  char dli_fname[MAXPATHLEN];
-  dli_fname[0] = '\0';
-  bool ret = dll_address_to_library_name(
-                                         CAST_FROM_FN_PTR(address, os::jvm_path),
-                                         dli_fname, sizeof(dli_fname), nullptr);
-  assert(ret, "cannot locate libjvm");
-  char *rp = nullptr;
-  if (ret && dli_fname[0] != '\0') {
-    rp = os::realpath(dli_fname, buf, buflen);
-  }
-  if (rp == nullptr) {
-    return;
-  }
-
-  // If executing unit tests we require JAVA_HOME to point to the real JDK.
-  if (Arguments::executing_unit_tests()) {
-    // Look for JAVA_HOME in the environment.
-    char* java_home_var = ::getenv("JAVA_HOME");
-    if (java_home_var != nullptr && java_home_var[0] != 0) {
-
-      // Check the current module name "libjvm"
-      const char* p = strrchr(buf, '/');
-      assert(strstr(p, "/libjvm") == p, "invalid library name");
-
-      stringStream ss(buf, buflen);
-      rp = os::realpath(java_home_var, buf, buflen);
-      if (rp == nullptr) {
-        return;
-      }
-
-      assert((int)strlen(buf) < buflen, "Ran out of buffer space");
-      // Add the appropriate library and JVM variant subdirs
-      ss.print("%s/lib/%s", buf, Abstract_VM_Version::vm_variant());
-
-      if (0 != access(buf, F_OK)) {
-        ss.reset();
-        ss.print("%s/lib", buf);
-      }
-
-      // If the path exists within JAVA_HOME, add the JVM library name
-      // to complete the path to JVM being overridden. Otherwise fallback
-      // to the path to the current library.
-      if (0 == access(buf, F_OK)) {
-        // Use current module name "libjvm"
-        ss.print("/libjvm%s", JNI_LIB_SUFFIX);
-        assert(strcmp(buf + strlen(buf) - strlen(JNI_LIB_SUFFIX), JNI_LIB_SUFFIX) == 0,
-               "buf has been truncated");
-      } else {
-        // Fall back to path of current library
-        rp = os::realpath(dli_fname, buf, buflen);
-        if (rp == nullptr) {
-          return;
-        }
-      }
-    }
-  }
-
-  strncpy(saved_jvm_path, buf, MAXPATHLEN);
-  saved_jvm_path[MAXPATHLEN - 1] = '\0';
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 // Virtual Memory
 
```
```diff
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,9 +35,6 @@
         range,                                                              \
         constraint)                                                         \
                                                                             \
-  product(bool, UseOprofile, false,                                         \
-          "(Deprecated) enable support for Oprofile profiler")              \
-                                                                            \
   product(bool, UseTransparentHugePages, false,                             \
           "Use MADV_HUGEPAGE for large pages")                              \
                                                                             \
```
```diff
@@ -2746,118 +2746,9 @@ void os::get_summary_cpu_info(char* cpuinfo, size_t length) {
 #endif
 }
 
-static char saved_jvm_path[MAXPATHLEN] = {0};
-
-// Find the full path to the current module, libjvm.so
-void os::jvm_path(char *buf, jint buflen) {
-  // Error checking.
-  if (buflen < MAXPATHLEN) {
-    assert(false, "must use a large-enough buffer");
-    buf[0] = '\0';
-    return;
-  }
-  // Lazy resolve the path to current module.
-  if (saved_jvm_path[0] != 0) {
-    strcpy(buf, saved_jvm_path);
-    return;
-  }
-
-  char dli_fname[MAXPATHLEN];
-  dli_fname[0] = '\0';
-  bool ret = dll_address_to_library_name(
-                                         CAST_FROM_FN_PTR(address, os::jvm_path),
-                                         dli_fname, sizeof(dli_fname), nullptr);
-  assert(ret, "cannot locate libjvm");
-  char *rp = nullptr;
-  if (ret && dli_fname[0] != '\0') {
-    rp = os::realpath(dli_fname, buf, buflen);
-  }
-  if (rp == nullptr) {
-    return;
-  }
-
-  // If executing unit tests we require JAVA_HOME to point to the real JDK.
-  if (Arguments::executing_unit_tests()) {
-    // Look for JAVA_HOME in the environment.
-    char* java_home_var = ::getenv("JAVA_HOME");
-    if (java_home_var != nullptr && java_home_var[0] != 0) {
-
-      // Check the current module name "libjvm.so".
-      const char* p = strrchr(buf, '/');
-      if (p == nullptr) {
-        return;
-      }
-      assert(strstr(p, "/libjvm") == p, "invalid library name");
-
-      stringStream ss(buf, buflen);
-      rp = os::realpath(java_home_var, buf, buflen);
-      if (rp == nullptr) {
-        return;
-      }
-
-      assert((int)strlen(buf) < buflen, "Ran out of buffer room");
-      ss.print("%s/lib", buf);
-
-      if (0 == access(buf, F_OK)) {
-        // Use current module name "libjvm.so"
-        ss.print("/%s/libjvm%s", Abstract_VM_Version::vm_variant(), JNI_LIB_SUFFIX);
-        assert(strcmp(buf + strlen(buf) - strlen(JNI_LIB_SUFFIX), JNI_LIB_SUFFIX) == 0,
-               "buf has been truncated");
-      } else {
-        // Go back to path of .so
-        rp = os::realpath(dli_fname, buf, buflen);
-        if (rp == nullptr) {
-          return;
-        }
-      }
-    }
-  }
-
-  strncpy(saved_jvm_path, buf, MAXPATHLEN);
-  saved_jvm_path[MAXPATHLEN - 1] = '\0';
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 // Virtual Memory
 
-// Rationale behind this function:
-//  current (Mon Apr 25 20:12:18 MSD 2005) oprofile drops samples without executable
-//  mapping for address (see lookup_dcookie() in the kernel module), thus we cannot get
-//  samples for JITted code. Here we create private executable mapping over the code cache
-//  and then we can use standard (well, almost, as mapping can change) way to provide
-//  info for the reporting script by storing timestamp and location of symbol
-void linux_wrap_code(char* base, size_t size) {
-  static volatile jint cnt = 0;
-
-  static_assert(sizeof(off_t) == 8, "Expected Large File Support in this file");
-
-  if (!UseOprofile) {
-    return;
-  }
-
-  char buf[PATH_MAX+1];
-  int num = Atomic::add(&cnt, 1);
-
-  snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d",
-           os::get_temp_directory(), os::current_process_id(), num);
-  unlink(buf);
-
-  int fd = ::open(buf, O_CREAT | O_RDWR, S_IRWXU);
-
-  if (fd != -1) {
-    off_t rv = ::lseek(fd, size-2, SEEK_SET);
-    if (rv != (off_t)-1) {
-      if (::write(fd, "", 1) == 1) {
-        mmap(base, size,
-             PROT_READ|PROT_WRITE|PROT_EXEC,
-             MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, fd, 0);
-      }
-    }
-    ::close(fd);
-    unlink(buf);
-  }
-}
-
 static bool recoverable_mmap_error(int err) {
   // See if the error is one we can let the caller handle. This
   // list of errno values comes from JBS-6843484. I can't find a
```
```diff
@@ -59,6 +59,7 @@
 #ifdef AIX
 #include "loadlib_aix.hpp"
+#include "os_aix.hpp"
 #include "porting_aix.hpp"
 #endif
 #ifdef LINUX
 #include "os_linux.hpp"
@@ -1060,6 +1061,95 @@ bool os::same_files(const char* file1, const char* file2) {
   return is_same;
 }
 
+static char saved_jvm_path[MAXPATHLEN] = {0};
+
+// Find the full path to the current module, libjvm.so
+void os::jvm_path(char *buf, jint buflen) {
+  // Error checking.
+  if (buflen < MAXPATHLEN) {
+    assert(false, "must use a large-enough buffer");
+    buf[0] = '\0';
+    return;
+  }
+  // Lazy resolve the path to current module.
+  if (saved_jvm_path[0] != 0) {
+    strcpy(buf, saved_jvm_path);
+    return;
+  }
+
+  const char* fname;
+#ifdef AIX
+  Dl_info dlinfo;
+  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
+  assert(ret != 0, "cannot locate libjvm");
+  if (ret == 0) {
+    return;
+  }
+  fname = dlinfo.dli_fname;
+#else
+  char dli_fname[MAXPATHLEN];
+  dli_fname[0] = '\0';
+  bool ret = dll_address_to_library_name(
+                                         CAST_FROM_FN_PTR(address, os::jvm_path),
+                                         dli_fname, sizeof(dli_fname), nullptr);
+  assert(ret, "cannot locate libjvm");
+  if (!ret) {
+    return;
+  }
+  fname = dli_fname;
+#endif // AIX
+  char* rp = nullptr;
+  if (fname[0] != '\0') {
+    rp = os::realpath(fname, buf, buflen);
+  }
+  if (rp == nullptr) {
+    return;
+  }
+
+  // If executing unit tests we require JAVA_HOME to point to the real JDK.
+  if (Arguments::executing_unit_tests()) {
+    // Look for JAVA_HOME in the environment.
+    char* java_home_var = ::getenv("JAVA_HOME");
+    if (java_home_var != nullptr && java_home_var[0] != 0) {
+
+      // Check the current module name "libjvm.so".
+      const char* p = strrchr(buf, '/');
+      if (p == nullptr) {
+        return;
+      }
+      assert(strstr(p, "/libjvm") == p, "invalid library name");
+
+      stringStream ss(buf, buflen);
+      rp = os::realpath(java_home_var, buf, buflen);
+      if (rp == nullptr) {
+        return;
+      }
+
+      assert((int)strlen(buf) < buflen, "Ran out of buffer room");
+      ss.print("%s/lib", buf);
+
+      // If the path exists within JAVA_HOME, add the VM variant directory and JVM
+      // library name to complete the path to JVM being overridden. Otherwise fallback
+      // to the path to the current library.
+      if (0 == access(buf, F_OK)) {
+        // Use current module name "libjvm.so"
+        ss.print("/%s/libjvm%s", Abstract_VM_Version::vm_variant(), JNI_LIB_SUFFIX);
+        assert(strcmp(buf + strlen(buf) - strlen(JNI_LIB_SUFFIX), JNI_LIB_SUFFIX) == 0,
+               "buf has been truncated");
+      } else {
+        // Go back to path of .so
+        rp = os::realpath(fname, buf, buflen);
+        if (rp == nullptr) {
+          return;
+        }
+      }
+    }
+  }
+
+  strncpy(saved_jvm_path, buf, MAXPATHLEN);
+  saved_jvm_path[MAXPATHLEN - 1] = '\0';
+}
+
 // Called when creating the thread. The minimum stack sizes have already been calculated
 size_t os::Posix::get_initial_stack_size(ThreadType thr_type, size_t req_stack_size) {
   size_t stack_size;
```
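This consolidates the three nearly identical `os::jvm_path()` copies removed from the AIX, BSD, and Linux files above into a single POSIX implementation; only how the loaded library's file name is obtained stays platform-specific (`dladdr` on AIX, `dll_address_to_library_name` elsewhere). A sketch of the JAVA_HOME override it preserves (paths and suffix illustrative):

```cpp
#include <string>
#include <unistd.h>

// Illustrative sketch: if JAVA_HOME/lib exists, the JDK under test is used
// to locate libjvm; otherwise the caller keeps the currently loaded library.
std::string jvm_path_override(const std::string& java_home,
                              const std::string& variant) {
  std::string lib = java_home + "/lib";
  if (access(lib.c_str(), F_OK) == 0) {
    return lib + "/" + variant + "/libjvm.so";  // e.g. .../lib/server/libjvm.so
  }
  return "";  // fall back to the current library's real path
}
```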
```diff
@@ -81,14 +81,12 @@
 #endif
 
 #define SPELL_REG_SP "sp"
 #define SPELL_REG_FP "fp"
 
 #ifdef __APPLE__
 // see darwin-xnu/osfmk/mach/arm/_structs.h
 
 // 10.5 UNIX03 member name prefixes
 #define DU3_PREFIX(s, m)  __ ## s.__ ## m
-#endif
-
 
 #define context_x uc_mcontext->DU3_PREFIX(ss,x)
 #define context_fp uc_mcontext->DU3_PREFIX(ss,fp)
@@ -97,6 +95,31 @@
 #define context_pc uc_mcontext->DU3_PREFIX(ss,pc)
 #define context_cpsr uc_mcontext->DU3_PREFIX(ss,cpsr)
 #define context_esr uc_mcontext->DU3_PREFIX(es,esr)
+#endif
+
+#ifdef __FreeBSD__
+# define context_x uc_mcontext.mc_gpregs.gp_x
+# define context_fp context_x[REG_FP]
+# define context_lr uc_mcontext.mc_gpregs.gp_lr
+# define context_sp uc_mcontext.mc_gpregs.gp_sp
+# define context_pc uc_mcontext.mc_gpregs.gp_elr
+#endif
+
+#ifdef __NetBSD__
+# define context_x uc_mcontext.__gregs
+# define context_fp uc_mcontext.__gregs[_REG_FP]
+# define context_lr uc_mcontext.__gregs[_REG_LR]
+# define context_sp uc_mcontext.__gregs[_REG_SP]
+# define context_pc uc_mcontext.__gregs[_REG_ELR]
+#endif
+
+#ifdef __OpenBSD__
+# define context_x sc_x
+# define context_fp sc_x[REG_FP]
+# define context_lr sc_lr
+# define context_sp sc_sp
+# define context_pc sc_elr
+#endif
 
 #define REG_BCP context_x[22]
```

```diff
@@ -497,9 +520,11 @@ int os::extra_bang_size_in_bytes() {
   return 0;
 }
 
+#ifdef __APPLE__
 void os::current_thread_enable_wx(WXMode mode) {
   pthread_jit_write_protect_np(mode == WXExec);
 }
+#endif
 
 static inline void atomic_copy64(const volatile void *src, volatile void *dst) {
   *(jlong *) dst = *(const jlong *) src;
```
```diff
@@ -481,7 +481,3 @@ int get_legal_text(FileBuff &fbuf, char **legal_text)
   *legal_text = legal_start;
   return (int) (legal_end - legal_start);
 }
-
-void *operator new( size_t size, int, const char *, int ) throw() {
-  return ::operator new( size );
-}
```

```diff
@@ -1626,6 +1626,8 @@ void ArchDesc::declareClasses(FILE *fp) {
     while (attr != nullptr) {
       if (strcmp (attr->_ident, "ins_is_TrapBasedCheckNode") == 0) {
         fprintf(fp, "  virtual bool is_TrapBasedCheckNode() const { return %s; }\n", attr->_val);
+      } else if (strcmp (attr->_ident, "ins_is_late_expanded_null_check_candidate") == 0) {
+        fprintf(fp, "  virtual bool is_late_expanded_null_check_candidate() const { return %s; }\n", attr->_val);
       } else if (strcmp (attr->_ident, "ins_cost") != 0 &&
                  strncmp(attr->_ident, "ins_field_", 10) != 0 &&
                  // Must match function in node.hpp: return type bool, no prefix "ins_".
```
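The ADLC change is what turns the `ins_is_late_expanded_null_check_candidate` attribute declared in the .ad files above into a C++ override on the generated MachNode subclass. Roughly, an instruct that sets the attribute to true would get output along these lines (hypothetical excerpt, not the literal generated file):

```cpp
// Sketch of ADLC output for an instruct declaring
//   ins_is_late_expanded_null_check_candidate(true);
// The base class default returns false.
class zLoadPNode_sketch /* : public MachNode */ {
public:
  virtual bool is_late_expanded_null_check_candidate() const { return true; }
};
```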
```diff
@@ -818,7 +818,7 @@ JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* current, jint trap_request))
   Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
 
   if (action == Deoptimization::Action_make_not_entrant) {
-    if (nm->make_not_entrant("C1 deoptimize")) {
+    if (nm->make_not_entrant(nmethod::ChangeReason::C1_deoptimize)) {
       if (reason == Deoptimization::Reason_tenured) {
         MethodData* trap_mdo = Deoptimization::get_method_data(current, method, true /*create_if_missing*/);
         if (trap_mdo != nullptr) {
@@ -1110,7 +1110,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, C1StubId stub_id ))
       // safepoint, but if it's still alive then make it not_entrant.
       nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
       if (nm != nullptr) {
-        nm->make_not_entrant("C1 code patch");
+        nm->make_not_entrant(nmethod::ChangeReason::C1_codepatch);
       }
 
       Deoptimization::deoptimize_frame(current, caller_frame.id());
@@ -1358,7 +1358,7 @@ void Runtime1::patch_code(JavaThread* current, C1StubId stub_id) {
     // Make sure the nmethod is invalidated, i.e. made not entrant.
     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
     if (nm != nullptr) {
-      nm->make_not_entrant("C1 deoptimize for patching");
+      nm->make_not_entrant(nmethod::ChangeReason::C1_deoptimize_for_patching);
     }
   }
@@ -1486,7 +1486,7 @@ JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* current))
 
   nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
   assert (nm != nullptr, "no more nmethod?");
-  nm->make_not_entrant("C1 predicate failed trap");
+  nm->make_not_entrant(nmethod::ChangeReason::C1_predicate_failed_trap);
 
   methodHandle m(current, nm->method());
   MethodData* mdo = m->method_data();
```
```diff
@@ -110,12 +110,24 @@ const char* CDSConfig::default_archive_path() {
   // before CDSConfig::ergo_initialize() is called.
   assert(_cds_ergo_initialize_started, "sanity");
   if (_default_archive_path == nullptr) {
-    char jvm_path[JVM_MAXPATHLEN];
-    os::jvm_path(jvm_path, sizeof(jvm_path));
-    char *end = strrchr(jvm_path, *os::file_separator());
-    if (end != nullptr) *end = '\0';
     stringStream tmp;
-    tmp.print("%s%sclasses", jvm_path, os::file_separator());
+    if (is_vm_statically_linked()) {
+      // It's easier to form the path using JAVA_HOME as os::jvm_path
+      // gives the path to the launcher executable on static JDK.
+      const char* subdir = WINDOWS_ONLY("bin") NOT_WINDOWS("lib");
+      tmp.print("%s%s%s%s%s%sclasses",
+                Arguments::get_java_home(), os::file_separator(),
+                subdir, os::file_separator(),
+                Abstract_VM_Version::vm_variant(), os::file_separator());
+    } else {
+      // Assume .jsa is in the same directory where libjvm resides on
+      // non-static JDK.
+      char jvm_path[JVM_MAXPATHLEN];
+      os::jvm_path(jvm_path, sizeof(jvm_path));
+      char *end = strrchr(jvm_path, *os::file_separator());
+      if (end != nullptr) *end = '\0';
+      tmp.print("%s%sclasses", jvm_path, os::file_separator());
+    }
 #ifdef _LP64
     if (!UseCompressedOops) {
       tmp.print_raw("_nocoops");
```
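Both branches end at the same archive base name; they differ only in how the directory is found. An illustrative sketch of the two path shapes, assuming a conventional JDK layout and the "server" variant:

```cpp
#include <cstdio>

// Illustrative path shapes only (layout and variant are assumptions):
int main() {
  // Dynamic JDK: directory of libjvm, derived via os::jvm_path().
  std::printf("%s/classes\n", "/opt/jdk/lib/server");
  // Static JDK: JAVA_HOME + lib ("bin" on Windows) + VM variant.
  std::printf("%s/%s/%s/classes\n", "/opt/jdk", "lib", "server");
}
```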
```diff
@@ -802,7 +802,7 @@ class CompileReplay : public StackObj {
     // Make sure the existence of a prior compile doesn't stop this one
     nmethod* nm = (entry_bci != InvocationEntryBci) ? method->lookup_osr_nmethod_for(entry_bci, comp_level, true) : method->code();
     if (nm != nullptr) {
-      nm->make_not_entrant("CI replay");
+      nm->make_not_entrant(nmethod::ChangeReason::CI_replay);
    }
    replay_state = this;
    CompileBroker::compile_method(methodHandle(THREAD, method), entry_bci, comp_level,
```
```diff
@@ -154,6 +154,8 @@
 
 #define JAVA_25_VERSION 69
 
+#define JAVA_26_VERSION 70
+
 void ClassFileParser::set_class_bad_constant_seen(short bad_constant) {
   assert((bad_constant == JVM_CONSTANT_Module ||
           bad_constant == JVM_CONSTANT_Package) && _major_version >= JAVA_9_VERSION,
```
```diff
@@ -289,8 +289,6 @@ bool vmIntrinsics::disabled_by_jvm_flags(vmIntrinsics::ID id) {
   case vmIntrinsics::_dsin:
   case vmIntrinsics::_dcos:
   case vmIntrinsics::_dtan:
-  case vmIntrinsics::_dtanh:
-  case vmIntrinsics::_dcbrt:
   case vmIntrinsics::_dlog:
   case vmIntrinsics::_dexp:
   case vmIntrinsics::_dpow:
@@ -316,6 +314,13 @@ bool vmIntrinsics::disabled_by_jvm_flags(vmIntrinsics::ID id) {
   case vmIntrinsics::_fmaF:
     if (!InlineMathNatives || !UseFMA) return true;
     break;
+  case vmIntrinsics::_dtanh:
+  case vmIntrinsics::_dcbrt:
+    if (!InlineMathNatives || !InlineIntrinsics) return true;
+#if defined(AMD64) && (defined(COMPILER1) || defined(COMPILER2))
+    if (!UseLibmIntrinsic) return true;
+#endif
+    break;
   case vmIntrinsics::_floatToFloat16:
   case vmIntrinsics::_float16ToFloat:
     if (!InlineIntrinsics) return true;
```
```diff
@@ -1361,7 +1361,7 @@ void CodeCache::make_marked_nmethods_deoptimized() {
   while(iter.next()) {
     nmethod* nm = iter.method();
     if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) {
-      nm->make_not_entrant("marked for deoptimization");
+      nm->make_not_entrant(nmethod::ChangeReason::marked_for_deoptimization);
       nm->make_deoptimized();
     }
   }
```
```diff
@@ -1975,14 +1975,12 @@ void nmethod::invalidate_osr_method() {
   }
 }
 
-void nmethod::log_state_change(const char* reason) const {
-  assert(reason != nullptr, "Must provide a reason");
-
+void nmethod::log_state_change(ChangeReason change_reason) const {
   if (LogCompilation) {
     if (xtty != nullptr) {
       ttyLocker ttyl;  // keep the following output all in one block
       xtty->begin_elem("make_not_entrant thread='%zu' reason='%s'",
-                       os::current_thread_id(), reason);
+                       os::current_thread_id(), change_reason_to_string(change_reason));
       log_identity(xtty);
       xtty->stamp();
       xtty->end_elem();
@@ -1991,7 +1989,7 @@ void nmethod::log_state_change(const char* reason) const {
 
   ResourceMark rm;
   stringStream ss(NEW_RESOURCE_ARRAY(char, 256), 256);
-  ss.print("made not entrant: %s", reason);
+  ss.print("made not entrant: %s", change_reason_to_string(change_reason));
 
   CompileTask::print_ul(this, ss.freeze());
   if (PrintCompilation) {
@@ -2006,9 +2004,7 @@ void nmethod::unlink_from_method() {
 }
 
 // Invalidate code
-bool nmethod::make_not_entrant(const char* reason) {
-  assert(reason != nullptr, "Must provide a reason");
-
+bool nmethod::make_not_entrant(ChangeReason change_reason) {
   // This can be called while the system is already at a safepoint which is ok
   NoSafepointVerifier nsv;
 
@@ -2077,7 +2073,7 @@ bool nmethod::make_not_entrant(const char* reason) {
   assert(success, "Transition can't fail");
 
   // Log the transition once
-  log_state_change(reason);
+  log_state_change(change_reason);
 
   // Remove nmethod from method.
   unlink_from_method();
```
```diff
@@ -471,6 +471,85 @@ class nmethod : public CodeBlob {
   void oops_do_set_strong_done(nmethod* old_head);
 
 public:
+  enum class ChangeReason : u1 {
+    C1_codepatch,
+    C1_deoptimize,
+    C1_deoptimize_for_patching,
+    C1_predicate_failed_trap,
+    CI_replay,
+    JVMCI_invalidate_nmethod,
+    JVMCI_invalidate_nmethod_mirror,
+    JVMCI_materialize_virtual_object,
+    JVMCI_new_installation,
+    JVMCI_register_method,
+    JVMCI_replacing_with_new_code,
+    JVMCI_reprofile,
+    marked_for_deoptimization,
+    missing_exception_handler,
+    not_used,
+    OSR_invalidation_back_branch,
+    OSR_invalidation_for_compiling_with_C1,
+    OSR_invalidation_of_lower_level,
+    set_native_function,
+    uncommon_trap,
+    whitebox_deoptimization,
+    zombie,
+  };
+
+  static const char* change_reason_to_string(ChangeReason change_reason) {
+    switch (change_reason) {
+      case ChangeReason::C1_codepatch:
+        return "C1 code patch";
+      case ChangeReason::C1_deoptimize:
+        return "C1 deoptimized";
+      case ChangeReason::C1_deoptimize_for_patching:
+        return "C1 deoptimize for patching";
+      case ChangeReason::C1_predicate_failed_trap:
+        return "C1 predicate failed trap";
+      case ChangeReason::CI_replay:
+        return "CI replay";
+      case ChangeReason::JVMCI_invalidate_nmethod:
+        return "JVMCI invalidate nmethod";
+      case ChangeReason::JVMCI_invalidate_nmethod_mirror:
+        return "JVMCI invalidate nmethod mirror";
+      case ChangeReason::JVMCI_materialize_virtual_object:
+        return "JVMCI materialize virtual object";
+      case ChangeReason::JVMCI_new_installation:
+        return "JVMCI new installation";
+      case ChangeReason::JVMCI_register_method:
+        return "JVMCI register method";
+      case ChangeReason::JVMCI_replacing_with_new_code:
+        return "JVMCI replacing with new code";
+      case ChangeReason::JVMCI_reprofile:
+        return "JVMCI reprofile";
+      case ChangeReason::marked_for_deoptimization:
+        return "marked for deoptimization";
+      case ChangeReason::missing_exception_handler:
+        return "missing exception handler";
+      case ChangeReason::not_used:
+        return "not used";
+      case ChangeReason::OSR_invalidation_back_branch:
+        return "OSR invalidation back branch";
+      case ChangeReason::OSR_invalidation_for_compiling_with_C1:
+        return "OSR invalidation for compiling with C1";
+      case ChangeReason::OSR_invalidation_of_lower_level:
+        return "OSR invalidation of lower level";
+      case ChangeReason::set_native_function:
+        return "set native function";
+      case ChangeReason::uncommon_trap:
+        return "uncommon trap";
+      case ChangeReason::whitebox_deoptimization:
+        return "whitebox deoptimization";
+      case ChangeReason::zombie:
+        return "zombie";
+      default: {
+        assert(false, "Unhandled reason");
+        return "Unknown";
+      }
+    }
+  }
+
   // create nmethod with entry_bci
   static nmethod* new_nmethod(const methodHandle& method,
                               int compile_id,
@@ -633,8 +712,8 @@ public:
   // alive. It is used when an uncommon trap happens. Returns true
   // if this thread changed the state of the nmethod or false if
   // another thread performed the transition.
-  bool make_not_entrant(const char* reason);
-  bool make_not_used() { return make_not_entrant("not used"); }
+  bool make_not_entrant(ChangeReason change_reason);
+  bool make_not_used() { return make_not_entrant(ChangeReason::not_used); }
 
   bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
   bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
@@ -947,7 +1026,7 @@ public:
   // Logging
   void log_identity(xmlStream* log) const;
   void log_new_nmethod() const;
-  void log_state_change(const char* reason) const;
+  void log_state_change(ChangeReason change_reason) const;
 
   // Prints block-level comments, including nmethod specific block labels:
   void print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels=true) const;
```
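Moving from free-form strings to `enum class ChangeReason : u1` keeps every call site type-checked, makes the reason a single byte to store and pass, and funnels all log text through one to-string function. A compact sketch of the idiom (standard C++ stand-ins for the HotSpot types):

```cpp
#include <cstdint>

enum class ChangeReason : std::uint8_t { uncommon_trap, not_used };

const char* change_reason_to_string(ChangeReason r) {
  switch (r) {
    case ChangeReason::uncommon_trap: return "uncommon trap";
    case ChangeReason::not_used:      return "not used";
  }
  return "Unknown";  // mirrors the assert-backed default above
}
```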
```diff
@@ -924,7 +924,7 @@ void CompilationPolicy::compile(const methodHandle& mh, int bci, CompLevel level
     nmethod* osr_nm = mh->lookup_osr_nmethod_for(bci, CompLevel_simple, false);
     if (osr_nm != nullptr && osr_nm->comp_level() > CompLevel_simple) {
       // Invalidate the existing OSR nmethod so that a compile at CompLevel_simple is permitted.
-      osr_nm->make_not_entrant("OSR invalidation for compiling with C1");
+      osr_nm->make_not_entrant(nmethod::ChangeReason::OSR_invalidation_for_compiling_with_C1);
     }
     compile(mh, bci, CompLevel_simple, THREAD);
   }
@@ -1516,7 +1516,7 @@ void CompilationPolicy::method_back_branch_event(const methodHandle& mh, const m
         int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
         print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
       }
-      nm->make_not_entrant("OSR invalidation, back branch");
+      nm->make_not_entrant(nmethod::ChangeReason::OSR_invalidation_back_branch);
     }
   }
   // Fix up next_level if necessary to avoid deopts
```
```diff
@@ -98,15 +98,15 @@ void ParallelArguments::initialize() {
   FullGCForwarding::initialize_flags(heap_reserved_size_bytes());
 }
 
-// The alignment used for boundary between young gen and old gen
-static size_t default_gen_alignment() {
+// The alignment used for spaces in young gen and old gen
+static size_t default_space_alignment() {
   return 64 * K * HeapWordSize;
 }
 
 void ParallelArguments::initialize_alignments() {
   // Initialize card size before initializing alignments
   CardTable::initialize_card_size();
-  SpaceAlignment = GenAlignment = default_gen_alignment();
+  SpaceAlignment = default_space_alignment();
   HeapAlignment = compute_heap_alignment();
 }
 
@@ -123,9 +123,8 @@ void ParallelArguments::initialize_heap_flags_and_sizes() {
 
   // Can a page size be something else than a power of two?
   assert(is_power_of_2((intptr_t)page_sz), "must be a power of 2");
-  size_t new_alignment = align_up(page_sz, GenAlignment);
-  if (new_alignment != GenAlignment) {
-    GenAlignment = new_alignment;
+  size_t new_alignment = align_up(page_sz, SpaceAlignment);
+  if (new_alignment != SpaceAlignment) {
+    SpaceAlignment = new_alignment;
     // Redo everything from the start
     initialize_heap_flags_and_sizes_one_pass();
```
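A quick check of the Parallel GC default that replaces `default_gen_alignment()`: with 8-byte heap words (a 64-bit build assumed), 64 K words is a 512 KiB space alignment.

```cpp
#include <cstddef>

constexpr std::size_t K = 1024;
constexpr std::size_t HeapWordSize = 8;  // 64-bit build assumed

constexpr std::size_t default_space_alignment() { return 64 * K * HeapWordSize; }
static_assert(default_space_alignment() == 512 * K, "512 KiB");
```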
```diff
@@ -29,10 +29,8 @@
 void ParallelInitLogger::print_heap() {
   log_info_p(gc, init)("Alignments:"
                        " Space " EXACTFMT ","
-                       " Generation " EXACTFMT ","
                        " Heap " EXACTFMT,
                        EXACTFMTARGS(SpaceAlignment),
-                       EXACTFMTARGS(GenAlignment),
                        EXACTFMTARGS(HeapAlignment));
   GCInitLogger::print_heap();
 }
```
```diff
@@ -69,8 +69,8 @@ jint ParallelScavengeHeap::initialize() {
 
   initialize_reserved_region(heap_rs);
   // Layout the reserved space for the generations.
-  ReservedSpace old_rs   = heap_rs.first_part(MaxOldSize, GenAlignment);
-  ReservedSpace young_rs = heap_rs.last_part(MaxOldSize, GenAlignment);
+  ReservedSpace old_rs   = heap_rs.first_part(MaxOldSize, SpaceAlignment);
+  ReservedSpace young_rs = heap_rs.last_part(MaxOldSize, SpaceAlignment);
   assert(young_rs.size() == MaxNewSize, "Didn't reserve all of the heap");
 
   PSCardTable* card_table = new PSCardTable(_reserved);
@@ -107,7 +107,7 @@ jint ParallelScavengeHeap::initialize() {
       new PSAdaptiveSizePolicy(eden_capacity,
                                initial_promo_size,
                                young_gen()->to_space()->capacity_in_bytes(),
-                               GenAlignment,
+                               SpaceAlignment,
                                max_gc_pause_sec,
                                GCTimeRatio
                                );
```
@@ -41,7 +41,7 @@ PSOldGen::PSOldGen(ReservedSpace rs, size_t initial_size, size_t min_size,
|
||||
_min_gen_size(min_size),
|
||||
_max_gen_size(max_size)
|
||||
{
|
||||
initialize(rs, initial_size, GenAlignment);
|
||||
initialize(rs, initial_size, SpaceAlignment);
|
||||
}
|
||||
|
||||
void PSOldGen::initialize(ReservedSpace rs, size_t initial_size, size_t alignment) {
|
||||
|
||||
@@ -47,7 +47,7 @@ PSYoungGen::PSYoungGen(ReservedSpace rs, size_t initial_size, size_t min_size, s
|
||||
_from_counters(nullptr),
|
||||
_to_counters(nullptr)
|
||||
{
|
||||
initialize(rs, initial_size, GenAlignment);
|
||||
initialize(rs, initial_size, SpaceAlignment);
|
||||
}
|
||||
|
||||
void PSYoungGen::initialize_virtual_space(ReservedSpace rs,
|
||||
@@ -746,7 +746,7 @@ size_t PSYoungGen::available_to_live() {
|
||||
}
|
||||
|
||||
size_t delta_in_bytes = unused_committed + delta_in_survivor;
|
||||
delta_in_bytes = align_down(delta_in_bytes, GenAlignment);
|
||||
delta_in_bytes = align_down(delta_in_bytes, SpaceAlignment);
|
||||
return delta_in_bytes;
|
||||
}
|
||||
|
||||
|
||||
@@ -188,8 +188,8 @@ jint SerialHeap::initialize() {
|
||||
|
||||
initialize_reserved_region(heap_rs);
|
||||
|
||||
ReservedSpace young_rs = heap_rs.first_part(MaxNewSize, GenAlignment);
|
||||
ReservedSpace old_rs = heap_rs.last_part(MaxNewSize, GenAlignment);
|
||||
ReservedSpace young_rs = heap_rs.first_part(MaxNewSize, SpaceAlignment);
|
||||
ReservedSpace old_rs = heap_rs.last_part(MaxNewSize, SpaceAlignment);
|
||||
|
||||
_rem_set = new CardTableRS(_reserved);
|
||||
_rem_set->initialize(young_rs.base(), old_rs.base());
|
||||
|
||||
@@ -35,7 +35,7 @@ extern size_t SpaceAlignment;
|
||||
|
||||
class GCArguments {
|
||||
protected:
|
||||
// Initialize HeapAlignment, SpaceAlignment, and extra alignments (E.g. GenAlignment)
|
||||
// Initialize HeapAlignment, SpaceAlignment
|
||||
virtual void initialize_alignments() = 0;
|
||||
virtual void initialize_heap_flags_and_sizes();
|
||||
virtual void initialize_size_info();
|
||||
|
||||
@@ -42,17 +42,15 @@ size_t MaxOldSize = 0;
|
||||
// See more in JDK-8346005
|
||||
size_t OldSize = ScaleForWordSize(4*M);
|
||||
|
||||
size_t GenAlignment = 0;
|
||||
|
||||
size_t GenArguments::conservative_max_heap_alignment() { return (size_t)Generation::GenGrain; }
|
||||
|
||||
static size_t young_gen_size_lower_bound() {
|
||||
// The young generation must be aligned and have room for eden + two survivors
|
||||
return align_up(3 * SpaceAlignment, GenAlignment);
|
||||
return 3 * SpaceAlignment;
|
||||
}
|
||||
|
||||
static size_t old_gen_size_lower_bound() {
|
||||
return align_up(SpaceAlignment, GenAlignment);
|
||||
return SpaceAlignment;
|
||||
}
|
||||
|
||||
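The simplification above works because the separate GenAlignment is gone: once generations are aligned to the same SpaceAlignment as the spaces inside them, the extra align_up round is a no-op. A minimal sketch of the underlying power-of-two rounding, with a hypothetical SpaceAlignment value:

#include <cassert>
#include <cstddef>
#include <cstdio>

// Minimal stand-ins for HotSpot's align_up/align_down, assuming a
// power-of-two alignment (which the surrounding code asserts for page sizes).
static size_t align_up(size_t value, size_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of 2");
  return (value + alignment - 1) & ~(alignment - 1);
}

static size_t align_down(size_t value, size_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of 2");
  return value & ~(alignment - 1);
}

int main() {
  const size_t space_alignment = 512 * 1024;  // hypothetical SpaceAlignment
  // With GenAlignment removed, the young gen lower bound is simply three
  // aligned spaces (eden + two survivors); no second round-up is needed.
  const size_t young_lower_bound = 3 * space_alignment;
  std::printf("young gen lower bound: %zu\n", young_lower_bound);
  std::printf("align_up(5000, 4096)   = %zu\n", align_up(5000, 4096));    // 8192
  std::printf("align_down(5000, 4096) = %zu\n", align_down(5000, 4096));  // 4096
  return 0;
}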
size_t GenArguments::scale_by_NewRatio_aligned(size_t base_size, size_t alignment) {
@@ -69,23 +67,20 @@ static size_t bound_minus_alignment(size_t desired_size,
void GenArguments::initialize_alignments() {
// Initialize card size before initializing alignments
CardTable::initialize_card_size();
SpaceAlignment = GenAlignment = (size_t)Generation::GenGrain;
SpaceAlignment = (size_t)Generation::GenGrain;
HeapAlignment = compute_heap_alignment();
}

void GenArguments::initialize_heap_flags_and_sizes() {
GCArguments::initialize_heap_flags_and_sizes();

assert(GenAlignment != 0, "Generation alignment not set up properly");
assert(HeapAlignment >= GenAlignment,
"HeapAlignment: %zu less than GenAlignment: %zu",
HeapAlignment, GenAlignment);
assert(GenAlignment % SpaceAlignment == 0,
"GenAlignment: %zu not aligned by SpaceAlignment: %zu",
GenAlignment, SpaceAlignment);
assert(HeapAlignment % GenAlignment == 0,
"HeapAlignment: %zu not aligned by GenAlignment: %zu",
HeapAlignment, GenAlignment);
assert(SpaceAlignment != 0, "Generation alignment not set up properly");
assert(HeapAlignment >= SpaceAlignment,
"HeapAlignment: %zu less than SpaceAlignment: %zu",
HeapAlignment, SpaceAlignment);
assert(HeapAlignment % SpaceAlignment == 0,
"HeapAlignment: %zu not aligned by SpaceAlignment: %zu",
HeapAlignment, SpaceAlignment);

// All generational heaps have a young gen; handle those flags here

@@ -106,7 +101,7 @@ void GenArguments::initialize_heap_flags_and_sizes() {

// Make sure NewSize allows an old generation to fit even if set on the command line
if (FLAG_IS_CMDLINE(NewSize) && NewSize >= InitialHeapSize) {
size_t revised_new_size = bound_minus_alignment(NewSize, InitialHeapSize, GenAlignment);
size_t revised_new_size = bound_minus_alignment(NewSize, InitialHeapSize, SpaceAlignment);
log_warning(gc, ergo)("NewSize (%zuk) is equal to or greater than initial heap size (%zuk).  A new "
"NewSize of %zuk will be used to accommodate an old generation.",
NewSize/K, InitialHeapSize/K, revised_new_size/K);
@@ -115,8 +110,8 @@ void GenArguments::initialize_heap_flags_and_sizes() {

// Now take the actual NewSize into account. We will silently increase NewSize
// if the user specified a smaller or unaligned value.
size_t bounded_new_size = bound_minus_alignment(NewSize, MaxHeapSize, GenAlignment);
bounded_new_size = MAX2(smallest_new_size, align_down(bounded_new_size, GenAlignment));
size_t bounded_new_size = bound_minus_alignment(NewSize, MaxHeapSize, SpaceAlignment);
bounded_new_size = MAX2(smallest_new_size, align_down(bounded_new_size, SpaceAlignment));
if (bounded_new_size != NewSize) {
FLAG_SET_ERGO(NewSize, bounded_new_size);
}
@@ -125,7 +120,7 @@ void GenArguments::initialize_heap_flags_and_sizes() {
if (!FLAG_IS_DEFAULT(MaxNewSize)) {
if (MaxNewSize >= MaxHeapSize) {
// Make sure there is room for an old generation
size_t smaller_max_new_size = MaxHeapSize - GenAlignment;
size_t smaller_max_new_size = MaxHeapSize - SpaceAlignment;
if (FLAG_IS_CMDLINE(MaxNewSize)) {
log_warning(gc, ergo)("MaxNewSize (%zuk) is equal to or greater than the entire "
"heap (%zuk).  A new max generation size of %zuk will be used.",
@@ -137,8 +132,8 @@ void GenArguments::initialize_heap_flags_and_sizes() {
}
} else if (MaxNewSize < NewSize) {
FLAG_SET_ERGO(MaxNewSize, NewSize);
} else if (!is_aligned(MaxNewSize, GenAlignment)) {
FLAG_SET_ERGO(MaxNewSize, align_down(MaxNewSize, GenAlignment));
} else if (!is_aligned(MaxNewSize, SpaceAlignment)) {
FLAG_SET_ERGO(MaxNewSize, align_down(MaxNewSize, SpaceAlignment));
}
}

@@ -166,13 +161,13 @@ void GenArguments::initialize_heap_flags_and_sizes() {
// exceed it. Adjust New/OldSize as necessary.
size_t calculated_size = NewSize + OldSize;
double shrink_factor = (double) MaxHeapSize / calculated_size;
size_t smaller_new_size = align_down((size_t)(NewSize * shrink_factor), GenAlignment);
size_t smaller_new_size = align_down((size_t)(NewSize * shrink_factor), SpaceAlignment);
FLAG_SET_ERGO(NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size));

// OldSize is already aligned because above we aligned MaxHeapSize to
// HeapAlignment, and we just made sure that NewSize is aligned to
// GenAlignment. In initialize_flags() we verified that HeapAlignment
// is a multiple of GenAlignment.
// SpaceAlignment. In initialize_flags() we verified that HeapAlignment
// is a multiple of SpaceAlignment.
OldSize = MaxHeapSize - NewSize;
} else {
FLAG_SET_ERGO(MaxHeapSize, align_up(NewSize + OldSize, HeapAlignment));
@@ -200,7 +195,7 @@ void GenArguments::initialize_size_info() {
// Determine maximum size of the young generation.

if (FLAG_IS_DEFAULT(MaxNewSize)) {
max_young_size = scale_by_NewRatio_aligned(MaxHeapSize, GenAlignment);
max_young_size = scale_by_NewRatio_aligned(MaxHeapSize, SpaceAlignment);
// Bound the maximum size by NewSize below (since it historically
// would have been NewSize and because the NewRatio calculation could
// yield a size that is too small) and bound it by MaxNewSize above.
@@ -229,18 +224,18 @@ void GenArguments::initialize_size_info() {
// If NewSize is set on the command line, we should use it as
// the initial size, but make sure it is within the heap bounds.
initial_young_size =
MIN2(max_young_size, bound_minus_alignment(NewSize, InitialHeapSize, GenAlignment));
MinNewSize = bound_minus_alignment(initial_young_size, MinHeapSize, GenAlignment);
MIN2(max_young_size, bound_minus_alignment(NewSize, InitialHeapSize, SpaceAlignment));
MinNewSize = bound_minus_alignment(initial_young_size, MinHeapSize, SpaceAlignment);
} else {
// For the case where NewSize is not set on the command line, use
// NewRatio to size the initial generation size. Use the current
// NewSize as the floor, because if NewRatio is overly large, the resulting
// size can be too small.
initial_young_size =
clamp(scale_by_NewRatio_aligned(InitialHeapSize, GenAlignment), NewSize, max_young_size);
clamp(scale_by_NewRatio_aligned(InitialHeapSize, SpaceAlignment), NewSize, max_young_size);

// Derive MinNewSize from MinHeapSize
MinNewSize = MIN2(scale_by_NewRatio_aligned(MinHeapSize, GenAlignment), initial_young_size);
MinNewSize = MIN2(scale_by_NewRatio_aligned(MinHeapSize, SpaceAlignment), initial_young_size);
}
}

@@ -252,7 +247,7 @@ void GenArguments::initialize_size_info() {
// The maximum old size can be determined from the maximum young
// and maximum heap size since no explicit flags exist
// for setting the old generation maximum.
MaxOldSize = MAX2(MaxHeapSize - max_young_size, GenAlignment);
MaxOldSize = MAX2(MaxHeapSize - max_young_size, SpaceAlignment);
MinOldSize = MIN3(MaxOldSize,
InitialHeapSize - initial_young_size,
MinHeapSize - MinNewSize);
@@ -315,10 +310,10 @@ void GenArguments::assert_flags() {
assert(NewSize >= MinNewSize, "Ergonomics decided on a too small young gen size");
assert(NewSize <= MaxNewSize, "Ergonomics decided on incompatible initial and maximum young gen sizes");
assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young gen and heap sizes");
assert(NewSize % GenAlignment == 0, "NewSize alignment");
assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize % GenAlignment == 0, "MaxNewSize alignment");
assert(NewSize % SpaceAlignment == 0, "NewSize alignment");
assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize % SpaceAlignment == 0, "MaxNewSize alignment");
assert(OldSize + NewSize <= MaxHeapSize, "Ergonomics decided on incompatible generation and heap sizes");
assert(OldSize % GenAlignment == 0, "OldSize alignment");
assert(OldSize % SpaceAlignment == 0, "OldSize alignment");
}

void GenArguments::assert_size_info() {
@@ -327,19 +322,19 @@ void GenArguments::assert_size_info() {
assert(MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young and heap sizes");
assert(MinNewSize <= NewSize, "Ergonomics decided on incompatible minimum and initial young gen sizes");
assert(NewSize <= MaxNewSize, "Ergonomics decided on incompatible initial and maximum young gen sizes");
assert(MinNewSize % GenAlignment == 0, "_min_young_size alignment");
assert(NewSize % GenAlignment == 0, "_initial_young_size alignment");
assert(MaxNewSize % GenAlignment == 0, "MaxNewSize alignment");
assert(MinNewSize <= bound_minus_alignment(MinNewSize, MinHeapSize, GenAlignment),
assert(MinNewSize % SpaceAlignment == 0, "_min_young_size alignment");
assert(NewSize % SpaceAlignment == 0, "_initial_young_size alignment");
assert(MaxNewSize % SpaceAlignment == 0, "MaxNewSize alignment");
assert(MinNewSize <= bound_minus_alignment(MinNewSize, MinHeapSize, SpaceAlignment),
"Ergonomics made minimum young generation larger than minimum heap");
assert(NewSize <= bound_minus_alignment(NewSize, InitialHeapSize, GenAlignment),
assert(NewSize <= bound_minus_alignment(NewSize, InitialHeapSize, SpaceAlignment),
"Ergonomics made initial young generation larger than initial heap");
assert(MaxNewSize <= bound_minus_alignment(MaxNewSize, MaxHeapSize, GenAlignment),
assert(MaxNewSize <= bound_minus_alignment(MaxNewSize, MaxHeapSize, SpaceAlignment),
"Ergonomics made maximum young generation larger than maximum heap");
assert(MinOldSize <= OldSize, "Ergonomics decided on incompatible minimum and initial old gen sizes");
assert(OldSize <= MaxOldSize, "Ergonomics decided on incompatible initial and maximum old gen sizes");
assert(MaxOldSize % GenAlignment == 0, "MaxOldSize alignment");
assert(OldSize % GenAlignment == 0, "OldSize alignment");
assert(MaxOldSize % SpaceAlignment == 0, "MaxOldSize alignment");
assert(OldSize % SpaceAlignment == 0, "OldSize alignment");
assert(MaxHeapSize <= (MaxNewSize + MaxOldSize), "Total maximum heap sizes must be sum of generation maximum sizes");
assert(MinNewSize + MinOldSize <= MinHeapSize, "Minimum generation sizes exceed minimum heap size");
assert(NewSize + OldSize == InitialHeapSize, "Initial generation sizes should match initial heap size");

@@ -35,8 +35,6 @@ extern size_t MaxOldSize;

extern size_t OldSize;

extern size_t GenAlignment;

class GenArguments : public GCArguments {
friend class TestGenCollectorPolicy; // Testing
private:
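The shrink_factor rebalancing in initialize_heap_flags_and_sizes above can be followed with concrete numbers. The values below are made up for the demonstration; only the arithmetic mirrors the hunk.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Worked example: when NewSize + OldSize would exceed MaxHeapSize, NewSize
// is scaled down by MaxHeapSize / (NewSize + OldSize), aligned down to
// SpaceAlignment, and clamped to the young gen lower bound.
int main() {
  const size_t K = 1024, M = 1024 * K;
  const size_t space_alignment = 512 * K;   // hypothetical SpaceAlignment
  const size_t max_heap = 256 * M;
  size_t new_size = 96 * M;
  size_t old_size = 192 * M;                // 96M + 192M = 288M > 256M

  const size_t calculated = new_size + old_size;
  const double shrink_factor = (double)max_heap / (double)calculated;
  size_t smaller_new = (size_t)(new_size * shrink_factor);
  smaller_new &= ~(space_alignment - 1);    // align_down to SpaceAlignment
  new_size = std::max(3 * space_alignment, smaller_new);  // lower bound
  old_size = max_heap - new_size;

  // Prints NewSize = 85M, OldSize = 171M; together exactly MaxHeapSize.
  std::printf("NewSize = %zuM, OldSize = %zuM\n", new_size / M, old_size / M);
  return 0;
}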
@@ -23,6 +23,7 @@

#include "gc/z/zAllocator.hpp"
#include "gc/z/zObjectAllocator.hpp"
#include "gc/z/zPageAge.inline.hpp"

ZAllocatorEden* ZAllocator::_eden;
ZAllocatorForRelocation* ZAllocator::_relocation[ZAllocator::_relocation_allocators];
@@ -47,7 +48,7 @@ ZPageAge ZAllocatorForRelocation::install() {
for (uint i = 0; i < ZAllocator::_relocation_allocators; ++i) {
if (_relocation[i] == nullptr) {
_relocation[i] = this;
return static_cast<ZPageAge>(i + 1);
return to_zpageage(i + 1);
}
}

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,7 @@ class ZPage;

class ZAllocator {
public:
static constexpr uint _relocation_allocators = static_cast<uint>(ZPageAge::old);
static constexpr uint _relocation_allocators = ZPageAgeCount - 1;

protected:
ZObjectAllocator _object_allocator;

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,13 +28,14 @@

#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zHeap.hpp"
#include "gc/z/zPageAge.inline.hpp"

inline ZAllocatorEden* ZAllocator::eden() {
return _eden;
}

inline ZAllocatorForRelocation* ZAllocator::relocation(ZPageAge page_age) {
return _relocation[static_cast<uint>(page_age) - 1];
return _relocation[untype(page_age - 1)];
}

inline ZAllocatorForRelocation* ZAllocator::old() {

@@ -41,6 +41,7 @@
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zJNICritical.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zPageAge.inline.hpp"
#include "gc/z/zPageAllocator.hpp"
#include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zRelocationSetSelector.inline.hpp"
@@ -699,11 +700,10 @@ uint ZGenerationYoung::compute_tenuring_threshold(ZRelocationSetSelectorStats st
uint last_populated_age = 0;
size_t last_populated_live = 0;

for (uint i = 0; i <= ZPageAgeMax; ++i) {
const ZPageAge age = static_cast<ZPageAge>(i);
for (ZPageAge age : ZPageAgeRange()) {
const size_t young_live = stats.small(age).live() + stats.medium(age).live() + stats.large(age).live();
if (young_live > 0) {
last_populated_age = i;
last_populated_age = untype(age);
last_populated_live = young_live;
if (young_live_last > 0) {
young_life_expectancy_sum += double(young_live) / double(young_live_last);
@@ -842,8 +842,8 @@ void ZGenerationYoung::mark_start() {

// Retire allocating pages
ZAllocator::eden()->retire_pages();
for (ZPageAge i = ZPageAge::survivor1; i <= ZPageAge::survivor14; i = static_cast<ZPageAge>(static_cast<uint>(i) + 1)) {
ZAllocator::relocation(i)->retire_pages();
for (ZPageAge age : ZPageAgeRangeSurvivor) {
ZAllocator::relocation(age)->retire_pages();
}

// Reset allocated/reclaimed/used statistics

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
#ifndef SHARE_GC_Z_ZPAGEAGE_HPP
#define SHARE_GC_Z_ZPAGEAGE_HPP

#include "utilities/enumIterator.hpp"
#include "utilities/globalDefinitions.hpp"

enum class ZPageAge : uint8_t {
@@ -45,6 +46,19 @@ enum class ZPageAge : uint8_t {
old
};

constexpr uint ZPageAgeMax = static_cast<uint>(ZPageAge::old);
constexpr uint ZPageAgeCount = static_cast<uint>(ZPageAge::old) + 1;
constexpr ZPageAge ZPageAgeLastPlusOne = static_cast<ZPageAge>(ZPageAgeCount);

ENUMERATOR_RANGE(ZPageAge,
ZPageAge::eden,
ZPageAge::old);

using ZPageAgeRange = EnumRange<ZPageAge>;

constexpr ZPageAgeRange ZPageAgeRangeEden = ZPageAgeRange::create<ZPageAge::eden, ZPageAge::survivor1>();
constexpr ZPageAgeRange ZPageAgeRangeYoung = ZPageAgeRange::create<ZPageAge::eden, ZPageAge::old>();
constexpr ZPageAgeRange ZPageAgeRangeSurvivor = ZPageAgeRange::create<ZPageAge::survivor1, ZPageAge::old>();
constexpr ZPageAgeRange ZPageAgeRangeRelocation = ZPageAgeRange::create<ZPageAge::survivor1, ZPageAgeLastPlusOne>();
constexpr ZPageAgeRange ZPageAgeRangeOld = ZPageAgeRange::create<ZPageAge::old, ZPageAgeLastPlusOne>();

#endif // SHARE_GC_Z_ZPAGEAGE_HPP
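The EnumRange machinery that zPageAge.hpp now pulls in replaces index loops with range-for over the enum. A self-contained approximation of what such an iterator does follows; HotSpot's utilities/enumIterator.hpp provides the real mechanism, and the small age set here is an assumption for the demo.

#include <cstdint>
#include <cstdio>

// Self-contained approximation of iterating a contiguous enum range the way
// EnumRange<ZPageAge> does. Not HotSpot's implementation.
enum class PageAge : uint8_t { eden, survivor1, survivor2, old_gen };

struct PageAgeRange {
  struct Iterator {
    uint8_t raw;
    PageAge operator*() const { return static_cast<PageAge>(raw); }
    Iterator& operator++() { ++raw; return *this; }
    bool operator!=(const Iterator& other) const { return raw != other.raw; }
  };
  uint8_t first, last;  // inclusive bounds
  Iterator begin() const { return {first}; }
  Iterator end() const { return {static_cast<uint8_t>(last + 1)}; }
};

int main() {
  // Analogous to: for (ZPageAge age : ZPageAgeRange()) { ... }
  const PageAgeRange all{static_cast<uint8_t>(PageAge::eden),
                         static_cast<uint8_t>(PageAge::old_gen)};
  for (PageAge age : all) {
    std::printf("age index %u\n", static_cast<unsigned>(age));
  }
  return 0;
}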
@@ -1,12 +1,10 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.  Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
@@ -23,13 +21,32 @@
* questions.
*/

package sun.text.resources;

import java.util.spi.ResourceBundleProvider;

/**
* An interface for the internal locale data provider for which {@code ResourceBundle}
* searches.
*/
public interface JavaTimeSupplementaryProvider extends ResourceBundleProvider {
#ifndef SHARE_GC_Z_ZPAGEAGE_INLINE_HPP
#define SHARE_GC_Z_ZPAGEAGE_INLINE_HPP

#include "gc/z/zPageAge.hpp"

#include "utilities/checkedCast.hpp"

#include <type_traits>

inline uint untype(ZPageAge age) {
return static_cast<uint>(age);
}

inline ZPageAge to_zpageage(uint age) {
assert(age < ZPageAgeCount, "Invalid age");
return static_cast<ZPageAge>(age);
}

inline ZPageAge operator+(ZPageAge age, size_t size) {
const auto size_value = checked_cast<std::underlying_type_t<ZPageAge>>(size);
return to_zpageage(untype(age) + size_value);
}

inline ZPageAge operator-(ZPageAge age, size_t size) {
const auto size_value = checked_cast<std::underlying_type_t<ZPageAge>>(size);
return to_zpageage(untype(age) - size_value);
}

#endif // SHARE_GC_Z_ZPAGEAGE_INLINE_HPP
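The inline helpers above centralize the enum-to-integer conversions that previously appeared as scattered static_casts, and add typed arithmetic with a bounds check. A minimal standalone version of the same idiom, with the names taken from the diff but the enumerator set reduced for brevity:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Standalone version of the untype()/to_zpageage() idiom: all conversions
// between the age enum and raw indices go through two helpers, and
// operator+ gives bounds-checked typed arithmetic. The real definitions
// live in gc/z/zPageAge.inline.hpp.
enum class ZPageAge : uint8_t { eden, survivor1, survivor2, old_age };
constexpr unsigned ZPageAgeCount = static_cast<unsigned>(ZPageAge::old_age) + 1;

inline unsigned untype(ZPageAge age) {
  return static_cast<unsigned>(age);
}

inline ZPageAge to_zpageage(unsigned age) {
  assert(age < ZPageAgeCount && "Invalid age");
  return static_cast<ZPageAge>(age);
}

inline ZPageAge operator+(ZPageAge age, unsigned n) {
  return to_zpageage(untype(age) + n);
}

int main() {
  ZPageAge age = ZPageAge::survivor1;
  std::printf("index of survivor1: %u\n", untype(age));
  ZPageAge next = age + 1u;  // survivor2; to_zpageage asserts the bound
  std::printf("index of next age: %u\n", untype(next));
  return 0;
}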
@@ -488,11 +488,11 @@ public:
}

ZPage* shared(ZPageAge age) {
return _shared[static_cast<uint>(age) - 1];
return _shared[untype(age - 1)];
}

void set_shared(ZPageAge age, ZPage* page) {
_shared[static_cast<uint>(age) - 1] = page;
_shared[untype(age - 1)] = page;
}

ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
@@ -570,11 +570,11 @@ private:

ZPage* target(ZPageAge age) {
return _target[static_cast<uint>(age) - 1];
return _target[untype(age - 1)];
}

void set_target(ZPageAge age, ZPage* page) {
_target[static_cast<uint>(age) - 1] = page;
_target[untype(age - 1)] = page;
}

size_t object_alignment() const {
@@ -1232,12 +1232,12 @@ ZPageAge ZRelocate::compute_to_age(ZPageAge from_age) {
return ZPageAge::old;
}

const uint age = static_cast<uint>(from_age);
const uint age = untype(from_age);
if (age >= ZGeneration::young()->tenuring_threshold()) {
return ZPageAge::old;
}

return static_cast<ZPageAge>(age + 1);
return to_zpageage(age + 1);
}
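With compute_to_age as above, a surviving page's age advances one step per young collection until it reaches the tenuring threshold, at which point the page is promoted to old. A toy trace under an assumed threshold of 2 (the real threshold is computed dynamically or set via ZTenuringThreshold):

#include <cstdint>
#include <cstdio>

// Toy restatement of ZRelocate::compute_to_age: ages below the tenuring
// threshold advance by one; everything at or above it goes to old. The
// threshold and the small age set are assumptions for the demo.
enum class Age : uint8_t { eden, survivor1, survivor2, survivor3, old_age };

static Age compute_to_age(Age from, unsigned tenuring_threshold) {
  const unsigned age = static_cast<unsigned>(from);
  if (age >= tenuring_threshold) {
    return Age::old_age;
  }
  return static_cast<Age>(age + 1);
}

int main() {
  const unsigned threshold = 2;  // assumed tenuring threshold
  for (Age a = Age::eden; a != Age::old_age; a = compute_to_age(a, threshold)) {
    std::printf("relocating a page of age %u\n", static_cast<unsigned>(a));
  }
  // Trace: eden(0) -> survivor1(1) -> survivor2(2) -> old.
  return 0;
}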
class ZFlipAgePagesTask : public ZTask {

@@ -25,6 +25,7 @@
#include "gc/z/zArray.inline.hpp"
#include "gc/z/zForwarding.inline.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAge.inline.hpp"
#include "gc/z/zRelocationSetSelector.inline.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
@@ -117,8 +118,8 @@ void ZRelocationSetSelectorGroup::select_inner() {
const int npages = _live_pages.length();
int selected_from = 0;
int selected_to = 0;
size_t npages_selected[ZPageAgeMax + 1] = { 0 };
size_t selected_live_bytes[ZPageAgeMax + 1] = { 0 };
size_t npages_selected[ZPageAgeCount] = { 0 };
size_t selected_live_bytes[ZPageAgeCount] = { 0 };
size_t selected_forwarding_entries = 0;

size_t from_live_bytes = 0;
@@ -149,8 +150,8 @@ void ZRelocationSetSelectorGroup::select_inner() {
if (diff_reclaimable > _fragmentation_limit) {
selected_from = from;
selected_to = to;
selected_live_bytes[static_cast<uint>(page->age())] += page_live_bytes;
npages_selected[static_cast<uint>(page->age())] += 1;
selected_live_bytes[untype(page->age())] += page_live_bytes;
npages_selected[untype(page->age())] += 1;
selected_forwarding_entries = from_forwarding_entries;
}

@@ -172,7 +173,7 @@ void ZRelocationSetSelectorGroup::select_inner() {
_forwarding_entries = selected_forwarding_entries;

// Update statistics
for (uint i = 0; i <= ZPageAgeMax; ++i) {
for (uint i = 0; i < ZPageAgeCount; ++i) {
_stats[i]._relocate = selected_live_bytes[i];
_stats[i]._npages_selected = npages_selected[i];
}
@@ -200,7 +201,7 @@ void ZRelocationSetSelectorGroup::select() {
}

ZRelocationSetSelectorGroupStats s{};
for (uint i = 0; i <= ZPageAgeMax; ++i) {
for (uint i = 0; i < ZPageAgeCount; ++i) {
s._npages_candidates += _stats[i].npages_candidates();
s._total += _stats[i].total();
s._empty += _stats[i].empty();
@@ -239,8 +240,8 @@ void ZRelocationSetSelector::select() {
ZRelocationSetSelectorStats ZRelocationSetSelector::stats() const {
ZRelocationSetSelectorStats stats;

for (uint i = 0; i <= ZPageAgeMax; ++i) {
const ZPageAge age = static_cast<ZPageAge>(i);
for (ZPageAge age : ZPageAgeRange()) {
const uint i = untype(age);
stats._small[i] = _small.stats(age);
stats._medium[i] = _medium.stats(age);
stats._large[i] = _large.stats(age);

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -62,9 +62,9 @@ class ZRelocationSetSelectorStats {
friend class ZRelocationSetSelector;

private:
ZRelocationSetSelectorGroupStats _small[ZPageAgeMax + 1];
ZRelocationSetSelectorGroupStats _medium[ZPageAgeMax + 1];
ZRelocationSetSelectorGroupStats _large[ZPageAgeMax + 1];
ZRelocationSetSelectorGroupStats _small[ZPageAgeCount];
ZRelocationSetSelectorGroupStats _medium[ZPageAgeCount];
ZRelocationSetSelectorGroupStats _large[ZPageAgeCount];

size_t _has_relocatable_pages;

@@ -90,7 +90,7 @@ private:
ZArray<ZPage*> _live_pages;
ZArray<ZPage*> _not_selected_pages;
size_t _forwarding_entries;
ZRelocationSetSelectorGroupStats _stats[ZPageAgeMax + 1];
ZRelocationSetSelectorGroupStats _stats[ZPageAgeCount];

bool is_disabled();
bool is_selectable();

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "gc/z/zArray.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zPageAge.inline.hpp"
#include "utilities/powerOfTwo.hpp"

inline size_t ZRelocationSetSelectorGroupStats::npages_candidates() const {
@@ -60,15 +61,15 @@ inline bool ZRelocationSetSelectorStats::has_relocatable_pages() const {
}

inline const ZRelocationSetSelectorGroupStats& ZRelocationSetSelectorStats::small(ZPageAge age) const {
return _small[static_cast<uint>(age)];
return _small[untype(age)];
}

inline const ZRelocationSetSelectorGroupStats& ZRelocationSetSelectorStats::medium(ZPageAge age) const {
return _medium[static_cast<uint>(age)];
return _medium[untype(age)];
}

inline const ZRelocationSetSelectorGroupStats& ZRelocationSetSelectorStats::large(ZPageAge age) const {
return _large[static_cast<uint>(age)];
return _large[untype(age)];
}

inline bool ZRelocationSetSelectorGroup::pre_filter_page(const ZPage* page, size_t live_bytes) const {
@@ -113,7 +114,7 @@ inline void ZRelocationSetSelectorGroup::register_live_page(ZPage* page) {
}

const size_t size = page->size();
const uint age = static_cast<uint>(page->age());
const uint age = untype(page->age());
_stats[age]._npages_candidates++;
_stats[age]._total += size;
_stats[age]._live += live;
@@ -122,7 +123,7 @@ inline void ZRelocationSetSelectorGroup::register_live_page(ZPage* page) {
inline void ZRelocationSetSelectorGroup::register_empty_page(ZPage* page) {
const size_t size = page->size();

const uint age = static_cast<uint>(page->age());
const uint age = untype(page->age());
_stats[age]._npages_candidates++;
_stats[age]._total += size;
_stats[age]._empty += size;
@@ -141,7 +142,7 @@ inline size_t ZRelocationSetSelectorGroup::forwarding_entries() const {
}

inline const ZRelocationSetSelectorGroupStats& ZRelocationSetSelectorGroup::stats(ZPageAge age) const {
return _stats[static_cast<uint>(age)];
return _stats[untype(age)];
}

inline void ZRelocationSetSelector::register_live_page(ZPage* page) {
@@ -188,8 +189,7 @@ inline void ZRelocationSetSelector::clear_empty_pages() {

inline size_t ZRelocationSetSelector::total() const {
size_t sum = 0;
for (uint i = 0; i <= ZPageAgeMax; ++i) {
const ZPageAge age = static_cast<ZPageAge>(i);
for (ZPageAge age : ZPageAgeRange()) {
sum += _small.stats(age).total() + _medium.stats(age).total() + _large.stats(age).total();
}
return sum;
@@ -197,8 +197,7 @@ inline size_t ZRelocationSetSelector::total() const {

inline size_t ZRelocationSetSelector::empty() const {
size_t sum = 0;
for (uint i = 0; i <= ZPageAgeMax; ++i) {
const ZPageAge age = static_cast<ZPageAge>(i);
for (ZPageAge age : ZPageAgeRange()) {
sum += _small.stats(age).empty() + _medium.stats(age).empty() + _large.stats(age).empty();
}
return sum;
@@ -206,8 +205,7 @@ inline size_t ZRelocationSetSelector::empty() const {

inline size_t ZRelocationSetSelector::relocate() const {
size_t sum = 0;
for (uint i = 0; i <= ZPageAgeMax; ++i) {
const ZPageAge age = static_cast<ZPageAge>(i);
for (ZPageAge age : ZPageAgeRange()) {
sum += _small.stats(age).relocate() + _medium.stats(age).relocate() + _large.stats(age).relocate();
}
return sum;

@@ -30,6 +30,7 @@
#include "gc/z/zGeneration.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zNMethodTable.hpp"
#include "gc/z/zPageAge.inline.hpp"
#include "gc/z/zPageAllocator.inline.hpp"
#include "gc/z/zRelocationSetSelector.inline.hpp"
#include "gc/z/zStat.hpp"
@@ -1499,9 +1500,7 @@ void ZStatRelocation::print_page_summary() {
summary.relocate += stats.relocate();
};

for (uint i = 0; i <= ZPageAgeMax; ++i) {
const ZPageAge age = static_cast<ZPageAge>(i);

for (ZPageAge age : ZPageAgeRange()) {
account_page_size(small_summary, _selector_stats.small(age));
account_page_size(medium_summary, _selector_stats.medium(age));
account_page_size(large_summary, _selector_stats.large(age));
@@ -1557,13 +1556,13 @@ void ZStatRelocation::print_age_table() {
.center("Large")
.end());

size_t live[ZPageAgeMax + 1] = {};
size_t total[ZPageAgeMax + 1] = {};
size_t live[ZPageAgeCount] = {};
size_t total[ZPageAgeCount] = {};

uint oldest_none_empty_age = 0;

for (uint i = 0; i <= ZPageAgeMax; ++i) {
ZPageAge age = static_cast<ZPageAge>(i);
for (ZPageAge age : ZPageAgeRange()) {
uint i = untype(age);
auto summarize_pages = [&](const ZRelocationSetSelectorGroupStats& stats) {
live[i] += stats.live();
total[i] += stats.total();
@@ -1579,7 +1578,7 @@ void ZStatRelocation::print_age_table() {
}

for (uint i = 0; i <= oldest_none_empty_age; ++i) {
ZPageAge age = static_cast<ZPageAge>(i);
ZPageAge age = to_zpageage(i);

FormatBuffer<> age_str("");
if (age == ZPageAge::eden) {
@@ -1791,8 +1790,7 @@ void ZStatHeap::at_select_relocation_set(const ZRelocationSetSelectorStats& stat
ZLocker<ZLock> locker(&_stat_lock);

size_t live = 0;
for (uint i = 0; i <= ZPageAgeMax; ++i) {
const ZPageAge age = static_cast<ZPageAge>(i);
for (ZPageAge age : ZPageAgeRange()) {
live += stats.small(age).live() + stats.medium(age).live() + stats.large(age).live();
}
_at_mark_end.live = live;

@@ -113,7 +113,7 @@
\
product(int, ZTenuringThreshold, -1, DIAGNOSTIC, \
"Young generation tenuring threshold, -1 for dynamic computation")\
range(-1, static_cast<int>(ZPageAgeMax)) \
range(-1, static_cast<int>(ZPageAgeCount) - 1) \
\
develop(bool, ZVerifyOops, false, \
"Verify accessed oops") \
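The per-age arrays and the ZTenuringThreshold range both change from ZPageAgeMax + 1 style bounds to ZPageAgeCount; the values are identical, but Count-style naming states the array length and the upper flag bound directly. A small sketch with assumed values (the real ZPageAgeMax is the index of ZPageAge::old):

#include <cassert>
#include <cstddef>
#include <cstdio>

// Assumed values for the demo: eden + 14 survivor ages + old.
constexpr unsigned PageAgeMax = 15;                // index of the last age
constexpr unsigned PageAgeCount = PageAgeMax + 1;  // number of valid ages

// Mirrors range(-1, ZPageAgeCount - 1): -1 means dynamic computation.
static bool tenuring_threshold_is_valid(int value) {
  return value >= -1 && value <= (int)PageAgeCount - 1;
}

int main() {
  size_t live[PageAgeCount] = {};  // one slot per age, index = untyped age
  live[0] += 4096;                 // e.g. bytes found live in eden pages
  live[PageAgeMax] += 8192;        // e.g. bytes found live in old pages
  size_t total = 0;
  for (unsigned i = 0; i < PageAgeCount; ++i) {  // idiomatic bound post-rename
    total += live[i];
  }
  assert(tenuring_threshold_is_valid(-1) && !tenuring_threshold_is_valid(16));
  std::printf("total live: %zu\n", total);
  return 0;
}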
@@ -27,6 +27,7 @@
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmClasses.hpp"
#include "code/nmethod.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerEvent.hpp"
@@ -1206,7 +1207,7 @@ C2V_VMENTRY_0(jint, installCode0, (JNIEnv *env, jobject,
assert(JVMCIENV->isa_HotSpotNmethod(installed_code_handle), "wrong type");
// Clear the link to an old nmethod first
JVMCIObject nmethod_mirror = installed_code_handle;
JVMCIENV->invalidate_nmethod_mirror(nmethod_mirror, true, JVMCI_CHECK_0);
JVMCIENV->invalidate_nmethod_mirror(nmethod_mirror, true, nmethod::ChangeReason::JVMCI_replacing_with_new_code, JVMCI_CHECK_0);
} else {
assert(JVMCIENV->isa_InstalledCode(installed_code_handle), "wrong type");
}
@@ -1382,7 +1383,7 @@ C2V_VMENTRY(void, reprofile, (JNIEnv* env, jobject, ARGUMENT_PAIR(method)))

nmethod* code = method->code();
if (code != nullptr) {
code->make_not_entrant("JVMCI reprofile");
code->make_not_entrant(nmethod::ChangeReason::JVMCI_reprofile);
}

MethodData* method_data = method->method_data();
@@ -1397,7 +1398,7 @@ C2V_END

C2V_VMENTRY(void, invalidateHotSpotNmethod, (JNIEnv* env, jobject, jobject hs_nmethod, jboolean deoptimize))
JVMCIObject nmethod_mirror = JVMCIENV->wrap(hs_nmethod);
JVMCIENV->invalidate_nmethod_mirror(nmethod_mirror, deoptimize, JVMCI_CHECK);
JVMCIENV->invalidate_nmethod_mirror(nmethod_mirror, deoptimize, nmethod::ChangeReason::JVMCI_invalidate_nmethod, JVMCI_CHECK);
C2V_END

C2V_VMENTRY_NULL(jlongArray, collectCounters, (JNIEnv* env, jobject))
@@ -1822,7 +1823,7 @@ C2V_VMENTRY(void, materializeVirtualObjects, (JNIEnv* env, jobject, jobject _hs_
if (!fst.current()->is_compiled_frame()) {
JVMCI_THROW_MSG(IllegalStateException, "compiled stack frame expected");
}
fst.current()->cb()->as_nmethod()->make_not_entrant("JVMCI materialize virtual objects");
fst.current()->cb()->as_nmethod()->make_not_entrant(nmethod::ChangeReason::JVMCI_materialize_virtual_object);
}
Deoptimization::deoptimize(thread, *fst.current(), Deoptimization::Reason_none);
// look for the frame again as it has been updated by deopt (pc, deopt state...)

@@ -1463,8 +1463,7 @@ JVMCIPrimitiveArray JVMCIEnv::new_byteArray(int length, JVMCI_TRAPS) {
JVMCIObjectArray JVMCIEnv::new_byte_array_array(int length, JVMCI_TRAPS) {
JavaThread* THREAD = JavaThread::current(); // For exception macros.
if (is_hotspot()) {
Klass* byteArrayArrayKlass = TypeArrayKlass::cast(Universe::byteArrayKlass())->array_klass(CHECK_(JVMCIObject()));
objArrayOop result = ObjArrayKlass::cast(byteArrayArrayKlass) ->allocate(length, CHECK_(JVMCIObject()));
objArrayOop result = oopFactory::new_objArray(Universe::byteArrayKlass(), length, CHECK_(JVMCIObject()));
return wrap(result);
} else {
JNIAccessMark jni(this, THREAD);
@@ -1750,7 +1749,7 @@ void JVMCIEnv::initialize_installed_code(JVMCIObject installed_code, CodeBlob* c
}

void JVMCIEnv::invalidate_nmethod_mirror(JVMCIObject mirror, bool deoptimize, JVMCI_TRAPS) {
void JVMCIEnv::invalidate_nmethod_mirror(JVMCIObject mirror, bool deoptimize, nmethod::ChangeReason change_reason, JVMCI_TRAPS) {
if (mirror.is_null()) {
JVMCI_THROW(NullPointerException);
}
@@ -1773,7 +1772,7 @@ void JVMCIEnv::invalidate_nmethod_mirror(JVMCIObject mirror, bool deoptimize, JV

if (!deoptimize) {
// Prevent future executions of the nmethod but let current executions complete.
nm->make_not_entrant("JVMCI invalidate nmethod mirror");
nm->make_not_entrant(change_reason);

// Do not clear the address field here as the Java code may still
// want to later call this method with deoptimize == true. That requires
@@ -1782,7 +1781,7 @@ void JVMCIEnv::invalidate_nmethod_mirror(JVMCIObject mirror, bool deoptimize, JV
// Deoptimize the nmethod immediately.
DeoptimizationScope deopt_scope;
deopt_scope.mark(nm);
nm->make_not_entrant("JVMCI invalidate nmethod mirror");
nm->make_not_entrant(change_reason);
nm->make_deoptimized();
deopt_scope.deoptimize_marked();

@@ -462,7 +462,7 @@ public:
// field of `mirror` to prevent it from being called.
// If `deoptimize` is true, the nmethod is immediately deoptimized.
// The HotSpotNmethod.address field is zero upon returning.
void invalidate_nmethod_mirror(JVMCIObject mirror, bool deoptimze, JVMCI_TRAPS);
void invalidate_nmethod_mirror(JVMCIObject mirror, bool deoptimze, nmethod::ChangeReason change_reason, JVMCI_TRAPS);

void initialize_installed_code(JVMCIObject installed_code, CodeBlob* cb, JVMCI_TRAPS);

@@ -2184,7 +2184,7 @@ JVMCI::CodeInstallResult JVMCIRuntime::register_method(JVMCIEnv* JVMCIENV,
tty->print_cr("Replacing method %s", method_name);
}
if (old != nullptr) {
old->make_not_entrant("JVMCI register method");
old->make_not_entrant(nmethod::ChangeReason::JVMCI_register_method);
}

LogTarget(Info, nmethod, install) lt;
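invalidate_nmethod_mirror distinguishes two invalidation modes: without deoptimization only future entries into the code are blocked, while with it frames currently executing the code are torn down as well. A schematic sketch of that distinction; all types here are stand-ins, not HotSpot's or JVMCI's:

#include <cstdio>

// Schematic of the two modes: make-not-entrant only vs make-not-entrant
// plus immediate deoptimization of active frames.
struct FakeNmethod {
  bool entrant = true;
  bool deoptimized = false;
};

static void invalidate(FakeNmethod& nm, bool deoptimize) {
  nm.entrant = false;  // make_not_entrant: no new activations
  if (deoptimize) {
    // The real code marks the nmethod in a DeoptimizationScope and then
    // deoptimizes the marked frames; here we only record the state change.
    nm.deoptimized = true;
  }
}

int main() {
  FakeNmethod a, b;
  invalidate(a, /*deoptimize=*/false);  // in-flight executions may complete
  invalidate(b, /*deoptimize=*/true);   // active frames are deoptimized too
  std::printf("a: entrant=%d deoptimized=%d\n", a.entrant, a.deoptimized);
  std::printf("b: entrant=%d deoptimized=%d\n", b.entrant, b.deoptimized);
  return 0;
}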
@@ -189,14 +189,6 @@ static size_t align_to_page_size(size_t size) {
}

void CodeHeap::on_code_mapping(char* base, size_t size) {
#ifdef LINUX
extern void linux_wrap_code(char* base, size_t size);
linux_wrap_code(base, size);
#endif
}

bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_size) {
assert(rs.size() >= committed_size, "reserved < committed");
assert(is_aligned(committed_size, rs.page_size()), "must be page aligned");
@@ -213,7 +205,6 @@ bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_s
return false;
}

on_code_mapping(_memory.low(), _memory.committed_size());
_number_of_committed_segments = size_to_segments(_memory.committed_size());
_number_of_reserved_segments = size_to_segments(_memory.reserved_size());
assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
@@ -250,7 +241,6 @@ bool CodeHeap::expand_by(size_t size) {
}
char* base = _memory.low() + _memory.committed_size();
if (!_memory.expand_by(dm)) return false;
on_code_mapping(base, dm);
size_t i = _number_of_committed_segments;
_number_of_committed_segments = size_to_segments(_memory.committed_size());
assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change");

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -146,9 +146,6 @@ class CodeHeap : public CHeapObj<mtCode> {
void* next_used(HeapBlock* b) const;
HeapBlock* block_start(void* p) const;

// to perform additional actions on creation of executable code
void on_code_mapping(char* base, size_t size);

public:
CodeHeap(const char* name, const CodeBlobType code_blob_type);
@@ -40,41 +40,41 @@
#include "utilities/utf8.hpp"

typeArrayOop oopFactory::new_boolArray(int length, TRAPS) {
return Universe::boolArrayKlass()->allocate(length, THREAD);
return Universe::boolArrayKlass()->allocate_instance(length, THREAD);
}

typeArrayOop oopFactory::new_charArray(int length, TRAPS) {
return Universe::charArrayKlass()->allocate(length, THREAD);
return Universe::charArrayKlass()->allocate_instance(length, THREAD);
}

typeArrayOop oopFactory::new_floatArray(int length, TRAPS) {
return Universe::floatArrayKlass()->allocate(length, THREAD);
return Universe::floatArrayKlass()->allocate_instance(length, THREAD);
}

typeArrayOop oopFactory::new_doubleArray(int length, TRAPS) {
return Universe::doubleArrayKlass()->allocate(length, THREAD);
return Universe::doubleArrayKlass()->allocate_instance(length, THREAD);
}

typeArrayOop oopFactory::new_byteArray(int length, TRAPS) {
return Universe::byteArrayKlass()->allocate(length, THREAD);
return Universe::byteArrayKlass()->allocate_instance(length, THREAD);
}

typeArrayOop oopFactory::new_shortArray(int length, TRAPS) {
return Universe::shortArrayKlass()->allocate(length, THREAD);
return Universe::shortArrayKlass()->allocate_instance(length, THREAD);
}

typeArrayOop oopFactory::new_intArray(int length, TRAPS) {
return Universe::intArrayKlass()->allocate(length, THREAD);
return Universe::intArrayKlass()->allocate_instance(length, THREAD);
}

typeArrayOop oopFactory::new_longArray(int length, TRAPS) {
return Universe::longArrayKlass()->allocate(length, THREAD);
return Universe::longArrayKlass()->allocate_instance(length, THREAD);
}

// create java.lang.Object[]
objArrayOop oopFactory::new_objectArray(int length, TRAPS) {
assert(Universe::objectArrayKlass() != nullptr, "Too early?");
return Universe::objectArrayKlass()->allocate(length, THREAD);
return Universe::objectArrayKlass()->allocate_instance(length, THREAD);
}

typeArrayOop oopFactory::new_charArray(const char* utf8_str, TRAPS) {
@@ -88,7 +88,7 @@ typeArrayOop oopFactory::new_charArray(const char* utf8_str, TRAPS) {

typeArrayOop oopFactory::new_typeArray(BasicType type, int length, TRAPS) {
TypeArrayKlass* klass = Universe::typeArrayKlass(type);
return klass->allocate(length, THREAD);
return klass->allocate_instance(length, THREAD);
}

// Create a Java array that points to Symbol.
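These renames split the two meanings that previously shared the name allocate: creating the Klass metadata object (allocate_klass) versus creating an array instance of that klass (allocate_instance). A minimal illustration of the naming split, using stand-in classes rather than HotSpot's Klass hierarchy:

#include <cstdio>

// Minimal illustration of the rename's intent: one name for class-level
// metadata creation, another for per-object allocation, instead of two
// unrelated overloads both called allocate().
struct ArrayInstance {
  int length;
};

struct ArrayKlass {
  const char* name;

  static ArrayKlass* allocate_klass(const char* name) {  // metadata
    return new ArrayKlass{name};
  }

  ArrayInstance* allocate_instance(int length) const {   // object
    return new ArrayInstance{length};
  }
};

int main() {
  ArrayKlass* k = ArrayKlass::allocate_klass("[I");
  ArrayInstance* arr = k->allocate_instance(10);
  std::printf("klass %s, instance length %d\n", k->name, arr->length);
  delete arr;
  delete k;
  return 0;
}

Distinct names also let the instance-allocating entry points become private, with oopFactory as a friend, as the header hunks below show.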
@@ -3492,7 +3492,7 @@ void InstanceKlass::add_osr_nmethod(nmethod* n) {
for (int l = CompLevel_limited_profile; l < n->comp_level(); l++) {
nmethod *inv = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), l, true);
if (inv != nullptr && inv->is_in_use()) {
inv->make_not_entrant("OSR invalidation of lower levels");
inv->make_not_entrant(nmethod::ChangeReason::OSR_invalidation_of_lower_level);
}
}
}

@@ -1028,7 +1028,7 @@ void Method::set_native_function(address function, bool post_event_flag) {
// If so, we have to make it not_entrant.
nmethod* nm = code(); // Put it into local variable to guard against concurrent updates
if (nm != nullptr) {
nm->make_not_entrant("set native function");
nm->make_not_entrant(nmethod::ChangeReason::set_native_function);
}
}

@@ -44,7 +44,7 @@
#include "runtime/mutexLocker.hpp"
#include "utilities/macros.hpp"

ObjArrayKlass* ObjArrayKlass::allocate(ClassLoaderData* loader_data, int n, Klass* k, Symbol* name, TRAPS) {
ObjArrayKlass* ObjArrayKlass::allocate_klass(ClassLoaderData* loader_data, int n, Klass* k, Symbol* name, TRAPS) {
assert(ObjArrayKlass::header_size() <= InstanceKlass::header_size(),
"array klasses must be same size as InstanceKlass");

@@ -100,7 +100,7 @@ ObjArrayKlass* ObjArrayKlass::allocate_objArray_klass(ClassLoaderData* loader_da
}

// Initialize instance variables
ObjArrayKlass* oak = ObjArrayKlass::allocate(loader_data, n, element_klass, name, CHECK_NULL);
ObjArrayKlass* oak = ObjArrayKlass::allocate_klass(loader_data, n, element_klass, name, CHECK_NULL);

ModuleEntry* module = oak->module();
assert(module != nullptr, "No module entry for array");
@@ -149,7 +149,7 @@ size_t ObjArrayKlass::oop_size(oop obj) const {
return objArrayOop(obj)->object_size();
}

objArrayOop ObjArrayKlass::allocate(int length, TRAPS) {
objArrayOop ObjArrayKlass::allocate_instance(int length, TRAPS) {
check_array_allocation_length(length, arrayOopDesc::max_array_length(T_OBJECT), CHECK_NULL);
size_t size = objArrayOopDesc::object_size(length);
return (objArrayOop)Universe::heap()->array_allocate(this, size, length,
@@ -160,7 +160,7 @@ oop ObjArrayKlass::multi_allocate(int rank, jint* sizes, TRAPS) {
int length = *sizes;
ArrayKlass* ld_klass = lower_dimension();
// If length < 0 allocate will throw an exception.
objArrayOop array = allocate(length, CHECK_NULL);
objArrayOop array = allocate_instance(length, CHECK_NULL);
objArrayHandle h_array (THREAD, array);
if (rank > 1) {
if (length != 0) {

@@ -33,8 +33,10 @@ class ClassLoaderData;
// ObjArrayKlass is the klass for objArrays

class ObjArrayKlass : public ArrayKlass {
friend class VMStructs;
friend class Deoptimization;
friend class JVMCIVMStructs;
friend class oopFactory;
friend class VMStructs;

public:
static const KlassKind Kind = ObjArrayKlassKind;
@@ -47,7 +49,9 @@ class ObjArrayKlass : public ArrayKlass {

// Constructor
ObjArrayKlass(int n, Klass* element_klass, Symbol* name);
static ObjArrayKlass* allocate(ClassLoaderData* loader_data, int n, Klass* k, Symbol* name, TRAPS);
static ObjArrayKlass* allocate_klass(ClassLoaderData* loader_data, int n, Klass* k, Symbol* name, TRAPS);

objArrayOop allocate_instance(int length, TRAPS);
public:
// For dummy objects
ObjArrayKlass() {}
@@ -78,7 +82,6 @@ class ObjArrayKlass : public ArrayKlass {
static ObjArrayKlass* allocate_objArray_klass(ClassLoaderData* loader_data,
int n, Klass* element_klass, TRAPS);

objArrayOop allocate(int length, TRAPS);
oop multi_allocate(int rank, jint* sizes, TRAPS);

// Copying

@@ -50,7 +50,7 @@ TypeArrayKlass* TypeArrayKlass::create_klass(BasicType type,

ClassLoaderData* null_loader_data = ClassLoaderData::the_null_class_loader_data();

TypeArrayKlass* ak = TypeArrayKlass::allocate(null_loader_data, type, sym, CHECK_NULL);
TypeArrayKlass* ak = TypeArrayKlass::allocate_klass(null_loader_data, type, sym, CHECK_NULL);

// Call complete_create_array_klass after all instance variables have been initialized.
complete_create_array_klass(ak, ak->super(), ModuleEntryTable::javabase_moduleEntry(), CHECK_NULL);
@@ -65,7 +65,7 @@ TypeArrayKlass* TypeArrayKlass::create_klass(BasicType type,
return ak;
}

TypeArrayKlass* TypeArrayKlass::allocate(ClassLoaderData* loader_data, BasicType type, Symbol* name, TRAPS) {
TypeArrayKlass* TypeArrayKlass::allocate_klass(ClassLoaderData* loader_data, BasicType type, Symbol* name, TRAPS) {
assert(TypeArrayKlass::header_size() <= InstanceKlass::header_size(),
"array klasses must be same size as InstanceKlass");

@@ -101,7 +101,7 @@ oop TypeArrayKlass::multi_allocate(int rank, jint* last_size, TRAPS) {
// For typeArrays this is only called for the last dimension
assert(rank == 1, "just checking");
int length = *last_size;
return allocate(length, THREAD);
return allocate_instance(length, THREAD);
}

@@ -33,6 +33,8 @@ class ClassLoaderData;
// It contains the type and size of the elements

class TypeArrayKlass : public ArrayKlass {
friend class Deoptimization;
friend class oopFactory;
friend class VMStructs;

public:
@@ -43,7 +45,10 @@ class TypeArrayKlass : public ArrayKlass {

// Constructor
TypeArrayKlass(BasicType type, Symbol* name);
static TypeArrayKlass* allocate(ClassLoaderData* loader_data, BasicType type, Symbol* name, TRAPS);
static TypeArrayKlass* allocate_klass(ClassLoaderData* loader_data, BasicType type, Symbol* name, TRAPS);

typeArrayOop allocate_common(int length, bool do_zero, TRAPS);
typeArrayOop allocate_instance(int length, TRAPS) { return allocate_common(length, true, THREAD); }
public:
TypeArrayKlass() {} // For dummy objects.

@@ -66,8 +71,6 @@ class TypeArrayKlass : public ArrayKlass {
size_t oop_size(oop obj) const;

// Allocation
typeArrayOop allocate_common(int length, bool do_zero, TRAPS);
typeArrayOop allocate(int length, TRAPS) { return allocate_common(length, true, THREAD); }
oop multi_allocate(int rank, jint* sizes, TRAPS);

oop protection_domain() const { return nullptr; }
@@ -464,6 +464,14 @@ class PhaseCFG : public Phase {
Node* catch_cleanup_find_cloned_def(Block* use_blk, Node* def, Block* def_blk, int n_clone_idx);
void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, int n_clone_idx);

// Ensure that n happens at b or above, i.e. at a block that dominates b.
// We expect n to be an orphan node without further inputs.
void ensure_node_is_at_block_or_above(Node* n, Block* b);

// Move node n from its current placement into the end of block b.
// Move also outgoing Mach projections.
void move_node_and_its_projections_to_block(Node* n, Block* b);

// Detect implicit-null-check opportunities. Basically, find null checks
// with suitable memory ops nearby. Use the memory op to do the null check.
// I can generate a memory op if there is not one nearby.
@@ -155,7 +155,6 @@ void IdealGraphPrinter::init(const char* file_name, bool use_multiple_files, boo
// in the mach where kill projections have no users but should
// appear in the dump.
_traverse_outs = true;
_should_send_method = true;
_output = nullptr;
buffer[0] = 0;
_depth = 0;
@@ -300,13 +299,11 @@ void IdealGraphPrinter::print_inline_tree(InlineTree *tree) {
void IdealGraphPrinter::print_inlining() {

// Print inline tree
if (_should_send_method) {
InlineTree *inlineTree = C->ilt();
if (inlineTree != nullptr) {
print_inline_tree(inlineTree);
} else {
// print this method only
}
InlineTree *inlineTree = C->ilt();
if (inlineTree != nullptr) {
print_inline_tree(inlineTree);
} else {
// print this method only
}
}

@@ -382,7 +379,6 @@ void IdealGraphPrinter::begin_method() {

tail(PROPERTIES_ELEMENT);

_should_send_method = true;
this->_current_method = method;

_xml->flush();
@@ -975,7 +971,7 @@ void IdealGraphPrinter::print_graph(const char* name, const frame* fr) {
// Print current ideal graph
void IdealGraphPrinter::print(const char* name, Node* node, GrowableArray<const Node*>& visible_nodes, const frame* fr) {

if (!_current_method || !_should_send_method || node == nullptr) return;
if (!_current_method || node == nullptr) return;

if (name == nullptr) {
stringStream graph_name;

@@ -110,7 +110,6 @@ class IdealGraphPrinter : public CHeapObj<mtCompiler> {
ciMethod *_current_method;
int _depth;
char buffer[2048];
bool _should_send_method;
PhaseChaitin* _chaitin;
bool _traverse_outs;
Compile *C;
@@ -76,6 +76,36 @@ static bool needs_explicit_null_check_for_read(Node *val) {
return true;
}

void PhaseCFG::move_node_and_its_projections_to_block(Node* n, Block* b) {
assert(!is_CFG(n), "cannot move CFG node");
Block* old = get_block_for_node(n);
old->find_remove(n);
b->add_inst(n);
map_node_to_block(n, b);
// Check for Mach projections that also need to be moved.
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
Node* out = n->fast_out(i);
if (!out->is_MachProj()) {
continue;
}
assert(!n->is_MachProj(), "nested projections are not allowed");
move_node_and_its_projections_to_block(out, b);
}
}

void PhaseCFG::ensure_node_is_at_block_or_above(Node* n, Block* b) {
assert(!is_CFG(n), "cannot move CFG node");
Block* current = get_block_for_node(n);
if (current->dominates(b)) {
return; // n is already placed above b, do nothing.
}
// We only expect nodes without further inputs, like MachTemp or load Base.
assert(n->req() == 0 || (n->req() == 1 && n->in(0) == (Node*)C->root()),
"need for recursive hoisting not expected");
assert(b->dominates(current), "precondition: can only move n to b if b dominates n");
move_node_and_its_projections_to_block(n, b);
}
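The two helpers factor out the move-with-projections and dominance-guarded hoist that the implicit-null-check code below previously spelled out inline. A toy model of the same contract, where dominance is faked with block indices on a single path (lower index dominates higher), which is enough to show the behavior:

#include <cstdio>
#include <map>
#include <string>
#include <vector>

// Toy model: moving a node to a block drags its projections along, and the
// hoist is a no-op when the node already sits in a dominating block. Real
// dominance queries and CFG types are HotSpot's; this only mirrors the shape.
struct Node {
  std::string name;
  std::vector<Node*> projections;
};

static std::map<Node*, int> block_of;  // node -> block index

static void move_node_and_its_projections_to_block(Node* n, int b) {
  block_of[n] = b;
  for (Node* proj : n->projections) {
    move_node_and_its_projections_to_block(proj, b);
  }
}

static void ensure_node_is_at_block_or_above(Node* n, int b) {
  if (block_of[n] <= b) {
    return;  // already in a dominating (earlier) block; nothing to do
  }
  move_node_and_its_projections_to_block(n, b);
}

int main() {
  Node flags{"flags-proj", {}};
  Node load{"load", {&flags}};
  block_of[&load] = 5;
  block_of[&flags] = 5;
  ensure_node_is_at_block_or_above(&load, 3);  // hoists load and its projection
  std::printf("load in block %d, proj in block %d\n",
              block_of[&load], block_of[&flags]);
  return 0;
}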
//------------------------------implicit_null_check----------------------------
// Detect implicit-null-check opportunities. Basically, find null checks
// with suitable memory ops nearby. Use the memory op to do the null check.

@@ -160,12 +190,14 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo
Node *m = val->out(i);
if( !m->is_Mach() ) continue;
MachNode *mach = m->as_Mach();
if (mach->barrier_data() != 0) {
if (mach->barrier_data() != 0 &&
!mach->is_late_expanded_null_check_candidate()) {
// Using memory accesses with barriers to perform implicit null checks is
// not supported. These operations might expand into multiple assembly
// instructions during code emission, including new memory accesses (e.g.
// in G1's pre-barrier), which would invalidate the implicit null
// exception table.
// only supported if these are explicitly marked as emitting a candidate
// memory access instruction at their initial address. If not marked as
// such, barrier-tagged operations might expand into one or several memory
// access instructions located at arbitrary offsets from the initial
// address, which would invalidate the implicit null exception table.
continue;
}
was_store = false;

@@ -321,6 +353,14 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo
// Ignore DecodeN val which could be hoisted to where needed.
if( is_decoden ) continue;
}
if (mach->in(j)->is_MachTemp()) {
assert(mach->in(j)->outcnt() == 1, "MachTemp nodes should not be shared");
// Ignore MachTemp inputs, they can be safely hoisted with the candidate.
// MachTemp nodes have no inputs themselves and are only used to reserve
// a scratch register for the implementation of the node (e.g. in
// late-expanded GC barriers).
continue;
}
// Block of memory-op input
Block *inb = get_block_for_node(mach->in(j));
Block *b = block; // Start from nul check

@@ -388,38 +428,24 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo
// Hoist it up to the end of the test block together with its inputs if they exist.
for (uint i = 2; i < val->req(); i++) {
// DecodeN has 2 regular inputs + optional MachTemp or load Base inputs.
Node *temp = val->in(i);
Block *tempb = get_block_for_node(temp);
if (!tempb->dominates(block)) {
assert(block->dominates(tempb), "sanity check: temp node placement");
// We only expect nodes without further inputs, like MachTemp or load Base.
assert(temp->req() == 0 || (temp->req() == 1 && temp->in(0) == (Node*)C->root()),
"need for recursive hoisting not expected");
tempb->find_remove(temp);
block->add_inst(temp);
map_node_to_block(temp, block);
}
}
valb->find_remove(val);
block->add_inst(val);
map_node_to_block(val, block);
// DecodeN on x86 may kill flags. Check for flag-killing projections
// that also need to be hoisted.
for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
Node* n = val->fast_out(j);
if( n->is_MachProj() ) {
get_block_for_node(n)->find_remove(n);
block->add_inst(n);
map_node_to_block(n, block);
}
// Inputs of val may already be early enough, but if not move them together with val.
ensure_node_is_at_block_or_above(val->in(i), block);
}
move_node_and_its_projections_to_block(val, block);
}
}

// Move any MachTemp inputs to the end of the test block.
for (uint i = 0; i < best->req(); i++) {
Node* n = best->in(i);
if (n == nullptr || !n->is_MachTemp()) {
continue;
}
ensure_node_is_at_block_or_above(n, block);
}

// Hoist the memory candidate up to the end of the test block.
Block *old_block = get_block_for_node(best);
old_block->find_remove(best);
block->add_inst(best);
map_node_to_block(best, block);
move_node_and_its_projections_to_block(best, block);

// Move the control dependence if it is pinned to not-null block.
// Don't change it in other cases: null or dominating control.

@@ -429,17 +455,6 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo
best->set_req(0, proj->in(0)->in(0));
}

// Check for flag-killing projections that also need to be hoisted
// Should be DU safe because no edge updates.
for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
Node* n = best->fast_out(j);
if( n->is_MachProj() ) {
get_block_for_node(n)->find_remove(n);
block->add_inst(n);
map_node_to_block(n, block);
}
}

// proj==Op_True --> ne test; proj==Op_False --> eq test.
// One of two graph shapes got matched:
// (IfTrue (If (Bool NE (CmpP ptr null))))
@@ -458,7 +458,7 @@ Node* PhaseIdealLoop::loop_iv_incr(Node* incr, Node* x, IdealLoopTree* loop, Nod
return incr;
}

Node* PhaseIdealLoop::loop_iv_stride(Node* incr, IdealLoopTree* loop, Node*& xphi) {
Node* PhaseIdealLoop::loop_iv_stride(Node* incr, Node*& xphi) {
assert(incr->Opcode() == Op_AddI || incr->Opcode() == Op_AddL, "caller resp.");
// Get merge point
xphi = incr->in(1);

@@ -474,7 +474,7 @@ Node* PhaseIdealLoop::loop_iv_stride(Node* incr, IdealLoopTree* loop, Node*& xph
return stride;
}

PhiNode* PhaseIdealLoop::loop_iv_phi(Node* xphi, Node* phi_incr, Node* x, IdealLoopTree* loop) {
PhiNode* PhaseIdealLoop::loop_iv_phi(Node* xphi, Node* phi_incr, Node* x) {
if (!xphi->is_Phi()) {
return nullptr; // Too much math on the trip counter
}

@@ -1481,11 +1481,11 @@ void PhaseIdealLoop::check_counted_loop_shape(IdealLoopTree* loop, Node* x, Basi
assert(incr != nullptr && incr->Opcode() == Op_Add(bt), "no incr");

Node* xphi = nullptr;
Node* stride = loop_iv_stride(incr, loop, xphi);
Node* stride = loop_iv_stride(incr, xphi);

assert(stride != nullptr, "no stride");

PhiNode* phi = loop_iv_phi(xphi, phi_incr, x, loop);
PhiNode* phi = loop_iv_phi(xphi, phi_incr, x);

assert(phi != nullptr && phi->in(LoopNode::LoopBackControl) == incr, "No phi");

@@ -1650,7 +1650,7 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_
assert(incr->Opcode() == Op_Add(iv_bt), "wrong increment code");

Node* xphi = nullptr;
Node* stride = loop_iv_stride(incr, loop, xphi);
Node* stride = loop_iv_stride(incr, xphi);

if (stride == nullptr) {
return false;

@@ -1664,7 +1664,7 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_
jlong stride_con = stride->get_integer_as_long(iv_bt);
assert(stride_con != 0, "missed some peephole opt");

PhiNode* phi = loop_iv_phi(xphi, phi_incr, x, loop);
PhiNode* phi = loop_iv_phi(xphi, phi_incr, x);

if (phi == nullptr ||
(trunc1 == nullptr && phi->in(LoopNode::LoopBackControl) != incr) ||

@@ -1280,8 +1280,8 @@ public:
Node* loop_exit_control(Node* x, IdealLoopTree* loop);
Node* loop_exit_test(Node* back_control, IdealLoopTree* loop, Node*& incr, Node*& limit, BoolTest::mask& bt, float& cl_prob);
Node* loop_iv_incr(Node* incr, Node* x, IdealLoopTree* loop, Node*& phi_incr);
Node* loop_iv_stride(Node* incr, IdealLoopTree* loop, Node*& xphi);
PhiNode* loop_iv_phi(Node* xphi, Node* phi_incr, Node* x, IdealLoopTree* loop);
Node* loop_iv_stride(Node* incr, Node*& xphi);
PhiNode* loop_iv_phi(Node* xphi, Node* phi_incr, Node* x);

bool is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_bt);
@@ -1676,6 +1676,10 @@ bool PhaseIdealLoop::safe_for_if_replacement(const Node* dom) const {
// like various versions of induction variable+offset. Clone the
// computation per usage to allow it to sink out of the loop.
void PhaseIdealLoop::try_sink_out_of_loop(Node* n) {
bool is_raw_to_oop_cast = n->is_ConstraintCast() &&
n->in(1)->bottom_type()->isa_rawptr() &&
!n->bottom_type()->isa_rawptr();

if (has_ctrl(n) &&
!n->is_Phi() &&
!n->is_Bool() &&

@@ -1685,7 +1689,9 @@ void PhaseIdealLoop::try_sink_out_of_loop(Node* n) {
!n->is_OpaqueNotNull() &&
!n->is_OpaqueInitializedAssertionPredicate() &&
!n->is_OpaqueTemplateAssertionPredicate() &&
!n->is_Type()) {
!is_raw_to_oop_cast && // don't extend live ranges of raw oops
(KillPathsReachableByDeadTypeNode || !n->is_Type())
) {
Node *n_ctrl = get_ctrl(n);
IdealLoopTree *n_loop = get_loop(n_ctrl);

@@ -4271,13 +4277,13 @@ bool PhaseIdealLoop::duplicate_loop_backedge(IdealLoopTree *loop, Node_List &old
}
assert(in->Opcode() == Op_AddI, "wrong increment code");
Node* xphi = nullptr;
Node* stride = loop_iv_stride(in, loop, xphi);
Node* stride = loop_iv_stride(in, xphi);

if (stride == nullptr) {
continue;
}

PhiNode* phi = loop_iv_phi(xphi, nullptr, head, loop);
PhiNode* phi = loop_iv_phi(xphi, nullptr, head);
if (phi == nullptr ||
(trunc1 == nullptr && phi->in(LoopNode::LoopBackControl) != incr) ||
(trunc1 != nullptr && phi->in(LoopNode::LoopBackControl) != trunc1)) {
@@ -386,6 +386,13 @@ public:
// Returns true if this node is a check that can be implemented with a trap.
virtual bool is_TrapBasedCheckNode() const { return false; }

// Whether this node is expanded during code emission into a sequence of
// instructions and the first instruction can perform an implicit null check.
virtual bool is_late_expanded_null_check_candidate() const {
return false;
}

void set_removed() { add_flag(Flag_is_removed_by_peephole); }
bool get_removed() { return (flags() & Flag_is_removed_by_peephole) != 0; }
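The new hook defaults to false, so a platform or GC backend has to opt in explicitly. A standalone sketch of the intended override shape (ToyMachNode and ToyBarrieredLoad are illustrative names; the real base class is MachNode in node.hpp):

struct ToyMachNode {
  virtual ~ToyMachNode() = default;
  // Default mirrors the patch: not a candidate unless explicitly marked.
  virtual bool is_late_expanded_null_check_candidate() const { return false; }
};

struct ToyBarrieredLoad : ToyMachNode {
  bool is_late_expanded_null_check_candidate() const override {
    // Legal only if code emission for this node is guaranteed to place the
    // faulting memory access at the node's initial address, with any
    // barrier instructions emitted after it.
    return true;
  }
};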
@@ -2407,7 +2407,6 @@ void PhaseMacroExpand::eliminate_macro_nodes() {
}
}
// Next, attempt to eliminate allocations
_has_locks = false;
progress = true;
while (progress) {
progress = false;

@@ -2431,7 +2430,6 @@
case Node::Class_Lock:
case Node::Class_Unlock:
assert(!n->as_AbstractLock()->is_eliminated(), "sanity");
_has_locks = true;
break;
case Node::Class_ArrayCopy:
break;

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -83,9 +83,6 @@ private:
// projections extracted from a call node
CallProjections _callprojs;

// Additional data collected during macro expansion
bool _has_locks;

void expand_allocate(AllocateNode *alloc);
void expand_allocate_array(AllocateArrayNode *alloc);
void expand_allocate_common(AllocateNode* alloc,

@@ -199,7 +196,7 @@ private:
Node* make_arraycopy_load(ArrayCopyNode* ac, intptr_t offset, Node* ctl, Node* mem, BasicType ft, const Type *ftype, AllocateNode *alloc);

public:
PhaseMacroExpand(PhaseIterGVN &igvn) : Phase(Macro_Expand), _igvn(igvn), _has_locks(false) {
PhaseMacroExpand(PhaseIterGVN &igvn) : Phase(Macro_Expand), _igvn(igvn) {
_igvn.set_delay_transform(true);
}

@@ -2015,8 +2015,10 @@ void PhaseOutput::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_s

// Handle implicit null exception table updates
if (n->is_MachNullCheck()) {
assert(n->in(1)->as_Mach()->barrier_data() == 0,
"Implicit null checks on memory accesses with barriers are not yet supported");
MachNode* access = n->in(1)->as_Mach();
assert(access->barrier_data() == 0 ||
access->is_late_expanded_null_check_candidate(),
"Implicit null checks on memory accesses with barriers are only supported on nodes explicitly marked as null-check candidates");
uint block_num = block->non_connector_successor(0)->_pre_order;
_inc_table.append(inct_starts[inct_cnt++], blk_labels[block_num].loc_pos());
continue;
@@ -318,8 +318,10 @@ bool LibraryCallKit::inline_vector_nary_operation(int n) {
const TypeInstPtr* elem_klass = gvn().type(argument(3))->isa_instptr();
const TypeInt* vlen = gvn().type(argument(4))->isa_int();

if (opr == nullptr || vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr ||
!opr->is_con() || vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con()) {
if (opr == nullptr || !opr->is_con() ||
vector_klass == nullptr || vector_klass->const_oop() == nullptr ||
elem_klass == nullptr || elem_klass->const_oop() == nullptr ||
vlen == nullptr || !vlen->is_con()) {
log_if_needed(" ** missing constant: opr=%s vclass=%s etype=%s vlen=%s",
NodeClassNames[argument(0)->Opcode()],
NodeClassNames[argument(1)->Opcode()],

@@ -587,7 +589,11 @@ bool LibraryCallKit::inline_vector_mask_operation() {
const TypeInt* vlen = gvn().type(argument(3))->isa_int();
Node* mask = argument(4);

if (mask_klass == nullptr || elem_klass == nullptr || mask->is_top() || vlen == nullptr) {
if (mask_klass == nullptr || mask_klass->const_oop() == nullptr ||
elem_klass == nullptr || elem_klass->const_oop() == nullptr ||
vlen == nullptr || !vlen->is_con() ||
oper == nullptr || !oper->is_con() ||
mask->is_top()) {
return false; // dead code
}

@@ -647,9 +653,11 @@ bool LibraryCallKit::inline_vector_frombits_coerced() {
// MODE_BITS_COERCED_LONG_TO_MASK for VectorMask.fromLong operation.
const TypeInt* mode = gvn().type(argument(5))->isa_int();

if (vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr || mode == nullptr ||
bits_type == nullptr || vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr ||
!vlen->is_con() || !mode->is_con()) {
if (vector_klass == nullptr || vector_klass->const_oop() == nullptr ||
elem_klass == nullptr || elem_klass->const_oop() == nullptr ||
vlen == nullptr || !vlen->is_con() ||
bits_type == nullptr ||
mode == nullptr || !mode->is_con()) {
log_if_needed(" ** missing constant: vclass=%s etype=%s vlen=%s bitwise=%s",
NodeClassNames[argument(0)->Opcode()],
NodeClassNames[argument(1)->Opcode()],

@@ -775,8 +783,10 @@ bool LibraryCallKit::inline_vector_mem_operation(bool is_store) {
const TypeInt* vlen = gvn().type(argument(2))->isa_int();
const TypeInt* from_ms = gvn().type(argument(6))->isa_int();

if (vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr || !from_ms->is_con() ||
vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con()) {
if (vector_klass == nullptr || vector_klass->const_oop() == nullptr ||
elem_klass == nullptr || elem_klass->const_oop() == nullptr ||
vlen == nullptr || !vlen->is_con() ||
from_ms == nullptr || !from_ms->is_con()) {
log_if_needed(" ** missing constant: vclass=%s etype=%s vlen=%s from_ms=%s",
NodeClassNames[argument(0)->Opcode()],
NodeClassNames[argument(1)->Opcode()],

@@ -983,9 +993,11 @@ bool LibraryCallKit::inline_vector_mem_masked_operation(bool is_store) {
const TypeInt* vlen = gvn().type(argument(3))->isa_int();
const TypeInt* from_ms = gvn().type(argument(7))->isa_int();

if (vector_klass == nullptr || mask_klass == nullptr || elem_klass == nullptr || vlen == nullptr ||
vector_klass->const_oop() == nullptr || mask_klass->const_oop() == nullptr || from_ms == nullptr ||
elem_klass->const_oop() == nullptr || !vlen->is_con() || !from_ms->is_con()) {
if (vector_klass == nullptr || vector_klass->const_oop() == nullptr ||
mask_klass == nullptr || mask_klass->const_oop() == nullptr ||
elem_klass == nullptr || elem_klass->const_oop() == nullptr ||
vlen == nullptr || !vlen->is_con() ||
from_ms == nullptr || !from_ms->is_con()) {
log_if_needed(" ** missing constant: vclass=%s mclass=%s etype=%s vlen=%s from_ms=%s",
NodeClassNames[argument(0)->Opcode()],
NodeClassNames[argument(1)->Opcode()],

@@ -1222,8 +1234,10 @@ bool LibraryCallKit::inline_vector_gather_scatter(bool is_scatter) {
const TypeInt* vlen = gvn().type(argument(3))->isa_int();
const TypeInstPtr* vector_idx_klass = gvn().type(argument(4))->isa_instptr();

if (vector_klass == nullptr || elem_klass == nullptr || vector_idx_klass == nullptr || vlen == nullptr ||
vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || vector_idx_klass->const_oop() == nullptr || !vlen->is_con()) {
if (vector_klass == nullptr || vector_klass->const_oop() == nullptr ||
elem_klass == nullptr || elem_klass->const_oop() == nullptr ||
vlen == nullptr || !vlen->is_con() ||
vector_idx_klass == nullptr || vector_idx_klass->const_oop() == nullptr) {
log_if_needed(" ** missing constant: vclass=%s etype=%s vlen=%s viclass=%s",
NodeClassNames[argument(0)->Opcode()],
NodeClassNames[argument(2)->Opcode()],

@@ -1409,8 +1423,10 @@ bool LibraryCallKit::inline_vector_reduction() {
const TypeInstPtr* elem_klass = gvn().type(argument(3))->isa_instptr();
const TypeInt* vlen = gvn().type(argument(4))->isa_int();

if (opr == nullptr || vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr ||
!opr->is_con() || vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con()) {
if (opr == nullptr || !opr->is_con() ||
vector_klass == nullptr || vector_klass->const_oop() == nullptr ||
elem_klass == nullptr || elem_klass->const_oop() == nullptr ||
vlen == nullptr || !vlen->is_con()) {
log_if_needed(" ** missing constant: opr=%s vclass=%s etype=%s vlen=%s",
NodeClassNames[argument(0)->Opcode()],
NodeClassNames[argument(1)->Opcode()],

@@ -1547,8 +1563,10 @@ bool LibraryCallKit::inline_vector_test() {
const TypeInstPtr* elem_klass = gvn().type(argument(2))->isa_instptr();
const TypeInt* vlen = gvn().type(argument(3))->isa_int();

if (cond == nullptr || vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr ||
!cond->is_con() || vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con()) {
if (cond == nullptr || !cond->is_con() ||
vector_klass == nullptr || vector_klass->const_oop() == nullptr ||
elem_klass == nullptr || elem_klass->const_oop() == nullptr ||
vlen == nullptr || !vlen->is_con()) {
log_if_needed(" ** missing constant: cond=%s vclass=%s etype=%s vlen=%s",
NodeClassNames[argument(0)->Opcode()],
NodeClassNames[argument(1)->Opcode()],

@@ -2505,10 +2523,10 @@ bool LibraryCallKit::inline_vector_extract() {
const TypeInt* vlen = gvn().type(argument(2))->isa_int();
const TypeInt* idx = gvn().type(argument(4))->isa_int();

if (vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr || idx == nullptr) {
return false; // dead code
}
if (vector_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con()) {
if (vector_klass == nullptr || vector_klass->const_oop() == nullptr ||
elem_klass == nullptr || elem_klass->const_oop() == nullptr ||
vlen == nullptr || !vlen->is_con() ||
idx == nullptr || !idx->is_con()) {
log_if_needed(" ** missing constant: vclass=%s etype=%s vlen=%s",
NodeClassNames[argument(0)->Opcode()],
NodeClassNames[argument(1)->Opcode()],

@@ -2811,9 +2829,11 @@ bool LibraryCallKit::inline_vector_compress_expand() {
const TypeInstPtr* elem_klass = gvn().type(argument(3))->isa_instptr();
const TypeInt* vlen = gvn().type(argument(4))->isa_int();

if (vector_klass == nullptr || elem_klass == nullptr || mask_klass == nullptr || vlen == nullptr ||
vector_klass->const_oop() == nullptr || mask_klass->const_oop() == nullptr ||
elem_klass->const_oop() == nullptr || !vlen->is_con() || !opr->is_con()) {
if (opr == nullptr || !opr->is_con() ||
vector_klass == nullptr || vector_klass->const_oop() == nullptr ||
mask_klass == nullptr || mask_klass->const_oop() == nullptr ||
elem_klass == nullptr || elem_klass->const_oop() == nullptr ||
vlen == nullptr || !vlen->is_con()) {
log_if_needed(" ** missing constant: opr=%s vclass=%s mclass=%s etype=%s vlen=%s",
NodeClassNames[argument(0)->Opcode()],
NodeClassNames[argument(1)->Opcode()],

@@ -2892,9 +2912,9 @@ bool LibraryCallKit::inline_index_vector() {
const TypeInstPtr* elem_klass = gvn().type(argument(1))->isa_instptr();
const TypeInt* vlen = gvn().type(argument(2))->isa_int();

if (vector_klass == nullptr || elem_klass == nullptr || vlen == nullptr ||
vector_klass->const_oop() == nullptr || !vlen->is_con() ||
elem_klass->const_oop() == nullptr) {
if (vector_klass == nullptr || vector_klass->const_oop() == nullptr ||
elem_klass == nullptr || elem_klass->const_oop() == nullptr ||
vlen == nullptr || !vlen->is_con() ) {
log_if_needed(" ** missing constant: vclass=%s etype=%s vlen=%s",
NodeClassNames[argument(0)->Opcode()],
NodeClassNames[argument(1)->Opcode()],

@@ -3026,8 +3046,9 @@ bool LibraryCallKit::inline_index_partially_in_upper_range() {
const TypeInstPtr* elem_klass = gvn().type(argument(1))->isa_instptr();
const TypeInt* vlen = gvn().type(argument(2))->isa_int();

if (mask_klass == nullptr || elem_klass == nullptr || vlen == nullptr ||
mask_klass->const_oop() == nullptr || elem_klass->const_oop() == nullptr || !vlen->is_con()) {
if (mask_klass == nullptr || mask_klass->const_oop() == nullptr ||
elem_klass == nullptr || elem_klass->const_oop() == nullptr ||
vlen == nullptr || !vlen->is_con()) {
log_if_needed(" ** missing constant: mclass=%s etype=%s vlen=%s",
NodeClassNames[argument(0)->Opcode()],
NodeClassNames[argument(1)->Opcode()],
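All of the guard rewrites above follow the same per-argument pattern: bail out of intrinsification when an argument's type information is missing or the argument is not a compile-time constant, keeping each argument's conditions together instead of scattering them across the expression. A standalone sketch of that shape (toy types, not the LibraryCallKit API):

#include <cstdio>

struct ToyConst { bool is_con; };

// "missing constant" in the sense used by log_if_needed above:
// no type info at all, or type info that is not a constant.
static bool missing_constant(const ToyConst* t) {
  return t == nullptr || !t->is_con;
}

static bool toy_inline_intrinsic(const ToyConst* vector_klass,
                                 const ToyConst* elem_klass,
                                 const ToyConst* vlen) {
  if (missing_constant(vector_klass) ||
      missing_constant(elem_klass) ||
      missing_constant(vlen)) {
    std::printf(" ** missing constant\n");
    return false; // not a candidate for intrinsification
  }
  return true;
}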
@@ -2285,9 +2285,11 @@ JNI_ENTRY(jobjectArray, jni_NewObjectArray(JNIEnv *env, jsize length, jclass ele
jobjectArray ret = nullptr;
DT_RETURN_MARK(NewObjectArray, jobjectArray, (const jobjectArray&)ret);
Klass* ek = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(elementClass));
Klass* ak = ek->array_klass(CHECK_NULL);
ObjArrayKlass::cast(ak)->initialize(CHECK_NULL);
objArrayOop result = ObjArrayKlass::cast(ak)->allocate(length, CHECK_NULL);

// Make sure bottom_klass is initialized.
ek->initialize(CHECK_NULL);
objArrayOop result = oopFactory::new_objArray(ek, length, CHECK_NULL);

oop initial_value = JNIHandles::resolve(initialElement);
if (initial_value != nullptr) { // array already initialized with null
for (int index = 0; index < length; index++) {
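From the caller's side nothing changes: the entry point keeps its JNI contract while the allocation path moves to oopFactory::new_objArray and the element class is initialized up front. A small usage sketch in plain JNI (error handling abbreviated):

#include <jni.h>

// Allocates a String[] of the given length, each element initialized to "".
static jobjectArray make_string_array(JNIEnv* env, jsize length) {
  jclass string_cls = env->FindClass("java/lang/String");
  if (string_cls == nullptr) {
    return nullptr; // exception pending
  }
  jstring initial = env->NewStringUTF("");
  if (initial == nullptr) {
    return nullptr; // exception pending
  }
  return env->NewObjectArray(length, string_cls, initial);
}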
@@ -12862,16 +12862,17 @@ myInit() {
parameters uniquely identify the current location
(where the exception was detected) and allow
the mapping to source file and line number when that information is
available. The <code>exception</code> field identifies the thrown
available. The <code>exception</code> parameter identifies the thrown
exception object. The <code>catch_method</code>
and <code>catch_location</code> identify the location of the catch clause,
if any, that handles the thrown exception. If there is no such catch clause,
each field is set to 0. There is no guarantee that the thread will ever
the <code>catch_method</code> is set to null and the <code>catch_location</code> is set to 0.
There is no guarantee that the thread will ever
reach this catch clause. If there are native methods on the call stack
between the throw location and the catch clause, the exception may
be reset by one of those native methods.
Similarly, exceptions that are reported as uncaught (<code>catch_klass</code>
et al. set to 0) may in fact be caught by native code.
Similarly, exceptions that are reported as uncaught (<code>catch_method</code>
set to null) may in fact be caught by native code.
Agents can check for these occurrences by monitoring
<eventlink id="ExceptionCatch"></eventlink> events.
Note that finally clauses are implemented as catch and re-throw. Therefore they

@@ -12960,7 +12961,7 @@ myInit() {
available. For exceptions caught in a Java programming language method, the
<code>exception</code> object identifies the exception object. Exceptions
caught in native methods are not necessarily available by the time the
exception catch is reported, so the <code>exception</code> field is set
exception catch is reported, so the <code>exception</code> parameter is set
to null.
</description>
<origin>jvmdi</origin>
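The corrected wording maps directly onto the event's C callback. A sketch of an agent-side handler following the clarified semantics (the signature is the one JVMTI specifies for the Exception event):

#include <jvmti.h>

static void JNICALL
onException(jvmtiEnv* jvmti, JNIEnv* jni, jthread thread,
            jmethodID method, jlocation location, jobject exception,
            jmethodID catch_method, jlocation catch_location) {
  if (catch_method == nullptr) {
    // Reported as uncaught so far; native code may still catch it,
    // so a careful agent also monitors ExceptionCatch events.
  } else {
    // catch_method/catch_location identify the candidate catch clause,
    // though the thread is not guaranteed to ever reach it.
  }
}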
@@ -28,6 +28,7 @@
#include "code/location.hpp"
#include "jni.h"
#include "jvm.h"
#include "memory/oopFactory.hpp"
#include "oops/klass.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "prims/vectorSupport.hpp"

@@ -109,9 +110,7 @@ Handle VectorSupport::allocate_vector_payload_helper(InstanceKlass* ik, frame* f
int elem_size = type2aelembytes(elem_bt);

// On-heap vector values are represented as primitive arrays.
TypeArrayKlass* tak = Universe::typeArrayKlass(elem_bt);

typeArrayOop arr = tak->allocate(num_elem, CHECK_NH); // safepoint
typeArrayOop arr = oopFactory::new_typeArray(elem_bt, num_elem, CHECK_NH); // safepoint

if (location.is_register()) {
// Value was in a callee-saved register.
@@ -583,28 +583,6 @@ WB_ENTRY(jboolean, WB_G1HasRegionsToUncommit(JNIEnv* env, jobject o))
THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_G1HasRegionsToUncommit: G1 GC is not enabled");
WB_END

#endif // INCLUDE_G1GC

#if INCLUDE_PARALLELGC

WB_ENTRY(jlong, WB_PSVirtualSpaceAlignment(JNIEnv* env, jobject o))
if (UseParallelGC) {
return GenAlignment;
}
THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_PSVirtualSpaceAlignment: Parallel GC is not enabled");
WB_END

WB_ENTRY(jlong, WB_PSHeapGenerationAlignment(JNIEnv* env, jobject o))
if (UseParallelGC) {
return GenAlignment;
}
THROW_MSG_0(vmSymbols::java_lang_UnsupportedOperationException(), "WB_PSHeapGenerationAlignment: Parallel GC is not enabled");
WB_END

#endif // INCLUDE_PARALLELGC

#if INCLUDE_G1GC

WB_ENTRY(jobject, WB_G1AuxiliaryMemoryUsage(JNIEnv* env))
if (UseG1GC) {
ResourceMark rm(THREAD);

@@ -794,7 +772,7 @@ class VM_WhiteBoxDeoptimizeFrames : public VM_WhiteBoxOperation {
if (_make_not_entrant) {
nmethod* nm = CodeCache::find_nmethod(f->pc());
assert(nm != nullptr, "did not find nmethod");
nm->make_not_entrant("Whitebox deoptimization");
nm->make_not_entrant(nmethod::ChangeReason::whitebox_deoptimization);
}
++_result;
}

@@ -1097,6 +1075,22 @@ bool WhiteBox::validate_cgroup(bool cgroups_v2_enabled,
}
#endif

bool WhiteBox::is_asan_enabled() {
#ifdef ADDRESS_SANITIZER
return true;
#else
return false;
#endif
}

bool WhiteBox::is_ubsan_enabled() {
#ifdef UNDEFINED_BEHAVIOR_SANITIZER
return true;
#else
return false;
#endif
}

bool WhiteBox::compile_method(Method* method, int comp_level, int bci, JavaThread* THREAD) {
// Screen for unavailable/bad comp level or null method
AbstractCompiler* comp = CompileBroker::compiler(comp_level);

@@ -1908,6 +1902,14 @@ WB_ENTRY(jboolean, WB_IsMonitorInflated(JNIEnv* env, jobject wb, jobject obj))
return (jboolean) obj_oop->mark().has_monitor();
WB_END

WB_ENTRY(jboolean, WB_IsAsanEnabled(JNIEnv* env))
return (jboolean) WhiteBox::is_asan_enabled();
WB_END

WB_ENTRY(jboolean, WB_IsUbsanEnabled(JNIEnv* env))
return (jboolean) WhiteBox::is_ubsan_enabled();
WB_END

WB_ENTRY(jlong, WB_getInUseMonitorCount(JNIEnv* env, jobject wb))
return (jlong) WhiteBox::get_in_use_monitor_count();
WB_END

@@ -2773,10 +2775,6 @@ static JNINativeMethod methods[] = {
{CC"g1MemoryNodeIds", CC"()[I", (void*)&WB_G1MemoryNodeIds },
{CC"g1GetMixedGCInfo", CC"(I)[J", (void*)&WB_G1GetMixedGCInfo },
#endif // INCLUDE_G1GC
#if INCLUDE_PARALLELGC
{CC"psVirtualSpaceAlignment",CC"()J", (void*)&WB_PSVirtualSpaceAlignment},
{CC"psHeapGenerationAlignment",CC"()J", (void*)&WB_PSHeapGenerationAlignment},
#endif
{CC"NMTMalloc", CC"(J)J", (void*)&WB_NMTMalloc },
{CC"NMTMallocWithPseudoStack", CC"(JI)J", (void*)&WB_NMTMallocWithPseudoStack},
{CC"NMTMallocWithPseudoStackAndType", CC"(JII)J", (void*)&WB_NMTMallocWithPseudoStackAndType},

@@ -2908,6 +2906,8 @@ static JNINativeMethod methods[] = {
(void*)&WB_AddModuleExportsToAll },
{CC"deflateIdleMonitors", CC"()Z", (void*)&WB_DeflateIdleMonitors },
{CC"isMonitorInflated0", CC"(Ljava/lang/Object;)Z", (void*)&WB_IsMonitorInflated },
{CC"isAsanEnabled", CC"()Z", (void*)&WB_IsAsanEnabled },
{CC"isUbsanEnabled", CC"()Z", (void*)&WB_IsUbsanEnabled },
{CC"getInUseMonitorCount", CC"()J", (void*)&WB_getInUseMonitorCount },
{CC"getLockStackCapacity", CC"()I", (void*)&WB_getLockStackCapacity },
{CC"supportsRecursiveLightweightLocking", CC"()Z", (void*)&WB_supportsRecursiveLightweightLocking },
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -72,6 +72,9 @@ class WhiteBox : public AllStatic {
#ifdef LINUX
static bool validate_cgroup(bool cgroups_v2_enabled, const char* controllers_file, const char* proc_self_cgroup, const char* proc_self_mountinfo, u1* cg_flags);
#endif
// provide info about enabling of Address Sanitizer / Undefined Behavior Sanitizer
static bool is_asan_enabled();
static bool is_ubsan_enabled();
};

#endif // SHARE_PRIMS_WHITEBOX_HPP
@@ -528,10 +528,6 @@ static SpecialFlag const special_jvm_flags[] = {
{ "DynamicDumpSharedSpaces", JDK_Version::jdk(18), JDK_Version::jdk(19), JDK_Version::undefined() },
{ "RequireSharedSpaces", JDK_Version::jdk(18), JDK_Version::jdk(19), JDK_Version::undefined() },
{ "UseSharedSpaces", JDK_Version::jdk(18), JDK_Version::jdk(19), JDK_Version::undefined() },
#ifdef LINUX
{ "UseLinuxPosixThreadCPUClocks", JDK_Version::jdk(24), JDK_Version::jdk(25), JDK_Version::jdk(26) },
{ "UseOprofile", JDK_Version::jdk(25), JDK_Version::jdk(26), JDK_Version::jdk(27) },
#endif
{ "LockingMode", JDK_Version::jdk(24), JDK_Version::jdk(26), JDK_Version::jdk(27) },
#ifdef _LP64
{ "UseCompressedClassPointers", JDK_Version::jdk(25), JDK_Version::jdk(26), JDK_Version::undefined() },

@@ -541,7 +537,9 @@

// -------------- Obsolete Flags - sorted by expired_in --------------

{ "PerfDataSamplingInterval", JDK_Version::undefined(), JDK_Version::jdk(25), JDK_Version::jdk(26) },
#ifdef LINUX
{ "UseOprofile", JDK_Version::jdk(25), JDK_Version::jdk(26), JDK_Version::jdk(27) },
#endif
{ "MetaspaceReclaimPolicy", JDK_Version::undefined(), JDK_Version::jdk(21), JDK_Version::undefined() },
{ "ZGenerational", JDK_Version::jdk(23), JDK_Version::jdk(24), JDK_Version::undefined() },
{ "ZMarkStackSpaceLimit", JDK_Version::undefined(), JDK_Version::jdk(25), JDK_Version::undefined() },
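Each entry in the table above encodes a flag's deprecation lifecycle: the release that starts warning, the release that accepts but ignores the flag, and the release that rejects it outright. A standalone sketch of how the moved UseOprofile entry reads (ToySpecialFlag is illustrative; the real struct is SpecialFlag in arguments.cpp):

struct ToySpecialFlag {
  const char* name;
  int deprecated_in; // first release that warns about the flag
  int obsolete_in;   // first release that accepts but ignores the flag
  int expired_in;    // first release that rejects the flag outright
};

// { "UseOprofile", jdk(25), jdk(26), jdk(27) } from the hunk above:
constexpr ToySpecialFlag use_oprofile = { "UseOprofile", 25, 26, 27 };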
@@ -1274,11 +1274,11 @@ bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap*
assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
int len = sv->field_size() / type2size[ak->element_type()];
InternalOOMEMark iom(THREAD);
obj = ak->allocate(len, THREAD);
obj = ak->allocate_instance(len, THREAD);
} else if (k->is_objArray_klass()) {
ObjArrayKlass* ak = ObjArrayKlass::cast(k);
InternalOOMEMark iom(THREAD);
obj = ak->allocate(sv->field_size(), THREAD);
obj = ak->allocate_instance(sv->field_size(), THREAD);
}

if (obj == nullptr) {

@@ -1826,7 +1826,7 @@ void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason
#if INCLUDE_JVMCI
address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm) {
// there is no exception handler for this pc => deoptimize
nm->make_not_entrant("missing exception handler");
nm->make_not_entrant(nmethod::ChangeReason::missing_exception_handler);

// Use Deoptimization::deoptimize for all of its side-effects:
// gathering traps statistics, logging...

@@ -2455,7 +2455,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* current, jint tr

// Recompile
if (make_not_entrant) {
if (!nm->make_not_entrant("uncommon trap")) {
if (!nm->make_not_entrant(nmethod::ChangeReason::uncommon_trap)) {
return; // the call did not change nmethod's state
}
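These hunks belong to a wider change that replaces free-form string reasons in nmethod::make_not_entrant with a typed reason. A standalone sketch of the direction (only the enumerators visible in this diff are listed; the real nmethod::ChangeReason may contain more):

enum class ToyChangeReason {
  missing_exception_handler,
  uncommon_trap,
  whitebox_deoptimization,
  zombie
};

// A typed reason can still be rendered for logs, but callers can no longer
// pass arbitrary or misspelled strings.
static const char* toy_change_reason_name(ToyChangeReason r) {
  switch (r) {
    case ToyChangeReason::missing_exception_handler: return "missing exception handler";
    case ToyChangeReason::uncommon_trap:             return "uncommon trap";
    case ToyChangeReason::whitebox_deoptimization:   return "whitebox deoptimization";
    case ToyChangeReason::zombie:                    return "zombie";
  }
  return "unknown";
}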
@@ -465,9 +465,7 @@ HandshakeState::HandshakeState(JavaThread* target) :
_queue(),
_lock(Monitor::nosafepoint, "HandshakeState_lock"),
_active_handshaker(),
_async_exceptions_blocked(false),
_suspended(false),
_async_suspend_handshake(false) {
_async_exceptions_blocked(false) {
}

HandshakeState::~HandshakeState() {

@@ -699,128 +697,8 @@ HandshakeState::ProcessResult HandshakeState::try_process(HandshakeOperation* ma
return op == match_op ? HandshakeState::_succeeded : HandshakeState::_processed;
}

void HandshakeState::do_self_suspend() {
assert(Thread::current() == _handshakee, "should call from _handshakee");
assert(_lock.owned_by_self(), "Lock must be held");
assert(!_handshakee->has_last_Java_frame() || _handshakee->frame_anchor()->walkable(), "should have walkable stack");
assert(_handshakee->thread_state() == _thread_blocked, "Caller should have transitioned to _thread_blocked");

while (is_suspended()) {
log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " suspended", p2i(_handshakee));
_lock.wait_without_safepoint_check();
}
log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " resumed", p2i(_handshakee));
}

// This is the closure that prevents a suspended JavaThread from
// escaping the suspend request.
class ThreadSelfSuspensionHandshake : public AsyncHandshakeClosure {
public:
ThreadSelfSuspensionHandshake() : AsyncHandshakeClosure("ThreadSelfSuspensionHandshake") {}
void do_thread(Thread* thr) {
JavaThread* current = JavaThread::cast(thr);
assert(current == Thread::current(), "Must be self executed.");
JavaThreadState jts = current->thread_state();

current->set_thread_state(_thread_blocked);
current->handshake_state()->do_self_suspend();
current->set_thread_state(jts);
current->handshake_state()->set_async_suspend_handshake(false);
}
virtual bool is_suspend() { return true; }
};

bool HandshakeState::suspend_with_handshake(bool register_vthread_SR) {
assert(_handshakee->threadObj() != nullptr, "cannot suspend with a null threadObj");
if (_handshakee->is_exiting()) {
log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " exiting", p2i(_handshakee));
return false;
}
if (has_async_suspend_handshake()) {
if (is_suspended()) {
// Target is already suspended.
log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " already suspended", p2i(_handshakee));
return false;
} else {
// Target is going to wake up and leave suspension.
// Let's just stop the thread from doing that.
log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " re-suspended", p2i(_handshakee));
set_suspended(true, register_vthread_SR);
return true;
}
}
// no suspend request
assert(!is_suspended(), "cannot be suspended without a suspend request");
// Thread is safe, so it must execute the request, thus we can count it as suspended
// from this point.
set_suspended(true, register_vthread_SR);
set_async_suspend_handshake(true);
log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " suspended, arming ThreadSuspension", p2i(_handshakee));
ThreadSelfSuspensionHandshake* ts = new ThreadSelfSuspensionHandshake();
Handshake::execute(ts, _handshakee);
return true;
}

// This is the closure that synchronously honors the suspend request.
class SuspendThreadHandshake : public HandshakeClosure {
bool _register_vthread_SR;
bool _did_suspend;
public:
SuspendThreadHandshake(bool register_vthread_SR) : HandshakeClosure("SuspendThread"),
_register_vthread_SR(register_vthread_SR), _did_suspend(false) {}
void do_thread(Thread* thr) {
JavaThread* target = JavaThread::cast(thr);
_did_suspend = target->handshake_state()->suspend_with_handshake(_register_vthread_SR);
}
bool did_suspend() { return _did_suspend; }
};

bool HandshakeState::suspend(bool register_vthread_SR) {
JVMTI_ONLY(assert(!_handshakee->is_in_VTMS_transition(), "no suspend allowed in VTMS transition");)
JavaThread* self = JavaThread::current();
if (_handshakee == self) {
// If target is the current thread we can bypass the handshake machinery
// and just suspend directly
ThreadBlockInVM tbivm(self);
MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
set_suspended(true, register_vthread_SR);
do_self_suspend();
return true;
} else {
SuspendThreadHandshake st(register_vthread_SR);
Handshake::execute(&st, _handshakee);
return st.did_suspend();
}
}

bool HandshakeState::resume(bool register_vthread_SR) {
MutexLocker ml(&_lock, Mutex::_no_safepoint_check_flag);
if (!is_suspended()) {
assert(!_handshakee->is_suspended(), "cannot be suspended without a suspend request");
return false;
}
// Resume the thread.
set_suspended(false, register_vthread_SR);
_lock.notify();
return true;
}

void HandshakeState::set_suspended(bool is_suspend, bool register_vthread_SR) {
#if INCLUDE_JVMTI
if (register_vthread_SR) {
assert(_handshakee->is_vthread_mounted(), "sanity check");
if (is_suspend) {
JvmtiVTSuspender::register_vthread_suspend(_handshakee->vthread());
} else {
JvmtiVTSuspender::register_vthread_resume(_handshakee->vthread());
}
}
#endif
Atomic::store(&_suspended, is_suspend);
}

void HandshakeState::handle_unsafe_access_error() {
if (is_suspended()) {
if (_handshakee->is_suspended()) {
// A suspend handshake was added to the queue after the
// unsafe access error. Since the suspender has already
// considered this JT as suspended and assumes it won't go
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -35,8 +35,6 @@
class HandshakeOperation;
class AsyncHandshakeOperation;
class JavaThread;
class SuspendThreadHandshake;
class ThreadSelfSuspensionHandshake;
class UnsafeAccessErrorHandshake;
class ThreadsListHandle;

@@ -88,8 +86,6 @@ class JvmtiRawMonitor;
// operation is only done by either VMThread/Handshaker on behalf of the
// JavaThread or by the target JavaThread itself.
class HandshakeState {
friend ThreadSelfSuspensionHandshake;
friend SuspendThreadHandshake;
friend UnsafeAccessErrorHandshake;
friend JavaThread;
// This is a back reference to the JavaThread,

@@ -98,7 +94,7 @@ class HandshakeState {
// The queue containing handshake operations to be performed on _handshakee.
FilterQueue<HandshakeOperation*> _queue;
// Provides mutual exclusion to this state and queue. Also used for
// JavaThread suspend/resume operations.
// JavaThread suspend/resume operations performed by SuspendResumeManager.
Monitor _lock;
// Set to the thread executing the handshake operation.
Thread* volatile _active_handshaker;

@@ -160,31 +156,5 @@ class HandshakeState {
bool async_exceptions_blocked() { return _async_exceptions_blocked; }
void set_async_exceptions_blocked(bool b) { _async_exceptions_blocked = b; }
void handle_unsafe_access_error();

// Suspend/resume support
private:
// This flag is true when the thread owning this
// HandshakeState (the _handshakee) is suspended.
volatile bool _suspended;
// This flag is true while there is async handshake (trap)
// on queue. Since we do only need one, we can reuse it if
// thread gets suspended again (after a resume)
// and we have not yet processed it.
bool _async_suspend_handshake;

// Called from the suspend handshake.
bool suspend_with_handshake(bool register_vthread_SR);
// Called from the async handshake (the trap)
// to stop a thread from continuing execution when suspended.
void do_self_suspend();

bool is_suspended() { return Atomic::load(&_suspended); }
void set_suspended(bool to, bool register_vthread_SR);
bool has_async_suspend_handshake() { return _async_suspend_handshake; }
void set_async_suspend_handshake(bool to) { _async_suspend_handshake = to; }

bool suspend(bool register_vthread_SR);
bool resume(bool register_vthread_SR);
};

#endif // SHARE_RUNTIME_HANDSHAKE_HPP
@@ -498,6 +498,7 @@ JavaThread::JavaThread(MemTag mem_tag) :
_pending_interrupted_exception(false),

_handshake(this),
_suspend_resume_manager(this, &_handshake._lock),

_popframe_preserved_args(nullptr),
_popframe_preserved_args_size(0),

@@ -1200,13 +1201,13 @@ bool JavaThread::java_suspend(bool register_vthread_SR) {

guarantee(Thread::is_JavaThread_protected(/* target */ this),
"target JavaThread is not protected in calling context.");
return this->handshake_state()->suspend(register_vthread_SR);
return this->suspend_resume_manager()->suspend(register_vthread_SR);
}

bool JavaThread::java_resume(bool register_vthread_SR) {
guarantee(Thread::is_JavaThread_protected_by_TLH(/* target */ this),
"missing ThreadsListHandle in calling context.");
return this->handshake_state()->resume(register_vthread_SR);
return this->suspend_resume_manager()->resume(register_vthread_SR);
}

// Wait for another thread to perform object reallocation and relocking on behalf of

@@ -1337,7 +1338,7 @@ void JavaThread::make_zombies() {
// it is a Java nmethod
nmethod* nm = CodeCache::find_nmethod(fst.current()->pc());
assert(nm != nullptr, "did not find nmethod");
nm->make_not_entrant("zombie");
nm->make_not_entrant(nmethod::ChangeReason::zombie);
}
}
}
@@ -40,6 +40,7 @@
#include "runtime/safepointMechanism.hpp"
#include "runtime/stackWatermarkSet.hpp"
#include "runtime/stackOverflow.hpp"
#include "runtime/suspendResumeManager.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadHeapSampler.hpp"
#include "runtime/threadIdentifier.hpp"

@@ -694,9 +695,13 @@ private:

// Suspend/resume support for JavaThread
// higher-level suspension/resume logic called by the public APIs
private:
SuspendResumeManager _suspend_resume_manager;
public:
bool java_suspend(bool register_vthread_SR);
bool java_resume(bool register_vthread_SR);
bool is_suspended() { return _handshake.is_suspended(); }
bool is_suspended() { return _suspend_resume_manager.is_suspended(); }
SuspendResumeManager* suspend_resume_manager() { return &_suspend_resume_manager; }

// Check for async exception in addition to safepoint.
static void check_special_condition_for_native_trans(JavaThread *thread);
@@ -320,9 +320,16 @@ void Reflection::array_set(jvalue* value, arrayOop a, int index, BasicType value
}
}

// Conversion
static BasicType basic_type_mirror_to_basic_type(oop basic_type_mirror) {
assert(java_lang_Class::is_primitive(basic_type_mirror),
"just checking");
return java_lang_Class::primitive_type(basic_type_mirror);
}

static Klass* basic_type_mirror_to_arrayklass(oop basic_type_mirror, TRAPS) {
assert(java_lang_Class::is_primitive(basic_type_mirror), "just checking");
BasicType type = java_lang_Class::primitive_type(basic_type_mirror);
BasicType type = basic_type_mirror_to_basic_type(basic_type_mirror);
if (type == T_VOID) {
THROW_NULL(vmSymbols::java_lang_IllegalArgumentException());
}

@@ -339,8 +346,11 @@ arrayOop Reflection::reflect_new_array(oop element_mirror, jint length, TRAPS) {
THROW_MSG_NULL(vmSymbols::java_lang_NegativeArraySizeException(), err_msg("%d", length));
}
if (java_lang_Class::is_primitive(element_mirror)) {
Klass* tak = basic_type_mirror_to_arrayklass(element_mirror, CHECK_NULL);
return TypeArrayKlass::cast(tak)->allocate(length, THREAD);
BasicType type = basic_type_mirror_to_basic_type(element_mirror);
if (type == T_VOID) {
THROW_NULL(vmSymbols::java_lang_IllegalArgumentException());
}
return oopFactory::new_typeArray(type, length, CHECK_NULL);
} else {
Klass* k = java_lang_Class::as_Klass(element_mirror);
if (k->is_array_klass() && ArrayKlass::cast(k)->dimension() >= MAX_DIM) {

@@ -907,13 +917,6 @@ static methodHandle resolve_interface_call(InstanceKlass* klass,
return methodHandle(THREAD, info.selected_method());
}

// Conversion
static BasicType basic_type_mirror_to_basic_type(oop basic_type_mirror) {
assert(java_lang_Class::is_primitive(basic_type_mirror),
"just checking");
return java_lang_Class::primitive_type(basic_type_mirror);
}

// Narrowing of basic types. Used to create correct jvalues for
// boolean, byte, char and short return values from interpreter
// which are returned as ints. Throws IllegalArgumentException.
src/hotspot/share/runtime/suspendResumeManager.cpp (new file, 158 lines)
@@ -0,0 +1,158 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/handshake.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/suspendResumeManager.hpp"

// This is the closure that prevents a suspended JavaThread from
// escaping the suspend request.
class ThreadSelfSuspensionHandshake : public AsyncHandshakeClosure {
public:
ThreadSelfSuspensionHandshake() : AsyncHandshakeClosure("ThreadSelfSuspensionHandshake") {}
void do_thread(Thread* thr) {
JavaThread* current = JavaThread::cast(thr);
assert(current == Thread::current(), "Must be self executed.");
JavaThreadState jts = current->thread_state();

current->set_thread_state(_thread_blocked);
current->suspend_resume_manager()->do_owner_suspend();
current->set_thread_state(jts);
current->suspend_resume_manager()->set_async_suspend_handshake(false);
}
virtual bool is_suspend() { return true; }
};

// This is the closure that synchronously honors the suspend request.
class SuspendThreadHandshake : public HandshakeClosure {
bool _register_vthread_SR;
bool _did_suspend;
public:
SuspendThreadHandshake(bool register_vthread_SR) : HandshakeClosure("SuspendThread"),
_register_vthread_SR(register_vthread_SR), _did_suspend(false) {
}
void do_thread(Thread* thr) {
JavaThread* target = JavaThread::cast(thr);
_did_suspend = target->suspend_resume_manager()->suspend_with_handshake(_register_vthread_SR);
}
bool did_suspend() { return _did_suspend; }
};

void SuspendResumeManager::set_suspended(bool is_suspend, bool register_vthread_SR) {
#if INCLUDE_JVMTI
if (register_vthread_SR) {
assert(_target->is_vthread_mounted(), "sanity check");
if (is_suspend) {
JvmtiVTSuspender::register_vthread_suspend(_target->vthread());
}
else {
JvmtiVTSuspender::register_vthread_resume(_target->vthread());
}
}
#endif
Atomic::store(&_suspended, is_suspend);
}

bool SuspendResumeManager::suspend(bool register_vthread_SR) {
JVMTI_ONLY(assert(!_target->is_in_VTMS_transition(), "no suspend allowed in VTMS transition");)
JavaThread* self = JavaThread::current();
if (_target == self) {
// If target is the current thread we can bypass the handshake machinery
// and just suspend directly
ThreadBlockInVM tbivm(self);
MutexLocker ml(_state_lock, Mutex::_no_safepoint_check_flag);
set_suspended(true, register_vthread_SR);
do_owner_suspend();
return true;
} else {
SuspendThreadHandshake st(register_vthread_SR);
Handshake::execute(&st, _target);
return st.did_suspend();
}
}

bool SuspendResumeManager::resume(bool register_vthread_SR) {
MutexLocker ml(_state_lock, Mutex::_no_safepoint_check_flag);
if (!is_suspended()) {
assert(!_target->is_suspended(), "cannot be suspended without a suspend request");
return false;
}
// Resume the thread.
set_suspended(false, register_vthread_SR);
_state_lock->notify();
return true;
}

void SuspendResumeManager::do_owner_suspend() {
assert(Thread::current() == _target, "should call from _target");
assert(_state_lock->owned_by_self(), "Lock must be held");
assert(!_target->has_last_Java_frame() || _target->frame_anchor()->walkable(), "should have walkable stack");
assert(_target->thread_state() == _thread_blocked, "Caller should have transitioned to _thread_blocked");

while (is_suspended()) {
log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " suspended", p2i(_target));
_state_lock->wait_without_safepoint_check();
}
log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " resumed", p2i(_target));
}

bool SuspendResumeManager::suspend_with_handshake(bool register_vthread_SR) {
assert(_target->threadObj() != nullptr, "cannot suspend with a null threadObj");
if (_target->is_exiting()) {
log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " exiting", p2i(_target));
return false;
}
if (has_async_suspend_handshake()) {
if (is_suspended()) {
// Target is already suspended.
log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " already suspended", p2i(_target));
return false;
} else {
// Target is going to wake up and leave suspension.
// Let's just stop the thread from doing that.
log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " re-suspended", p2i(_target));
set_suspended(true, register_vthread_SR);
return true;
}
}
// no suspend request
assert(!is_suspended(), "cannot be suspended without a suspend request");
// Thread is safe, so it must execute the request, thus we can count it as suspended
// from this point.
set_suspended(true, register_vthread_SR);
set_async_suspend_handshake(true);
log_trace(thread, suspend)("JavaThread:" INTPTR_FORMAT " suspended, arming ThreadSuspension", p2i(_target));
ThreadSelfSuspensionHandshake* ts = new ThreadSelfSuspensionHandshake();
Handshake::execute(ts, _target);
return true;
}

SuspendResumeManager::SuspendResumeManager(JavaThread* thread, Monitor* state_lock) : _target(thread), _state_lock(state_lock), _suspended(false), _async_suspend_handshake(false) {}
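Callers are unaffected by the extraction: JavaThread keeps java_suspend/java_resume and simply delegates to the new manager. A sketch of the external flow under the documented preconditions (not compilable outside HotSpot; per the guarantees in javaThread.cpp, the target must be protected by the calling context, e.g. by a ThreadsListHandle):

// Suspend a protected target, inspect it, then resume it.
static bool toy_suspend_inspect_resume(JavaThread* target) {
  if (!target->java_suspend(/* register_vthread_SR */ false)) {
    return false; // target was exiting or already suspended
  }
  // ... examine the stopped thread here ...
  return target->java_resume(/* register_vthread_SR */ false);
}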
Some files were not shown because too many files have changed in this diff.