Mirror of https://github.com/JetBrains/JetBrainsRuntime.git
Synced 2025-12-10 11:29:39 +01:00

Compare commits

58 Commits
| SHA1 |
|---|
| d7aa349820 |
| 3b32f6a8ec |
| 8f73357004 |
| 429158218b |
| ef4cbec6fb |
| e9216efefc |
| e5196fc24d |
| c98dffa186 |
| 7d7fc69355 |
| 42ab8fcfb9 |
| bf7d40d048 |
| 5ae32c4c86 |
| 56ce70c5df |
| abc76c6b5b |
| 9586817cea |
| 38b877e941 |
| 8f487d26c0 |
| 500a3a2d0a |
| a2f99fd88b |
| 0582bd290d |
| 3ff83ec49e |
| 7c9c8ba363 |
| ca7b885873 |
| 92be7821f5 |
| bcf860703d |
| d186dacdb7 |
| ef45c8154c |
| cd9b1bc820 |
| fcb68ea22d |
| eb256deb80 |
| 156187accc |
| a377773fa7 |
| cae1fd3385 |
| eb8ee8bdc7 |
| 2103dc15cb |
| 1c72b350e4 |
| 52338c94f6 |
| 91f12600d2 |
| 6c616c71ec |
| e94ad551c6 |
| d735255919 |
| d024f58e61 |
| 026975a1aa |
| 8adb052b46 |
| 9658cecde3 |
| b2e7cda6a0 |
| 65fda5c02a |
| d1b788005b |
| bb2611ad43 |
| e918a59b1d |
| 28acca609b |
| 029e3bf8f5 |
| 78158f30ae |
| c793de989f |
| 15178aa298 |
| fe3be498b8 |
| 62fde68708 |
| af87035b71 |
.github/workflows/main.yml

```diff
@@ -310,7 +310,7 @@ jobs:
     uses: ./.github/workflows/build-windows.yml
     with:
       platform: windows-x64
-      msvc-toolset-version: '14.44'
+      msvc-toolset-version: '14.43'
       msvc-toolset-architecture: 'x86.x64'
       configure-arguments: ${{ github.event.inputs.configure-arguments }}
       make-arguments: ${{ github.event.inputs.make-arguments }}
@@ -322,7 +322,7 @@ jobs:
     uses: ./.github/workflows/build-windows.yml
     with:
       platform: windows-aarch64
-      msvc-toolset-version: '14.44'
+      msvc-toolset-version: '14.43'
       msvc-toolset-architecture: 'arm64'
       make-target: 'hotspot'
       extra-conf-options: '--openjdk-target=aarch64-unknown-cygwin'
```
```diff
@@ -1,7 +1,7 @@
 [general]
 project=jdk
 jbs=JDK
-version=25
+version=26

 [checks]
 error=author,committer,reviewers,merge,issues,executable,symlink,message,hg-tag,whitespace,problemlists,copyright
```
make/autoconf/configure

```diff
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -366,7 +366,7 @@ EOT

 # Print additional help, e.g. a list of toolchains and JVM features.
 # This must be done by the autoconf script.
-( CONFIGURE_PRINT_ADDITIONAL_HELP=true . $generated_script PRINTF=printf ECHO=echo )
+( CONFIGURE_PRINT_ADDITIONAL_HELP=true . $generated_script PRINTF=printf )

 cat <<EOT
```
```diff
@@ -28,7 +28,7 @@
 ################################################################################

 # Minimum supported versions
-JTREG_MINIMUM_VERSION=7.5.1
+JTREG_MINIMUM_VERSION=7.5.2
 GTEST_MINIMUM_VERSION=1.14.0

 ################################################################################
```
```diff
@@ -26,7 +26,7 @@
 # Versions and download locations for dependencies used by GitHub Actions (GHA)

 GTEST_VERSION=1.14.0
-JTREG_VERSION=7.5.1+1
+JTREG_VERSION=7.5.2+1

 LINUX_X64_BOOT_JDK_EXT=tar.gz
 LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk24/1f9ff9062db4449d8ca828c504ffae90/36/GPL/openjdk-24_linux-x64_bin.tar.gz
```
```diff
@@ -1174,9 +1174,9 @@ var getJibProfilesDependencies = function (input, common) {
         jtreg: {
             server: "jpg",
             product: "jtreg",
-            version: "7.5.1",
+            version: "7.5.2",
             build_number: "1",
-            file: "bundles/jtreg-7.5.1+1.zip",
+            file: "bundles/jtreg-7.5.2+1.zip",
             environment_name: "JT_HOME",
             environment_path: input.get("jtreg", "home_path") + "/bin",
             configure_args: "--with-jtreg=" + input.get("jtreg", "home_path"),
@@ -1192,8 +1192,8 @@ var getJibProfilesDependencies = function (input, common) {
             server: "jpg",
             product: "jcov",
             version: "3.0",
-            build_number: "1",
-            file: "bundles/jcov-3.0+1.zip",
+            build_number: "3",
+            file: "bundles/jcov-3.0+3.zip",
             environment_name: "JCOV_HOME",
         },
```
```diff
@@ -26,17 +26,17 @@
 # Default version, product, and vendor information to use,
 # unless overridden by configure

-DEFAULT_VERSION_FEATURE=25
+DEFAULT_VERSION_FEATURE=26
 DEFAULT_VERSION_INTERIM=0
 DEFAULT_VERSION_UPDATE=0
 DEFAULT_VERSION_PATCH=0
 DEFAULT_VERSION_EXTRA1=0
 DEFAULT_VERSION_EXTRA2=0
 DEFAULT_VERSION_EXTRA3=0
-DEFAULT_VERSION_DATE=2025-09-16
-DEFAULT_VERSION_CLASSFILE_MAJOR=69 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
+DEFAULT_VERSION_DATE=2026-03-17
+DEFAULT_VERSION_CLASSFILE_MAJOR=70 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
 DEFAULT_VERSION_CLASSFILE_MINOR=0
 DEFAULT_VERSION_DOCS_API_SINCE=11
-DEFAULT_ACCEPTABLE_BOOT_VERSIONS="24 25"
-DEFAULT_JDK_SOURCE_TARGET_VERSION=25
-DEFAULT_PROMOTED_VERSION_PRE=
+DEFAULT_ACCEPTABLE_BOOT_VERSIONS="24 25 26"
+DEFAULT_JDK_SOURCE_TARGET_VERSION=26
+DEFAULT_PROMOTED_VERSION_PRE=ea
```
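Note on the class-file numbers in this hunk: the major version tracks the feature release as major = feature + 44, which is exactly what the inline `$EXPR` comment encodes (JDK 25 → 69, JDK 26 → 70). A trivial standalone check of that arithmetic (plain C++, not JDK code):

```cpp
#include <cassert>

// Class-file major version = JDK feature release + 44,
// per the "$EXPR $DEFAULT_VERSION_FEATURE + 44" comment above.
constexpr int classfile_major(int feature) { return feature + 44; }

int main() {
  static_assert(classfile_major(25) == 69, "JDK 25 emits class file 69");
  static_assert(classfile_major(26) == 70, "JDK 26 emits class file 70");
  return 0;
}
```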
```diff
@@ -46,6 +46,8 @@ CLDR_GEN_DONE := $(GENSRC_DIR)/_cldr-gensrc.marker
 TZ_DATA_DIR := $(MODULE_SRC)/share/data/tzdata
 ZONENAME_TEMPLATE := $(MODULE_SRC)/share/classes/java/time/format/ZoneName.java.template

+# The `-utf8` option is used even for US English, as some names
+# may contain non-ASCII characters, such as “Türkiye”.
 $(CLDR_GEN_DONE): $(wildcard $(CLDR_DATA_DIR)/dtd/*.dtd) \
     $(wildcard $(CLDR_DATA_DIR)/main/en*.xml) \
     $(wildcard $(CLDR_DATA_DIR)/supplemental/*.xml) \
@@ -61,7 +63,8 @@ $(CLDR_GEN_DONE): $(wildcard $(CLDR_DATA_DIR)/dtd/*.dtd) \
     -basemodule \
     -year $(COPYRIGHT_YEAR) \
     -zntempfile $(ZONENAME_TEMPLATE) \
-    -tzdatadir $(TZ_DATA_DIR))
+    -tzdatadir $(TZ_DATA_DIR) \
+    -utf8)
     $(TOUCH) $@

 TARGETS += $(CLDR_GEN_DONE)
@@ -45,7 +45,8 @@ $(CLDR_GEN_DONE): $(wildcard $(CLDR_DATA_DIR)/dtd/*.dtd) \
     -baselocales "en-US" \
     -year $(COPYRIGHT_YEAR) \
     -o $(GENSRC_DIR) \
-    -tzdatadir $(TZ_DATA_DIR))
+    -tzdatadir $(TZ_DATA_DIR) \
+    -utf8)
     $(TOUCH) $@

 TARGETS += $(CLDR_GEN_DONE)
```
```diff
@@ -187,22 +187,18 @@ public class HelloWorld {
                 new Run("none", "Hello from Cupertino")
             }),
             new Paragraph("title", new Run[] {
-                new Run("none", "\u53F0\u5317\u554F\u5019\u60A8\u0021")
+                new Run("none", "台北問候您!")
             }),
             new Paragraph("title", new Run[] {
-                new Run("none", "\u0391\u03B8\u03B7\u03BD\u03B1\u03B9\u0020" // Greek
-                        + "\u03B1\u03C3\u03C0\u03B1\u03B6\u03BF\u03BD"
-                        + "\u03C4\u03B1\u03B9\u0020\u03C5\u03BC\u03B1"
-                        + "\u03C2\u0021")
+                new Run("none", "Αθηναι ασπαζονται υμας!") // Greek
             }),
             new Paragraph("title", new Run[] {
-                new Run("none", "\u6771\u4eac\u304b\u3089\u4eca\u65e5\u306f")
+                new Run("none", "東京から今日は")
             }),
             new Paragraph("title", new Run[] {
-                new Run("none", "\u05e9\u05dc\u05d5\u05dd \u05de\u05d9\u05e8\u05d5"
-                        + "\u05e9\u05dc\u05d9\u05dd")
+                new Run("none", "שלום מירושלים")
             }),
             new Paragraph("title", new Run[] {
-                new Run("none", "\u0633\u0644\u0627\u0645")
+                new Run("none", "سلام")
             }), };
     }
```
```diff
@@ -456,13 +456,13 @@ SliderDemo.horizontal=Horizontal
 SliderDemo.vertical=Vertikal
 SliderDemo.plain=Einfach
 SliderDemo.a_plain_slider=Ein einfacher Schieberegler
-SliderDemo.majorticks=Hauptteilstriche
-SliderDemo.majorticksdescription=Ein Schieberegler mit Hauptteilstrichen
-SliderDemo.ticks=Hilfsteilstriche, zum Einrasten und Beschriften
-SliderDemo.minorticks=Hilfsteilstriche
-SliderDemo.minorticksdescription=Ein Schieberegler mit Haupt- und Hilfsteilstrichen, in die der Schieberegler einrastet, wobei einige Teilstriche mit einer sichtbaren Beschriftung versehen sind
+SliderDemo.majorticks=Grobteilungen
+SliderDemo.majorticksdescription=Ein Schieberegler mit Grobteilungsmarkierungen
+SliderDemo.ticks=Feinteilungen, Teilungen zum Einrasten und Labels
+SliderDemo.minorticks=Feinteilungen
+SliderDemo.minorticksdescription=Ein Schieberegler mit Grob- und Feinteilungen, mit Teilungen, in die der Schieberegler einrastet, wobei einige Teilungen mit einem sichtbaren Label versehen sind
 SliderDemo.disabled=Deaktiviert
-SliderDemo.disableddescription=Ein Schieberegler mit Haupt- und Hilfsteilstrichen, der nicht aktiviert ist (kann nicht bearbeitet werden)
+SliderDemo.disableddescription=Ein Schieberegler mit Grob- und Feinteilungen, der nicht aktiviert ist (kann nicht bearbeitet werden)

 ### SplitPane Demo ###
```
```diff
@@ -3921,6 +3921,10 @@ ins_attrib ins_alignment(4); // Required alignment attribute (must
                               // compute_padding() function must be
                               // provided for the instruction

+// Whether this node is expanded during code emission into a sequence of
+// instructions and the first instruction can perform an implicit null check.
+ins_attrib ins_is_late_expanded_null_check_candidate(false);
+
 //----------OPERANDS-----------------------------------------------------------
 // Operand definitions must precede instruction definitions for correct parsing
 // in the ADLC because operands constitute user defined types which are used in
```
```diff
@@ -292,8 +292,7 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
   } else {
     assert(is_phantom, "only remaining strength");
     assert(!is_narrow, "phantom access cannot be narrow");
-    // AOT saved adapters need relocation for this call.
-    __ lea(lr, RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom)));
+    __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom));
   }
   __ blr(lr);
   __ mov(rscratch1, r0);
```
```diff
@@ -106,6 +106,13 @@ instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
   match(Set dst (LoadP mem));
   predicate(UseZGC && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
   effect(TEMP dst, KILL cr);
+  // The main load is a candidate to implement implicit null checks, as long as
+  // legitimize_address() does not require a preceding lea instruction to
+  // materialize the memory operand. The absence of a preceding lea instruction
+  // is guaranteed for immLoffset8 memory operands, because these do not lead to
+  // out-of-range offsets (see definition of immLoffset8). Fortunately,
+  // immLoffset8 memory operands are the most common ones in practice.
+  ins_is_late_expanded_null_check_candidate(opnd_array(1)->opcode() == INDOFFL8);

   ins_cost(4 * INSN_COST);
@@ -117,7 +124,11 @@ instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
     // Fix up any out-of-range offsets.
     assert_different_registers(rscratch2, as_Register($mem$$base));
     assert_different_registers(rscratch2, $dst$$Register);
-    ref_addr = __ legitimize_address(ref_addr, 8, rscratch2);
+    int size = 8;
+    assert(!this->is_late_expanded_null_check_candidate() ||
+           !MacroAssembler::legitimize_address_requires_lea(ref_addr, size),
+           "an instruction that can be used for implicit null checking should emit the candidate memory access first");
+    ref_addr = __ legitimize_address(ref_addr, size, rscratch2);
   }
   __ ldr($dst$$Register, ref_addr);
   z_load_barrier(masm, this, ref_addr, $dst$$Register, rscratch1);
```
```diff
@@ -129,16 +129,21 @@ class MacroAssembler: public Assembler {
     a.lea(this, r);
   }

+  // Whether materializing the given address for a LDR/STR requires an
+  // additional lea instruction.
+  static bool legitimize_address_requires_lea(const Address &a, int size) {
+    return a.getMode() == Address::base_plus_offset &&
+           !Address::offset_ok_for_immed(a.offset(), exact_log2(size));
+  }
+
   /* Sometimes we get misaligned loads and stores, usually from Unsafe
      accesses, and these can exceed the offset range. */
   Address legitimize_address(const Address &a, int size, Register scratch) {
-    if (a.getMode() == Address::base_plus_offset) {
-      if (! Address::offset_ok_for_immed(a.offset(), exact_log2(size))) {
-        block_comment("legitimize_address {");
-        lea(scratch, a);
-        block_comment("} legitimize_address");
-        return Address(scratch);
-      }
+    if (legitimize_address_requires_lea(a, size)) {
+      block_comment("legitimize_address {");
+      lea(scratch, a);
+      block_comment("} legitimize_address");
+      return Address(scratch);
     }
     return a;
   }
```
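For readers outside HotSpot: the new `legitimize_address_requires_lea` helper simply asks whether a load/store offset still fits the instruction's immediate encoding; if not, a `lea` must materialize the address first, which is what disqualifies the load as an implicit-null-check candidate. A minimal standalone sketch of the underlying check, assuming only AArch64's unsigned scaled 12-bit immediate form for LDR/STR (simplified: the real `Address::offset_ok_for_immed` also accepts the signed 9-bit unscaled form):

```cpp
#include <cassert>
#include <cstdint>

// Does `offset` fit the AArch64 LDR/STR "unsigned scaled 12-bit" immediate
// for an access of 2^shift bytes? Simplified model for illustration.
static bool offset_fits_scaled_imm12(int64_t offset, int shift) {
  if (offset < 0 || (offset & ((INT64_C(1) << shift) - 1)) != 0)
    return false;                        // negative or misaligned: needs lea
  return (offset >> shift) < (1 << 12);  // scaled value must fit in 12 bits
}

int main() {
  assert(offset_fits_scaled_imm12(8, 3));        // ldr x0, [x1, #8]  -> encodable
  assert(!offset_fits_scaled_imm12(12, 3));      // not 8-byte aligned -> lea
  assert(!offset_fits_scaled_imm12(1 << 16, 3)); // out of range -> lea
  return 0;
}
```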
```diff
@@ -8888,8 +8888,13 @@ instruct TailCalljmpInd(IPRegP jump_target, inline_cache_regP method_ptr) %{
   match(TailCall jump_target method_ptr);

   ins_cost(CALL_COST);
-  format %{ "jump $jump_target \t! $method_ptr holds method" %}
+  format %{ "MOV Rexception_pc, LR\n\t"
+            "jump $jump_target \t! $method_ptr holds method" %}
   ins_encode %{
+    __ mov(Rexception_pc, LR);   // this is used only to call
+                                 // StubRoutines::forward_exception_entry()
+                                 // which expects PC of exception in
+                                 // R5. FIXME?
     __ jump($jump_target$$Register);
   %}
   ins_pipe(tail_call);
@@ -8934,10 +8939,8 @@ instruct ForwardExceptionjmp()
   match(ForwardException);
   ins_cost(CALL_COST);

-  format %{ "MOV Rexception_pc, LR\n\t"
-            "b forward_exception_entry" %}
+  format %{ "b forward_exception_stub" %}
   ins_encode %{
-    __ mov(Rexception_pc, LR);
     // OK to trash Rtemp, because Rtemp is used by stub
     __ jump(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type, Rtemp);
   %}
```
```diff
@@ -141,6 +141,7 @@ instruct zLoadP(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0)
 %{
   match(Set dst (LoadP mem));
   effect(TEMP_DEF dst, KILL cr0);
+  ins_is_late_expanded_null_check_candidate(true);
   ins_cost(MEMORY_REF_COST);

   predicate((UseZGC && n->as_Load()->barrier_data() != 0)
@@ -160,6 +161,7 @@ instruct zLoadP_acq(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0)
 %{
   match(Set dst (LoadP mem));
   effect(TEMP_DEF dst, KILL cr0);
+  ins_is_late_expanded_null_check_candidate(true);
   ins_cost(3 * MEMORY_REF_COST);

   // Predicate on instruction order is implicitly present due to the predicate of the cheaper zLoadP operation
```
```diff
@@ -3928,10 +3928,8 @@ void MacroAssembler::kernel_crc32_vpmsum_aligned(Register crc, Register buf, Reg
   Label L_outer_loop, L_inner_loop, L_last;

   // Set DSCR pre-fetch to deepest.
-  if (VM_Version::has_mfdscr()) {
-    load_const_optimized(t0, VM_Version::_dscr_val | 7);
-    mtdscr(t0);
-  }
+  load_const_optimized(t0, VM_Version::_dscr_val | 7);
+  mtdscr(t0);

   mtvrwz(VCRC, crc); // crc lives in VCRC, now
@@ -4075,10 +4073,8 @@ void MacroAssembler::kernel_crc32_vpmsum_aligned(Register crc, Register buf, Reg
   // ********** Main loop end **********

   // Restore DSCR pre-fetch value.
-  if (VM_Version::has_mfdscr()) {
-    load_const_optimized(t0, VM_Version::_dscr_val);
-    mtdscr(t0);
-  }
+  load_const_optimized(t0, VM_Version::_dscr_val);
+  mtdscr(t0);

   // ********** Simple loop for remaining 16 byte blocks **********
   {
```
```diff
@@ -4036,6 +4036,10 @@ ins_attrib ins_field_cbuf_insts_offset(-1);
 ins_attrib ins_field_load_ic_hi_node(0);
 ins_attrib ins_field_load_ic_node(0);

+// Whether this node is expanded during code emission into a sequence of
+// instructions and the first instruction can perform an implicit null check.
+ins_attrib ins_is_late_expanded_null_check_candidate(false);
+
 //----------OPERANDS-----------------------------------------------------------
 // Operand definitions must precede instruction definitions for correct
 // parsing in the ADLC because operands constitute user defined types
```
```diff
@@ -952,10 +952,8 @@ class StubGenerator: public StubCodeGenerator {
     address start_pc = __ pc();
     Register tmp1 = R6_ARG4;
     // probably copy stub would have changed value reset it.
-    if (VM_Version::has_mfdscr()) {
-      __ load_const_optimized(tmp1, VM_Version::_dscr_val);
-      __ mtdscr(tmp1);
-    }
+    __ load_const_optimized(tmp1, VM_Version::_dscr_val);
+    __ mtdscr(tmp1);
     __ li(R3_RET, 0); // return 0
     __ blr();
     return start_pc;
@@ -1072,10 +1070,9 @@ class StubGenerator: public StubCodeGenerator {
       __ dcbt(R3_ARG1, 0);

       // If supported set DSCR pre-fetch to deepest.
-      if (VM_Version::has_mfdscr()) {
-        __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
-        __ mtdscr(tmp2);
-      }
+      __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
+      __ mtdscr(tmp2);

       __ li(tmp1, 16);

       // Backbranch target aligned to 32-byte. Not 16-byte align as
@@ -1095,10 +1092,8 @@ class StubGenerator: public StubCodeGenerator {
       __ bdnz(l_10); // Dec CTR and loop if not zero.

       // Restore DSCR pre-fetch value.
-      if (VM_Version::has_mfdscr()) {
-        __ load_const_optimized(tmp2, VM_Version::_dscr_val);
-        __ mtdscr(tmp2);
-      }
+      __ load_const_optimized(tmp2, VM_Version::_dscr_val);
+      __ mtdscr(tmp2);

     } // FasterArrayCopy
@@ -1349,10 +1344,8 @@ class StubGenerator: public StubCodeGenerator {
       __ dcbt(R3_ARG1, 0);

       // If supported set DSCR pre-fetch to deepest.
-      if (VM_Version::has_mfdscr()) {
-        __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
-        __ mtdscr(tmp2);
-      }
+      __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
+      __ mtdscr(tmp2);
       __ li(tmp1, 16);

       // Backbranch target aligned to 32-byte. It's not aligned 16-byte
@@ -1372,11 +1365,8 @@ class StubGenerator: public StubCodeGenerator {
       __ bdnz(l_9); // Dec CTR and loop if not zero.

       // Restore DSCR pre-fetch value.
-      if (VM_Version::has_mfdscr()) {
-        __ load_const_optimized(tmp2, VM_Version::_dscr_val);
-        __ mtdscr(tmp2);
-      }
-
+      __ load_const_optimized(tmp2, VM_Version::_dscr_val);
+      __ mtdscr(tmp2);
     } // FasterArrayCopy
     __ bind(l_6);
@@ -1537,10 +1527,9 @@ class StubGenerator: public StubCodeGenerator {
       __ dcbt(R3_ARG1, 0);

       // Set DSCR pre-fetch to deepest.
-      if (VM_Version::has_mfdscr()) {
-        __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
-        __ mtdscr(tmp2);
-      }
+      __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
+      __ mtdscr(tmp2);

       __ li(tmp1, 16);

       // Backbranch target aligned to 32-byte. Not 16-byte align as
@@ -1560,10 +1549,9 @@ class StubGenerator: public StubCodeGenerator {
       __ bdnz(l_7); // Dec CTR and loop if not zero.

       // Restore DSCR pre-fetch value.
-      if (VM_Version::has_mfdscr()) {
-        __ load_const_optimized(tmp2, VM_Version::_dscr_val);
-        __ mtdscr(tmp2);
-      }
+      __ load_const_optimized(tmp2, VM_Version::_dscr_val);
+      __ mtdscr(tmp2);

     } // FasterArrayCopy
@@ -1684,10 +1672,9 @@ class StubGenerator: public StubCodeGenerator {
       __ dcbt(R3_ARG1, 0);

       // Set DSCR pre-fetch to deepest.
-      if (VM_Version::has_mfdscr()) {
-        __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
-        __ mtdscr(tmp2);
-      }
+      __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
+      __ mtdscr(tmp2);

       __ li(tmp1, 16);

       // Backbranch target aligned to 32-byte. Not 16-byte align as
@@ -1707,10 +1694,8 @@ class StubGenerator: public StubCodeGenerator {
       __ bdnz(l_4);

       // Restore DSCR pre-fetch value.
-      if (VM_Version::has_mfdscr()) {
-        __ load_const_optimized(tmp2, VM_Version::_dscr_val);
-        __ mtdscr(tmp2);
-      }
+      __ load_const_optimized(tmp2, VM_Version::_dscr_val);
+      __ mtdscr(tmp2);

       __ cmpwi(CR0, R5_ARG3, 0);
       __ beq(CR0, l_6);
@@ -1803,10 +1788,9 @@ class StubGenerator: public StubCodeGenerator {
       __ dcbt(R3_ARG1, 0);

       // Set DSCR pre-fetch to deepest.
-      if (VM_Version::has_mfdscr()) {
-        __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
-        __ mtdscr(tmp2);
-      }
+      __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
+      __ mtdscr(tmp2);

       __ li(tmp1, 16);

       // Backbranch target aligned to 32-byte. Not 16-byte align as
@@ -1826,10 +1810,8 @@ class StubGenerator: public StubCodeGenerator {
       __ bdnz(l_5); // Dec CTR and loop if not zero.

       // Restore DSCR pre-fetch value.
-      if (VM_Version::has_mfdscr()) {
-        __ load_const_optimized(tmp2, VM_Version::_dscr_val);
-        __ mtdscr(tmp2);
-      }
+      __ load_const_optimized(tmp2, VM_Version::_dscr_val);
+      __ mtdscr(tmp2);

     } // FasterArrayCopy
@@ -1928,10 +1910,9 @@ class StubGenerator: public StubCodeGenerator {
      __ dcbt(R3_ARG1, 0);

       // Set DSCR pre-fetch to deepest.
-      if (VM_Version::has_mfdscr()) {
-        __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
-        __ mtdscr(tmp2);
-      }
+      __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
+      __ mtdscr(tmp2);

       __ li(tmp1, 16);

       // Backbranch target aligned to 32-byte. Not 16-byte align as
@@ -1951,10 +1932,8 @@ class StubGenerator: public StubCodeGenerator {
       __ bdnz(l_4);

       // Restore DSCR pre-fetch value.
-      if (VM_Version::has_mfdscr()) {
-        __ load_const_optimized(tmp2, VM_Version::_dscr_val);
-        __ mtdscr(tmp2);
-      }
+      __ load_const_optimized(tmp2, VM_Version::_dscr_val);
+      __ mtdscr(tmp2);

       __ cmpwi(CR0, R5_ARG3, 0);
       __ beq(CR0, l_1);
```
```diff
@@ -80,9 +80,7 @@ void VM_Version::initialize() {
                       "%zu on this machine", PowerArchitecturePPC64);

   // Power 8: Configure Data Stream Control Register.
-  if (VM_Version::has_mfdscr()) {
-    config_dscr();
-  }
+  config_dscr();

   if (!UseSIGTRAP) {
     MSG(TrapBasedICMissChecks);
@@ -172,8 +170,7 @@ void VM_Version::initialize() {
   // Create and print feature-string.
   char buf[(num_features+1) * 16]; // Max 16 chars per feature.
   jio_snprintf(buf, sizeof(buf),
-               "ppc64 sha aes%s%s%s",
-               (has_mfdscr() ? " mfdscr" : ""),
+               "ppc64 sha aes%s%s",
                (has_darn() ? " darn" : ""),
                (has_brw() ? " brw" : "")
                // Make sure number of %s matches num_features!
@@ -491,7 +488,6 @@ void VM_Version::determine_features() {
   uint32_t *code = (uint32_t *)a->pc();
   // Keep R3_ARG1 unmodified, it contains &field (see below).
   // Keep R4_ARG2 unmodified, it contains offset = 0 (see below).
-  a->mfdscr(R0);
   a->darn(R7);
   a->brw(R5, R6);
   a->blr();
@@ -528,7 +524,6 @@ void VM_Version::determine_features() {

   // determine which instructions are legal.
   int feature_cntr = 0;
-  if (code[feature_cntr++]) features |= mfdscr_m;
   if (code[feature_cntr++]) features |= darn_m;
   if (code[feature_cntr++]) features |= brw_m;
@@ -32,14 +32,12 @@
 class VM_Version: public Abstract_VM_Version {
 protected:
   enum Feature_Flag {
-    mfdscr,
     darn,
     brw,
     num_features // last entry to count features
   };
   enum Feature_Flag_Set {
     unknown_m             = 0,
-    mfdscr_m              = (1 << mfdscr ),
     darn_m                = (1 << darn  ),
     brw_m                 = (1 << brw   ),
     all_features_m        = (unsigned long)-1
@@ -69,9 +67,8 @@ public:

   static bool is_determine_features_test_running() { return _is_determine_features_test_running; }
   // CPU instruction support
-  static bool has_mfdscr() { return (_features & mfdscr_m) != 0; } // Power8, but may be unavailable (QEMU)
-  static bool has_darn()   { return (_features & darn_m) != 0; }
-  static bool has_brw()    { return (_features & brw_m) != 0; }
+  static bool has_darn() { return (_features & darn_m) != 0; }
+  static bool has_brw()  { return (_features & brw_m) != 0; }

   // Assembler testing
   static void allow_all();
```
```diff
@@ -2170,13 +2170,15 @@ void C2_MacroAssembler::enc_cmove_cmp_fp(int cmpFlag, FloatRegister op1, FloatRe
     cmov_cmp_fp_le(op1, op2, dst, src, is_single);
     break;
   case BoolTest::ge:
-    cmov_cmp_fp_ge(op1, op2, dst, src, is_single);
+    assert(false, "Should go to BoolTest::le case");
+    ShouldNotReachHere();
     break;
   case BoolTest::lt:
     cmov_cmp_fp_lt(op1, op2, dst, src, is_single);
     break;
   case BoolTest::gt:
-    cmov_cmp_fp_gt(op1, op2, dst, src, is_single);
+    assert(false, "Should go to BoolTest::lt case");
+    ShouldNotReachHere();
     break;
   default:
     assert(false, "unsupported compare condition");
@@ -96,6 +96,7 @@ instruct zLoadP(iRegPNoSp dst, memory mem, iRegPNoSp tmp, rFlagsReg cr)
   match(Set dst (LoadP mem));
   predicate(UseZGC && n->as_Load()->barrier_data() != 0);
   effect(TEMP dst, TEMP tmp, KILL cr);
+  ins_is_late_expanded_null_check_candidate(true);

   ins_cost(4 * DEFAULT_COST);
@@ -1268,19 +1268,12 @@ void MacroAssembler::cmov_gtu(Register cmp1, Register cmp2, Register dst, Regist
 }

 // ----------- cmove, compare float -----------
-//
-// For CmpF/D + CMoveI/L, ordered ones are quite straight and simple,
-// so, just list behaviour of unordered ones as follow.
-//
-// Set dst (CMoveI (Binary cop (CmpF/D op1 op2)) (Binary dst src))
-// (If one or both inputs to the compare are NaN, then)
-// 1. (op1 lt op2) => true  => CMove: dst = src
-// 2. (op1 le op2) => true  => CMove: dst = src
-// 3. (op1 gt op2) => false => CMove: dst = dst
-// 4. (op1 ge op2) => false => CMove: dst = dst
-// 5. (op1 eq op2) => false => CMove: dst = dst
-// 6. (op1 ne op2) => true  => CMove: dst = src

+// Move src to dst only if cmp1 == cmp2,
+// otherwise leave dst unchanged, including the case where one of them is NaN.
+// Clarification:
+//   java code       : cmp1 != cmp2 ? dst : src
+//   transformed to  : CMove dst, (cmp1 eq cmp2), dst, src
 void MacroAssembler::cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
   if (UseZicond) {
     if (is_single) {
```
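The rewritten comments above all lean on one fact of IEEE-754 (and Java) arithmetic: every ordered comparison involving a NaN is false, and only `!=` is true. That is why, for instance, "When cmp1 <= cmp2 or any of them is NaN then dst = src" holds for the `le` case: the source pattern is `cmp2 < cmp1 ? dst : src`, and a NaN input makes the condition false, selecting `src`. A runnable illustration (plain C++, mirroring the Java semantics, not HotSpot code):

```cpp
#include <cassert>
#include <cmath>

int main() {
  double cmp1 = std::nan(""), cmp2 = 1.0;
  int dst = 10, src = 20;
  assert((cmp2 < cmp1 ? dst : src) == src);  // NaN: condition false -> src
  assert((cmp1 == cmp2 ? dst : src) == src); // NaN: not equal -> src
  assert(cmp1 != cmp2);                      // only != is true with NaN
  return 0;
}
```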
```diff
@@ -1296,7 +1289,7 @@ void MacroAssembler::cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Regi
   Label no_set;
   if (is_single) {
     // jump if cmp1 != cmp2, including the case of NaN
-    // fallthrough (i.e. move src to dst) if cmp1 == cmp2
+    // not jump (i.e. move src to dst) if cmp1 == cmp2
     float_bne(cmp1, cmp2, no_set);
   } else {
     double_bne(cmp1, cmp2, no_set);
@@ -1305,6 +1298,11 @@ void MacroAssembler::cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Regi
   bind(no_set);
 }

+// Keep dst unchanged only if cmp1 == cmp2,
+// otherwise move src to dst, including the case where one of them is NaN.
+// Clarification:
+//   java code       : cmp1 == cmp2 ? dst : src
+//   transformed to  : CMove dst, (cmp1 ne cmp2), dst, src
 void MacroAssembler::cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
   if (UseZicond) {
     if (is_single) {
@@ -1320,7 +1318,7 @@ void MacroAssembler::cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Regi
   Label no_set;
   if (is_single) {
     // jump if cmp1 == cmp2
-    // fallthrough (i.e. move src to dst) if cmp1 != cmp2, including the case of NaN
+    // not jump (i.e. move src to dst) if cmp1 != cmp2, including the case of NaN
     float_beq(cmp1, cmp2, no_set);
   } else {
     double_beq(cmp1, cmp2, no_set);
@@ -1329,6 +1327,14 @@ void MacroAssembler::cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Regi
   bind(no_set);
 }

+// When cmp1 <= cmp2 or any of them is NaN then dst = src, otherwise, dst = dst
+// Clarification
+// scenario 1:
+//   java code       : cmp2 < cmp1 ? dst : src
+//   transformed to  : CMove dst, (cmp1 le cmp2), dst, src
+// scenario 2:
+//   java code       : cmp1 > cmp2 ? dst : src
+//   transformed to  : CMove dst, (cmp1 le cmp2), dst, src
 void MacroAssembler::cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
   if (UseZicond) {
     if (is_single) {
@@ -1344,7 +1350,7 @@ void MacroAssembler::cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Regi
   Label no_set;
   if (is_single) {
     // jump if cmp1 > cmp2
-    // fallthrough (i.e. move src to dst) if cmp1 <= cmp2 or either is NaN
+    // not jump (i.e. move src to dst) if cmp1 <= cmp2 or either is NaN
     float_bgt(cmp1, cmp2, no_set);
   } else {
     double_bgt(cmp1, cmp2, no_set);
@@ -1353,30 +1359,14 @@ void MacroAssembler::cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Regi
   bind(no_set);
 }

-void MacroAssembler::cmov_cmp_fp_ge(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
-  if (UseZicond) {
-    if (is_single) {
-      fle_s(t0, cmp2, cmp1);
-    } else {
-      fle_d(t0, cmp2, cmp1);
-    }
-    czero_nez(dst, dst, t0);
-    czero_eqz(t0 , src, t0);
-    orr(dst, dst, t0);
-    return;
-  }
-  Label no_set;
-  if (is_single) {
-    // jump if cmp1 < cmp2 or either is NaN
-    // fallthrough (i.e. move src to dst) if cmp1 >= cmp2
-    float_blt(cmp1, cmp2, no_set, false, true);
-  } else {
-    double_blt(cmp1, cmp2, no_set, false, true);
-  }
-  mv(dst, src);
-  bind(no_set);
-}
-
+// When cmp1 < cmp2 or any of them is NaN then dst = src, otherwise, dst = dst
+// Clarification
+// scenario 1:
+//   java code       : cmp2 <= cmp1 ? dst : src
+//   transformed to  : CMove dst, (cmp1 lt cmp2), dst, src
+// scenario 2:
+//   java code       : cmp1 >= cmp2 ? dst : src
+//   transformed to  : CMove dst, (cmp1 lt cmp2), dst, src
 void MacroAssembler::cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
   if (UseZicond) {
     if (is_single) {
@@ -1392,7 +1382,7 @@ void MacroAssembler::cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Regi
   Label no_set;
   if (is_single) {
     // jump if cmp1 >= cmp2
-    // fallthrough (i.e. move src to dst) if cmp1 < cmp2 or either is NaN
+    // not jump (i.e. move src to dst) if cmp1 < cmp2 or either is NaN
     float_bge(cmp1, cmp2, no_set);
   } else {
     double_bge(cmp1, cmp2, no_set);
@@ -1401,30 +1391,6 @@ void MacroAssembler::cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Regi
   bind(no_set);
 }

-void MacroAssembler::cmov_cmp_fp_gt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
-  if (UseZicond) {
-    if (is_single) {
-      flt_s(t0, cmp2, cmp1);
-    } else {
-      flt_d(t0, cmp2, cmp1);
-    }
-    czero_nez(dst, dst, t0);
-    czero_eqz(t0 , src, t0);
-    orr(dst, dst, t0);
-    return;
-  }
-  Label no_set;
-  if (is_single) {
-    // jump if cmp1 <= cmp2 or either is NaN
-    // fallthrough (i.e. move src to dst) if cmp1 > cmp2
-    float_ble(cmp1, cmp2, no_set, false, true);
-  } else {
-    double_ble(cmp1, cmp2, no_set, false, true);
-  }
-  mv(dst, src);
-  bind(no_set);
-}
-
 // Float compare branch instructions

 #define INSN(NAME, FLOATCMP, BRANCH) \
@@ -660,9 +660,7 @@ class MacroAssembler: public Assembler {
   void cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
   void cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
   void cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
-  void cmov_cmp_fp_ge(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
   void cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
-  void cmov_cmp_fp_gt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);

 public:
   // We try to follow risc-v asm menomics.
@@ -2619,6 +2619,10 @@ ins_attrib ins_alignment(4); // Required alignment attribute (must
                              // compute_padding() function must be
                              // provided for the instruction

+// Whether this node is expanded during code emission into a sequence of
+// instructions and the first instruction can perform an implicit null check.
+ins_attrib ins_is_late_expanded_null_check_candidate(false);
+
 //----------OPERANDS-----------------------------------------------------------
 // Operand definitions must precede instruction definitions for correct parsing
 // in the ADLC because operands constitute user defined types which are used in
```
```diff
@@ -410,7 +410,7 @@

 // C2I adapter frames:
 //
-// STACK (interpreted called from compiled, on entry to frame manager):
+// STACK (interpreted called from compiled, on entry to template interpreter):
 //
 //    [TOP_C2I_FRAME]
 //    [JIT_FRAME]
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016, 2023 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -414,7 +414,7 @@ constexpr FloatRegister Z_FARG2 = Z_F2;
 constexpr FloatRegister Z_FARG3 = Z_F4;
 constexpr FloatRegister Z_FARG4 = Z_F6;

-// Register declarations to be used in frame manager assembly code.
+// Register declarations to be used in template interpreter assembly code.
 // Use only non-volatile registers in order to keep values across C-calls.

 // Register to cache the integer value on top of the operand stack.
@@ -439,7 +439,7 @@ constexpr Register Z_bcp = Z_R13;
 // Bytecode which is dispatched (short lived!).
 constexpr Register Z_bytecode = Z_R14;

-// Temporary registers to be used within frame manager. We can use
+// Temporary registers to be used within template interpreter. We can use
 // the nonvolatile ones because the call stub has saved them.
 // Use only non-volatile registers in order to keep values across C-calls.
 constexpr Register Z_tmp_1 = Z_R10;
@@ -118,7 +118,7 @@ ExceptionBlob* OptoRuntime::generate_exception_blob() {
   __ z_lgr(Z_SP, saved_sp);

   // [Z_RET] isn't null was possible in hotspot5 but not in sapjvm6.
-  // C2I adapter extensions are now removed by a resize in the frame manager
+  // C2I adapter extensions are now removed by a resize in the template interpreter
   // (unwind_initial_activation_pending_exception).
 #ifdef ASSERT
   __ z_ltgr(handle_exception, handle_exception);
@@ -2139,7 +2139,7 @@ static address gen_c2i_adapter(MacroAssembler *masm,
   Register value = Z_R12;

   // Remember the senderSP so we can pop the interpreter arguments off of the stack.
-  // In addition, frame manager expects initial_caller_sp in Z_R10.
+  // In addition, template interpreter expects initial_caller_sp in Z_R10.
   __ z_lgr(sender_SP, Z_SP);

   // This should always fit in 14 bit immediate.
@@ -115,7 +115,7 @@ class StubGenerator: public StubCodeGenerator {
   //   [SP+176] - thread : Thread*
   //
   address generate_call_stub(address& return_address) {
-    // Set up a new C frame, copy Java arguments, call frame manager
+    // Set up a new C frame, copy Java arguments, call template interpreter
     // or native_entry, and process result.

     StubGenStubId stub_id = StubGenStubId::call_stub_id;
@@ -272,10 +272,10 @@ class StubGenerator: public StubCodeGenerator {

     BLOCK_COMMENT("call {");
     {
-      // Call frame manager or native entry.
+      // Call template interpreter or native entry.

       //
-      // Register state on entry to frame manager / native entry:
+      // Register state on entry to template interpreter / native entry:
       //
       //   Z_ARG1 = r_top_of_arguments_addr - intptr_t *sender tos (prepushed)
       //   Lesp = (SP) + copied_arguments_offset - 8
@@ -290,7 +290,7 @@ class StubGenerator: public StubCodeGenerator {
       __ z_lgr(Z_esp, r_top_of_arguments_addr);

       //
-      // Stack on entry to frame manager / native entry:
+      // Stack on entry to template interpreter / native entry:
       //
       //   F0      [TOP_IJAVA_FRAME_ABI]
       //           [outgoing Java arguments]
@@ -300,7 +300,7 @@ class StubGenerator: public StubCodeGenerator {
       //

       // Do a light-weight C-call here, r_new_arg_entry holds the address
-      // of the interpreter entry point (frame manager or native entry)
+      // of the interpreter entry point (template interpreter or native entry)
       // and save runtime-value of return_pc in return_address
       // (call by reference argument).
       return_address = __ call_stub(r_new_arg_entry);
@@ -309,11 +309,11 @@ class StubGenerator: public StubCodeGenerator {

     {
       BLOCK_COMMENT("restore registers {");
-      // Returned from frame manager or native entry.
+      // Returned from template interpreter or native entry.
       // Now pop frame, process result, and return to caller.

       //
-      // Stack on exit from frame manager / native entry:
+      // Stack on exit from template interpreter / native entry:
       //
       //   F0      [ABI]
       //           ...
@@ -330,7 +330,7 @@ class StubGenerator: public StubCodeGenerator {
       __ pop_frame();

       // Reload some volatile registers which we've spilled before the call
-      // to frame manager / native entry.
+      // to template interpreter / native entry.
       // Access all locals via frame pointer, because we know nothing about
       // the topmost frame's size.
       __ z_lg(r_arg_result_addr, result_address_offset, r_entryframe_fp);
@@ -1217,7 +1217,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {

 // Various method entries

-// Math function, frame manager must set up an interpreter state, etc.
+// Math function, template interpreter must set up an interpreter state, etc.
 address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {

   // Decide what to do: Use same platform specific instructions and runtime calls as compilers.
```
```diff
@@ -15681,8 +15681,6 @@ void Assembler::pusha_uncached() { // 64bit
     // Push pair of original stack pointer along with remaining registers
     // at 16B aligned boundary.
     push2p(rax, r31);
-    // Restore the original contents of RAX register.
-    movq(rax, Address(rax));
     push2p(r30, r29);
     push2p(r28, r27);
     push2p(r26, r25);
@@ -4655,7 +4655,6 @@ static void convertF2I_slowpath(C2_MacroAssembler& masm, C2GeneralStub<Register,
   __ subptr(rsp, 8);
   __ movdbl(Address(rsp), src);
   __ call(RuntimeAddress(target));
-  // APX REX2 encoding for pop(dst) increases the stub size by 1 byte.
   __ pop(dst);
   __ jmp(stub.continuation());
 #undef __
@@ -4688,9 +4687,7 @@ void C2_MacroAssembler::convertF2I(BasicType dst_bt, BasicType src_bt, Register
     }
   }

-  // Using the APX extended general purpose registers increases the instruction encoding size by 1 byte.
-  int max_size = 23 + (UseAPX ? 1 : 0);
-  auto stub = C2CodeStub::make<Register, XMMRegister, address>(dst, src, slowpath_target, max_size, convertF2I_slowpath);
+  auto stub = C2CodeStub::make<Register, XMMRegister, address>(dst, src, slowpath_target, 23, convertF2I_slowpath);
   jcc(Assembler::equal, stub->entry());
   bind(stub->continuation());
 }
```
```diff
@@ -353,7 +353,7 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,

   // The rest is saved with the optimized path

-  uint num_saved_regs = 4 + (dst != rax ? 1 : 0) + 4 + (UseAPX ? 16 : 0);
+  uint num_saved_regs = 4 + (dst != rax ? 1 : 0) + 4;
   __ subptr(rsp, num_saved_regs * wordSize);
   uint slot = num_saved_regs;
   if (dst != rax) {
@@ -367,25 +367,6 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
   __ movptr(Address(rsp, (--slot) * wordSize), r9);
   __ movptr(Address(rsp, (--slot) * wordSize), r10);
   __ movptr(Address(rsp, (--slot) * wordSize), r11);
-  // Save APX extended registers r16–r31 if enabled
-  if (UseAPX) {
-    __ movptr(Address(rsp, (--slot) * wordSize), r16);
-    __ movptr(Address(rsp, (--slot) * wordSize), r17);
-    __ movptr(Address(rsp, (--slot) * wordSize), r18);
-    __ movptr(Address(rsp, (--slot) * wordSize), r19);
-    __ movptr(Address(rsp, (--slot) * wordSize), r20);
-    __ movptr(Address(rsp, (--slot) * wordSize), r21);
-    __ movptr(Address(rsp, (--slot) * wordSize), r22);
-    __ movptr(Address(rsp, (--slot) * wordSize), r23);
-    __ movptr(Address(rsp, (--slot) * wordSize), r24);
-    __ movptr(Address(rsp, (--slot) * wordSize), r25);
-    __ movptr(Address(rsp, (--slot) * wordSize), r26);
-    __ movptr(Address(rsp, (--slot) * wordSize), r27);
-    __ movptr(Address(rsp, (--slot) * wordSize), r28);
-    __ movptr(Address(rsp, (--slot) * wordSize), r29);
-    __ movptr(Address(rsp, (--slot) * wordSize), r30);
-    __ movptr(Address(rsp, (--slot) * wordSize), r31);
-  }
   // r12-r15 are callee saved in all calling conventions
   assert(slot == 0, "must use all slots");
@@ -417,25 +398,6 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
     __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), arg0, arg1);
   }

-  // Restore APX extended registers r31–r16 if previously saved
-  if (UseAPX) {
-    __ movptr(r31, Address(rsp, (slot++) * wordSize));
-    __ movptr(r30, Address(rsp, (slot++) * wordSize));
-    __ movptr(r29, Address(rsp, (slot++) * wordSize));
-    __ movptr(r28, Address(rsp, (slot++) * wordSize));
-    __ movptr(r27, Address(rsp, (slot++) * wordSize));
-    __ movptr(r26, Address(rsp, (slot++) * wordSize));
-    __ movptr(r25, Address(rsp, (slot++) * wordSize));
-    __ movptr(r24, Address(rsp, (slot++) * wordSize));
-    __ movptr(r23, Address(rsp, (slot++) * wordSize));
-    __ movptr(r22, Address(rsp, (slot++) * wordSize));
-    __ movptr(r21, Address(rsp, (slot++) * wordSize));
-    __ movptr(r20, Address(rsp, (slot++) * wordSize));
-    __ movptr(r19, Address(rsp, (slot++) * wordSize));
-    __ movptr(r18, Address(rsp, (slot++) * wordSize));
-    __ movptr(r17, Address(rsp, (slot++) * wordSize));
-    __ movptr(r16, Address(rsp, (slot++) * wordSize));
-  }
   __ movptr(r11, Address(rsp, (slot++) * wordSize));
   __ movptr(r10, Address(rsp, (slot++) * wordSize));
   __ movptr(r9, Address(rsp, (slot++) * wordSize));
```
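The deleted blocks above are a mechanical spill/reload of the APX extended registers r16–r31 around the runtime call. The slot bookkeeping they follow — decrement while saving, increment while restoring, with an assert that every slot was consumed — can be modelled in a few lines (illustrative C++ with plain arrays standing in for the stack, not HotSpot code):

```cpp
#include <cassert>

int main() {
  const unsigned num_saved = 4;
  long stack[num_saved];
  long regs[num_saved] = {11, 22, 33, 44};

  unsigned slot = num_saved;
  for (unsigned i = 0; i < num_saved; i++) {
    stack[--slot] = regs[i];       // save: slots are filled top-down
  }
  assert(slot == 0);               // mirrors assert(slot == 0, "must use all slots")

  for (int i = num_saved - 1; i >= 0; i--) {
    regs[i] = stack[slot++];       // restore: walked back in reverse order
  }
  assert(slot == num_saved && regs[0] == 11 && regs[3] == 44);
  return 0;
}
```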
```diff
@@ -118,6 +118,10 @@ instruct zLoadP(rRegP dst, memory mem, rFlagsReg cr)
   predicate(UseZGC && n->as_Load()->barrier_data() != 0);
   match(Set dst (LoadP mem));
   effect(TEMP dst, KILL cr);
+  // The main load is a candidate to implement implicit null checks. The
+  // barrier's slow path includes an identical reload, which does not need to be
+  // registered in the exception table because it is dominated by the main one.
+  ins_is_late_expanded_null_check_candidate(true);

   ins_cost(125);
```
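The `ins_is_late_expanded_null_check_candidate` attribute threaded through these .ad files marks loads whose first emitted instruction may stand in for an explicit null check: the VM simply lets the access trap and converts the signal into a `NullPointerException`. The trap-based idea, reduced to a toy (POSIX-specific; dereferencing null is undefined behaviour in portable C++, so this is purely illustrative of the VM technique, which controls its own code generation):

```cpp
#include <csetjmp>
#include <csignal>
#include <cstdio>

static sigjmp_buf env;

static void handler(int) { siglongjmp(env, 1); }  // signal -> resume at sigsetjmp

int main() {
  std::signal(SIGSEGV, handler);
  volatile int* p = nullptr;
  if (sigsetjmp(env, 1) == 0) {
    int v = *p;                     // the load itself doubles as the null check
    std::printf("loaded %d\n", v);  // never reached for p == nullptr
  } else {
    std::printf("trap taken: implicit null check fired\n");
  }
  return 0;
}
```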
```diff
@@ -30,7 +30,7 @@
     do_arch_blob,                                                       \
     do_arch_entry,                                                      \
     do_arch_entry_init)                                                 \
-  do_arch_blob(initial, PRODUCT_ONLY(20000) NOT_PRODUCT(21000) WINDOWS_ONLY(+1000)) \
+  do_arch_blob(initial, 20000 WINDOWS_ONLY(+1000))                      \
   do_stub(initial, verify_mxcsr)                                        \
   do_arch_entry(x86, initial, verify_mxcsr, verify_mxcsr_entry,         \
                 verify_mxcsr_entry)                                     \
@@ -239,7 +239,7 @@
     do_arch_blob,                                                       \
     do_arch_entry,                                                      \
     do_arch_entry_init)                                                 \
-  do_arch_blob(final, 33000                                             \
+  do_arch_blob(final, 31000                                             \
                WINDOWS_ONLY(+22000) ZGC_ONLY(+20000))                   \

 #endif // CPU_X86_STUBDECLARATIONS_HPP
```
```diff
@@ -46,12 +46,6 @@
 //
 /******************************************************************************/

-/* Represents 0x7FFFFFFFFFFFFFFF double precision in lower 64 bits*/
-ATTRIBUTE_ALIGNED(16) static const juint _ABS_MASK[] =
-{
-    4294967295, 2147483647, 0, 0
-};
-
 ATTRIBUTE_ALIGNED(4) static const juint _SIG_MASK[] =
 {
     0, 1032192
@@ -194,10 +188,10 @@ address StubGenerator::generate_libmCbrt() {
   StubCodeMark mark(this, stub_id);
   address start = __ pc();

-  Label L_2TAG_PACKET_0_0_1, L_2TAG_PACKET_1_0_1, L_2TAG_PACKET_2_0_1;
+  Label L_2TAG_PACKET_0_0_1, L_2TAG_PACKET_1_0_1, L_2TAG_PACKET_2_0_1, L_2TAG_PACKET_3_0_1;
+  Label L_2TAG_PACKET_4_0_1, L_2TAG_PACKET_5_0_1, L_2TAG_PACKET_6_0_1;
   Label B1_1, B1_2, B1_4;

-  address ABS_MASK = (address)_ABS_MASK;
   address SIG_MASK = (address)_SIG_MASK;
   address EXP_MASK = (address)_EXP_MASK;
   address EXP_MSK2 = (address)_EXP_MSK2;
@@ -214,12 +208,8 @@ address StubGenerator::generate_libmCbrt() {
   __ enter(); // required for proper stackwalking of RuntimeStub frame

   __ bind(B1_1);
-  __ ucomisd(xmm0, ExternalAddress(ZERON), r11 /*rscratch*/);
-  __ jcc(Assembler::equal, L_2TAG_PACKET_1_0_1); // Branch only if x is +/- zero or NaN
-  __ movq(xmm1, xmm0);
-  __ andpd(xmm1, ExternalAddress(ABS_MASK), r11 /*rscratch*/);
-  __ ucomisd(xmm1, ExternalAddress(INF), r11 /*rscratch*/);
-  __ jcc(Assembler::equal, B1_4); // Branch only if x is +/- INF
   __ subq(rsp, 24);
   __ movsd(Address(rsp), xmm0);

   __ bind(B1_2);
   __ movq(xmm7, xmm0);
@@ -238,6 +228,8 @@ address StubGenerator::generate_libmCbrt() {
   __ andl(rdx, rax);
   __ cmpl(rdx, 0);
   __ jcc(Assembler::equal, L_2TAG_PACKET_0_0_1); // Branch only if |x| is denormalized
+  __ cmpl(rdx, 524032);
+  __ jcc(Assembler::equal, L_2TAG_PACKET_1_0_1); // Branch only if |x| is INF or NaN
   __ shrl(rdx, 8);
   __ shrq(r9, 8);
   __ andpd(xmm2, xmm0);
@@ -305,6 +297,8 @@ address StubGenerator::generate_libmCbrt() {
   __ andl(rdx, rax);
   __ shrl(rdx, 8);
   __ shrq(r9, 8);
+  __ cmpl(rdx, 0);
+  __ jcc(Assembler::equal, L_2TAG_PACKET_3_0_1); // Branch only if |x| is zero
   __ andpd(xmm2, xmm0);
   __ andpd(xmm0, xmm5);
   __ orpd(xmm3, xmm2);
@@ -328,10 +322,41 @@ address StubGenerator::generate_libmCbrt() {
   __ psllq(xmm7, 52);
   __ jmp(L_2TAG_PACKET_2_0_1);

+  __ bind(L_2TAG_PACKET_3_0_1);
+  __ cmpq(r9, 0);
+  __ jcc(Assembler::notEqual, L_2TAG_PACKET_4_0_1); // Branch only if x is negative zero
+  __ xorpd(xmm0, xmm0);
+  __ jmp(B1_4);
+
+  __ bind(L_2TAG_PACKET_4_0_1);
+  __ movsd(xmm0, ExternalAddress(ZERON), r11 /*rscratch*/);
+  __ jmp(B1_4);
+
+  __ bind(L_2TAG_PACKET_1_0_1);
+  __ movl(rax, Address(rsp, 4));
+  __ movl(rdx, Address(rsp));
+  __ movl(rcx, rax);
+  __ andl(rcx, 2147483647);
+  __ cmpl(rcx, 2146435072);
+  __ jcc(Assembler::above, L_2TAG_PACKET_5_0_1); // Branch only if |x| is NaN
+  __ cmpl(rdx, 0);
+  __ jcc(Assembler::notEqual, L_2TAG_PACKET_5_0_1); // Branch only if |x| is NaN
+  __ cmpl(rax, 2146435072);
+  __ jcc(Assembler::notEqual, L_2TAG_PACKET_6_0_1); // Branch only if x is negative INF
+  __ movsd(xmm0, ExternalAddress(INF), r11 /*rscratch*/);
+  __ jmp(B1_4);
+
+  __ bind(L_2TAG_PACKET_6_0_1);
+  __ movsd(xmm0, ExternalAddress(NEG_INF), r11 /*rscratch*/);
+  __ jmp(B1_4);
+
+  __ bind(L_2TAG_PACKET_5_0_1);
+  __ movsd(xmm0, Address(rsp));
+  __ addsd(xmm0, xmm0);
+  __ movq(Address(rsp, 8), xmm0);
+
   __ bind(B1_4);
   __ addq(rsp, 24);
   __ leave(); // required for proper stackwalking of RuntimeStub frame
   __ ret(0);
```
```diff
@@ -465,13 +465,19 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
       __ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dtan));
     }
   } else if (kind == Interpreter::java_lang_math_tanh) {
-    assert(StubRoutines::dtanh() != nullptr, "not initialized");
-    __ movdbl(xmm0, Address(rsp, wordSize));
-    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dtanh())));
+    if (StubRoutines::dtanh() != nullptr) {
+      __ movdbl(xmm0, Address(rsp, wordSize));
+      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dtanh())));
+    } else {
+      return nullptr; // Fallback to default implementation
+    }
   } else if (kind == Interpreter::java_lang_math_cbrt) {
-    assert(StubRoutines::dcbrt() != nullptr, "not initialized");
-    __ movdbl(xmm0, Address(rsp, wordSize));
-    __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dcbrt())));
+    if (StubRoutines::dcbrt() != nullptr) {
+      __ movdbl(xmm0, Address(rsp, wordSize));
+      __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dcbrt())));
+    } else {
+      return nullptr; // Fallback to default implementation
+    }
   } else if (kind == Interpreter::java_lang_math_abs) {
     assert(StubRoutines::x86::double_sign_mask() != nullptr, "not initialized");
     __ movdbl(xmm0, Address(rsp, wordSize));
```
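This hunk replaces hard asserts with a null check that returns `nullptr`, which the caller interprets as "use the default interpreter path". A minimal sketch of the same "use the optimized stub if present, else signal fallback" pattern (names here are illustrative, not HotSpot APIs):

```cpp
#include <cstdio>

using entry_t = double (*)(double);

static entry_t dcbrt_stub = nullptr;  // may stay null if the stub was not generated

// Returns the fast entry if available; nullptr tells the caller to
// fall back to the default implementation instead of asserting.
static entry_t select_math_entry() {
  if (dcbrt_stub != nullptr) {
    return dcbrt_stub;
  }
  return nullptr;
}

int main() {
  std::printf("stub selected: %s\n", select_math_entry() ? "yes" : "no (fallback)");
  return 0;
}
```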
```diff
@@ -440,6 +440,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
     __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits apx_f
     __ jcc(Assembler::equal, vector_save_restore);

+#ifndef PRODUCT
     bool save_apx = UseAPX;
     VM_Version::set_apx_cpuFeatures();
     UseAPX = true;
@@ -456,6 +457,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
     __ movq(Address(rsi, 8), r31);

     UseAPX = save_apx;
+#endif
     __ bind(vector_save_restore);
     //
     // Check if OS has enabled XGETBV instruction to access XCR0
@@ -1020,6 +1022,8 @@ void VM_Version::get_processor_features() {
   if (UseAPX && !apx_supported) {
     warning("UseAPX is not supported on this CPU, setting it to false");
     FLAG_SET_DEFAULT(UseAPX, false);
+  } else if (FLAG_IS_DEFAULT(UseAPX)) {
+    FLAG_SET_DEFAULT(UseAPX, apx_supported ? true : false);
   }

   if (!UseAPX) {
@@ -2107,7 +2111,7 @@ bool VM_Version::is_intel_cascade_lake() {
 // has improved implementation of 64-byte load/stores and so the default
 // threshold is set to 0 for these platforms.
 int VM_Version::avx3_threshold() {
-  return (is_intel_server_family() &&
+  return (is_intel_family_core() &&
           supports_serialize() &&
           FLAG_IS_DEFAULT(AVX3Threshold)) ? 0 : AVX3Threshold;
 }
@@ -3147,11 +3151,17 @@ bool VM_Version::os_supports_apx_egprs() {
   if (!supports_apx_f()) {
     return false;
   }
+  // Enable APX support for product builds after
+  // completion of planned features listed in JDK-8329030.
+#if !defined(PRODUCT)
   if (_cpuid_info.apx_save[0] != egpr_test_value() ||
       _cpuid_info.apx_save[1] != egpr_test_value()) {
     return false;
   }
   return true;
+#else
+  return false;
+#endif
 }

 uint VM_Version::cores_per_cpu() {
```
```diff
@@ -2055,6 +2055,10 @@ ins_attrib ins_alignment(1); // Required alignment attribute (must
                              // compute_padding() function must be
                              // provided for the instruction

+// Whether this node is expanded during code emission into a sequence of
+// instructions and the first instruction can perform an implicit null check.
+ins_attrib ins_is_late_expanded_null_check_candidate(false);
+
 //----------OPERANDS-----------------------------------------------------------
 // Operand definitions must precede instruction definitions for correct parsing
 // in the ADLC because operands constitute user defined types which are used in
```
```diff
@@ -1261,69 +1261,6 @@ void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
   // Nothing to do beyond of what os::print_cpu_info() does.
 }

-static char saved_jvm_path[MAXPATHLEN] = {0};
-
-// Find the full path to the current module, libjvm.so.
-void os::jvm_path(char *buf, jint buflen) {
-  // Error checking.
-  if (buflen < MAXPATHLEN) {
-    assert(false, "must use a large-enough buffer");
-    buf[0] = '\0';
-    return;
-  }
-  // Lazy resolve the path to current module.
-  if (saved_jvm_path[0] != 0) {
-    strcpy(buf, saved_jvm_path);
-    return;
-  }
-
-  Dl_info dlinfo;
-  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
-  assert(ret != 0, "cannot locate libjvm");
-  char* rp = os::realpath((char *)dlinfo.dli_fname, buf, buflen);
-  assert(rp != nullptr, "error in realpath(): maybe the 'path' argument is too long?");
-
-  // If executing unit tests we require JAVA_HOME to point to the real JDK.
-  if (Arguments::executing_unit_tests()) {
-    // Look for JAVA_HOME in the environment.
-    char* java_home_var = ::getenv("JAVA_HOME");
-    if (java_home_var != nullptr && java_home_var[0] != 0) {
-
-      // Check the current module name "libjvm.so".
-      const char* p = strrchr(buf, '/');
-      if (p == nullptr) {
-        return;
-      }
-      assert(strstr(p, "/libjvm") == p, "invalid library name");
-
-      stringStream ss(buf, buflen);
-      rp = os::realpath(java_home_var, buf, buflen);
-      if (rp == nullptr) {
-        return;
-      }
-
-      assert((int)strlen(buf) < buflen, "Ran out of buffer room");
-      ss.print("%s/lib", buf);
-
-      if (0 == access(buf, F_OK)) {
-        // Use current module name "libjvm.so"
-        ss.print("/%s/libjvm%s", Abstract_VM_Version::vm_variant(), JNI_LIB_SUFFIX);
-        assert(strcmp(buf + strlen(buf) - strlen(JNI_LIB_SUFFIX), JNI_LIB_SUFFIX) == 0,
-               "buf has been truncated");
-      } else {
-        // Go back to path of .so
-        rp = os::realpath((char *)dlinfo.dli_fname, buf, buflen);
-        if (rp == nullptr) {
-          return;
-        }
-      }
-    }
-  }
-
-  strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
-  saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 // Virtual Memory
```
@@ -154,7 +154,8 @@ julong os::Bsd::available_memory() {
  assert(kerr == KERN_SUCCESS,
         "host_statistics64 failed - check mach_host_self() and count");
  if (kerr == KERN_SUCCESS) {
    available = vmstat.free_count * os::vm_page_size();
    // free_count is just a lowerbound, other page categories can be freed too and make memory available
    available = (vmstat.free_count + vmstat.inactive_count + vmstat.purgeable_count) * os::vm_page_size();
  }
#endif
  return available;
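
The hunk above widens the estimate of reclaimable memory on macOS: free pages alone undercount what the OS can hand back. A minimal standalone sketch of the same Mach query (an illustration, not JBR code; it assumes counting free, inactive, and purgeable pages is an acceptable approximation):

    #include <mach/mach.h>
    #include <cstdint>
    #include <cstdio>

    int main() {
      vm_statistics64_data_t vmstat;
      mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
      vm_size_t page_size = 0;
      host_page_size(mach_host_self(), &page_size);
      if (host_statistics64(mach_host_self(), HOST_VM_INFO64,
                            (host_info64_t)&vmstat, &count) != KERN_SUCCESS) {
        return 1;
      }
      // free_count is only a lower bound: inactive and purgeable pages can be
      // reclaimed on demand, so count them as available too.
      uint64_t available = (uint64_t)(vmstat.free_count + vmstat.inactive_count +
                                      vmstat.purgeable_count) * page_size;
      printf("available: %llu bytes\n", (unsigned long long)available);
      return 0;
    }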
@@ -1482,83 +1483,6 @@ void os::print_memory_info(outputStream* st) {
  st->cr();
}

static char saved_jvm_path[MAXPATHLEN] = {0};

// Find the full path to the current module, libjvm
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  char dli_fname[MAXPATHLEN];
  dli_fname[0] = '\0';
  bool ret = dll_address_to_library_name(
                                         CAST_FROM_FN_PTR(address, os::jvm_path),
                                         dli_fname, sizeof(dli_fname), nullptr);
  assert(ret, "cannot locate libjvm");
  char *rp = nullptr;
  if (ret && dli_fname[0] != '\0') {
    rp = os::realpath(dli_fname, buf, buflen);
  }
  if (rp == nullptr) {
    return;
  }

  // If executing unit tests we require JAVA_HOME to point to the real JDK.
  if (Arguments::executing_unit_tests()) {
    // Look for JAVA_HOME in the environment.
    char* java_home_var = ::getenv("JAVA_HOME");
    if (java_home_var != nullptr && java_home_var[0] != 0) {

      // Check the current module name "libjvm"
      const char* p = strrchr(buf, '/');
      assert(strstr(p, "/libjvm") == p, "invalid library name");

      stringStream ss(buf, buflen);
      rp = os::realpath(java_home_var, buf, buflen);
      if (rp == nullptr) {
        return;
      }

      assert((int)strlen(buf) < buflen, "Ran out of buffer space");
      // Add the appropriate library and JVM variant subdirs
      ss.print("%s/lib/%s", buf, Abstract_VM_Version::vm_variant());

      if (0 != access(buf, F_OK)) {
        ss.reset();
        ss.print("%s/lib", buf);
      }

      // If the path exists within JAVA_HOME, add the JVM library name
      // to complete the path to JVM being overridden. Otherwise fallback
      // to the path to the current library.
      if (0 == access(buf, F_OK)) {
        // Use current module name "libjvm"
        ss.print("/libjvm%s", JNI_LIB_SUFFIX);
        assert(strcmp(buf + strlen(buf) - strlen(JNI_LIB_SUFFIX), JNI_LIB_SUFFIX) == 0,
               "buf has been truncated");
      } else {
        // Fall back to path of current library
        rp = os::realpath(dli_fname, buf, buflen);
        if (rp == nullptr) {
          return;
        }
      }
    }
  }

  strncpy(saved_jvm_path, buf, MAXPATHLEN);
  saved_jvm_path[MAXPATHLEN - 1] = '\0';
}

////////////////////////////////////////////////////////////////////////////////
// Virtual Memory

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -35,9 +35,6 @@
         range, \
         constraint) \
  \
  product(bool, UseOprofile, false, \
          "(Deprecated) enable support for Oprofile profiler") \
  \
  product(bool, UseTransparentHugePages, false, \
          "Use MADV_HUGEPAGE for large pages") \
  \

@@ -2746,118 +2746,9 @@ void os::get_summary_cpu_info(char* cpuinfo, size_t length) {
#endif
}

static char saved_jvm_path[MAXPATHLEN] = {0};

// Find the full path to the current module, libjvm.so
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  char dli_fname[MAXPATHLEN];
  dli_fname[0] = '\0';
  bool ret = dll_address_to_library_name(
                                         CAST_FROM_FN_PTR(address, os::jvm_path),
                                         dli_fname, sizeof(dli_fname), nullptr);
  assert(ret, "cannot locate libjvm");
  char *rp = nullptr;
  if (ret && dli_fname[0] != '\0') {
    rp = os::realpath(dli_fname, buf, buflen);
  }
  if (rp == nullptr) {
    return;
  }

  // If executing unit tests we require JAVA_HOME to point to the real JDK.
  if (Arguments::executing_unit_tests()) {
    // Look for JAVA_HOME in the environment.
    char* java_home_var = ::getenv("JAVA_HOME");
    if (java_home_var != nullptr && java_home_var[0] != 0) {

      // Check the current module name "libjvm.so".
      const char* p = strrchr(buf, '/');
      if (p == nullptr) {
        return;
      }
      assert(strstr(p, "/libjvm") == p, "invalid library name");

      stringStream ss(buf, buflen);
      rp = os::realpath(java_home_var, buf, buflen);
      if (rp == nullptr) {
        return;
      }

      assert((int)strlen(buf) < buflen, "Ran out of buffer room");
      ss.print("%s/lib", buf);

      if (0 == access(buf, F_OK)) {
        // Use current module name "libjvm.so"
        ss.print("/%s/libjvm%s", Abstract_VM_Version::vm_variant(), JNI_LIB_SUFFIX);
        assert(strcmp(buf + strlen(buf) - strlen(JNI_LIB_SUFFIX), JNI_LIB_SUFFIX) == 0,
               "buf has been truncated");
      } else {
        // Go back to path of .so
        rp = os::realpath(dli_fname, buf, buflen);
        if (rp == nullptr) {
          return;
        }
      }
    }
  }

  strncpy(saved_jvm_path, buf, MAXPATHLEN);
  saved_jvm_path[MAXPATHLEN - 1] = '\0';
}

////////////////////////////////////////////////////////////////////////////////
// Virtual Memory

// Rationale behind this function:
//  current (Mon Apr 25 20:12:18 MSD 2005) oprofile drops samples without executable
//  mapping for address (see lookup_dcookie() in the kernel module), thus we cannot get
//  samples for JITted code. Here we create private executable mapping over the code cache
//  and then we can use standard (well, almost, as mapping can change) way to provide
//  info for the reporting script by storing timestamp and location of symbol
void linux_wrap_code(char* base, size_t size) {
  static volatile jint cnt = 0;

  static_assert(sizeof(off_t) == 8, "Expected Large File Support in this file");

  if (!UseOprofile) {
    return;
  }

  char buf[PATH_MAX+1];
  int num = Atomic::add(&cnt, 1);

  snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d",
           os::get_temp_directory(), os::current_process_id(), num);
  unlink(buf);

  int fd = ::open(buf, O_CREAT | O_RDWR, S_IRWXU);

  if (fd != -1) {
    off_t rv = ::lseek(fd, size-2, SEEK_SET);
    if (rv != (off_t)-1) {
      if (::write(fd, "", 1) == 1) {
        mmap(base, size,
             PROT_READ|PROT_WRITE|PROT_EXEC,
             MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, fd, 0);
      }
    }
    ::close(fd);
    unlink(buf);
  }
}

static bool recoverable_mmap_error(int err) {
  // See if the error is one we can let the caller handle. This
  // list of errno values comes from JBS-6843484. I can't find a

@@ -59,6 +59,7 @@
#ifdef AIX
#include "loadlib_aix.hpp"
#include "os_aix.hpp"
#include "porting_aix.hpp"
#endif
#ifdef LINUX
#include "os_linux.hpp"
@@ -1060,6 +1061,95 @@ bool os::same_files(const char* file1, const char* file2) {
  return is_same;
}

static char saved_jvm_path[MAXPATHLEN] = {0};

// Find the full path to the current module, libjvm.so
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  const char* fname;
#ifdef AIX
  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  if (ret == 0) {
    return;
  }
  fname = dlinfo.dli_fname;
#else
  char dli_fname[MAXPATHLEN];
  dli_fname[0] = '\0';
  bool ret = dll_address_to_library_name(
                                         CAST_FROM_FN_PTR(address, os::jvm_path),
                                         dli_fname, sizeof(dli_fname), nullptr);
  assert(ret, "cannot locate libjvm");
  if (!ret) {
    return;
  }
  fname = dli_fname;
#endif // AIX
  char* rp = nullptr;
  if (fname[0] != '\0') {
    rp = os::realpath(fname, buf, buflen);
  }
  if (rp == nullptr) {
    return;
  }

  // If executing unit tests we require JAVA_HOME to point to the real JDK.
  if (Arguments::executing_unit_tests()) {
    // Look for JAVA_HOME in the environment.
    char* java_home_var = ::getenv("JAVA_HOME");
    if (java_home_var != nullptr && java_home_var[0] != 0) {

      // Check the current module name "libjvm.so".
      const char* p = strrchr(buf, '/');
      if (p == nullptr) {
        return;
      }
      assert(strstr(p, "/libjvm") == p, "invalid library name");

      stringStream ss(buf, buflen);
      rp = os::realpath(java_home_var, buf, buflen);
      if (rp == nullptr) {
        return;
      }

      assert((int)strlen(buf) < buflen, "Ran out of buffer room");
      ss.print("%s/lib", buf);

      // If the path exists within JAVA_HOME, add the VM variant directory and JVM
      // library name to complete the path to JVM being overridden. Otherwise fallback
      // to the path to the current library.
      if (0 == access(buf, F_OK)) {
        // Use current module name "libjvm.so"
        ss.print("/%s/libjvm%s", Abstract_VM_Version::vm_variant(), JNI_LIB_SUFFIX);
        assert(strcmp(buf + strlen(buf) - strlen(JNI_LIB_SUFFIX), JNI_LIB_SUFFIX) == 0,
               "buf has been truncated");
      } else {
        // Go back to path of .so
        rp = os::realpath(fname, buf, buflen);
        if (rp == nullptr) {
          return;
        }
      }
    }
  }

  strncpy(saved_jvm_path, buf, MAXPATHLEN);
  saved_jvm_path[MAXPATHLEN - 1] = '\0';
}

// Called when creating the thread. The minimum stack sizes have already been calculated
size_t os::Posix::get_initial_stack_size(ThreadType thr_type, size_t req_stack_size) {
  size_t stack_size;
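
The unified implementation above locates libjvm by asking the dynamic linker which module contains a known function, canonicalizes that path, and caches it. A minimal standalone sketch of that lookup technique (the helper name is hypothetical; assumes POSIX dladdr/realpath, and linking with -ldl on Linux):

    #include <dlfcn.h>    // dladdr
    #include <limits.h>   // PATH_MAX
    #include <stdio.h>
    #include <stdlib.h>   // realpath

    // Resolve the canonical path of the module containing this function.
    static bool find_own_module_path(char* buf, size_t buflen) {
      Dl_info info;
      // Any address inside the module works; a function pointer is convenient.
      if (dladdr((void*)&find_own_module_path, &info) == 0 || info.dli_fname == nullptr) {
        return false;
      }
      char resolved[PATH_MAX];
      if (realpath(info.dli_fname, resolved) == nullptr) {
        return false;
      }
      return (size_t)snprintf(buf, buflen, "%s", resolved) < buflen;
    }

    int main() {
      char path[PATH_MAX];
      if (find_own_module_path(path, sizeof(path))) {
        printf("loaded from: %s\n", path);  // for a plain executable, its own binary
      }
      return 0;
    }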

@@ -2623,6 +2623,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
      return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
    }

#if !defined(PRODUCT)
    if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
        VM_Version::is_cpuinfo_segv_addr_apx(pc)) {
      // Verify that OS save/restore APX registers.
@@ -2630,6 +2631,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
      return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr_apx());
    }
#endif
#endif

#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
  if (VMError::was_assert_poison_crash(exception_record)) {

@@ -81,14 +81,12 @@
#endif

#define SPELL_REG_SP "sp"
#define SPELL_REG_FP "fp"

#ifdef __APPLE__
// see darwin-xnu/osfmk/mach/arm/_structs.h

// 10.5 UNIX03 member name prefixes
#define DU3_PREFIX(s, m) __ ## s.__ ## m
#endif

#define context_x uc_mcontext->DU3_PREFIX(ss,x)
#define context_fp uc_mcontext->DU3_PREFIX(ss,fp)
@@ -97,6 +95,31 @@
#define context_pc uc_mcontext->DU3_PREFIX(ss,pc)
#define context_cpsr uc_mcontext->DU3_PREFIX(ss,cpsr)
#define context_esr uc_mcontext->DU3_PREFIX(es,esr)
#endif

#ifdef __FreeBSD__
# define context_x uc_mcontext.mc_gpregs.gp_x
# define context_fp context_x[REG_FP]
# define context_lr uc_mcontext.mc_gpregs.gp_lr
# define context_sp uc_mcontext.mc_gpregs.gp_sp
# define context_pc uc_mcontext.mc_gpregs.gp_elr
#endif

#ifdef __NetBSD__
# define context_x uc_mcontext.__gregs
# define context_fp uc_mcontext.__gregs[_REG_FP]
# define context_lr uc_mcontext.__gregs[_REG_LR]
# define context_sp uc_mcontext.__gregs[_REG_SP]
# define context_pc uc_mcontext.__gregs[_REG_ELR]
#endif

#ifdef __OpenBSD__
# define context_x sc_x
# define context_fp sc_x[REG_FP]
# define context_lr sc_lr
# define context_sp sc_sp
# define context_pc sc_elr
#endif

#define REG_BCP context_x[22]

@@ -497,9 +520,11 @@ int os::extra_bang_size_in_bytes() {
  return 0;
}

#ifdef __APPLE__
void os::current_thread_enable_wx(WXMode mode) {
  pthread_jit_write_protect_np(mode == WXExec);
}
#endif

static inline void atomic_copy64(const volatile void *src, volatile void *dst) {
  *(jlong *) dst = *(const jlong *) src;

@@ -429,11 +429,13 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
    stub = VM_Version::cpuinfo_cont_addr();
  }

#if !defined(PRODUCT) && defined(_LP64)
  if ((sig == SIGSEGV || sig == SIGBUS) && VM_Version::is_cpuinfo_segv_addr_apx(pc)) {
    // Verify that OS save/restore APX registers.
    stub = VM_Version::cpuinfo_cont_addr_apx();
    VM_Version::clear_apx_test_state();
  }
#endif

  // We test if stub is already set (by the stack overflow code
  // above) so it is not overwritten by the code that follows. This

@@ -255,11 +255,13 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
    stub = VM_Version::cpuinfo_cont_addr();
  }

#if !defined(PRODUCT) && defined(_LP64)
  if ((sig == SIGSEGV) && VM_Version::is_cpuinfo_segv_addr_apx(pc)) {
    // Verify that OS save/restore APX registers.
    stub = VM_Version::cpuinfo_cont_addr_apx();
    VM_Version::clear_apx_test_state();
  }
#endif

  if (thread->thread_state() == _thread_in_Java) {
    // Java thread running in Java code => find exception handler if any

@@ -481,7 +481,3 @@ int get_legal_text(FileBuff &fbuf, char **legal_text)
  *legal_text = legal_start;
  return (int) (legal_end - legal_start);
}

void *operator new( size_t size, int, const char *, int ) throw() {
  return ::operator new( size );
}

@@ -1626,6 +1626,8 @@ void ArchDesc::declareClasses(FILE *fp) {
    while (attr != nullptr) {
      if (strcmp (attr->_ident, "ins_is_TrapBasedCheckNode") == 0) {
        fprintf(fp, "  virtual bool is_TrapBasedCheckNode() const { return %s; }\n", attr->_val);
      } else if (strcmp (attr->_ident, "ins_is_late_expanded_null_check_candidate") == 0) {
        fprintf(fp, "  virtual bool is_late_expanded_null_check_candidate() const { return %s; }\n", attr->_val);
      } else if (strcmp (attr->_ident, "ins_cost") != 0 &&
                 strncmp(attr->_ident, "ins_field_", 10) != 0 &&
                 // Must match function in node.hpp: return type bool, no prefix "ins_".

@@ -818,7 +818,7 @@ JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* current, jint trap_request))
  Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);

  if (action == Deoptimization::Action_make_not_entrant) {
    if (nm->make_not_entrant("C1 deoptimize")) {
    if (nm->make_not_entrant(nmethod::ChangeReason::C1_deoptimize)) {
      if (reason == Deoptimization::Reason_tenured) {
        MethodData* trap_mdo = Deoptimization::get_method_data(current, method, true /*create_if_missing*/);
        if (trap_mdo != nullptr) {
@@ -1110,7 +1110,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, C1StubId stub_id ))
    // safepoint, but if it's still alive then make it not_entrant.
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    if (nm != nullptr) {
      nm->make_not_entrant("C1 code patch");
      nm->make_not_entrant(nmethod::ChangeReason::C1_codepatch);
    }

    Deoptimization::deoptimize_frame(current, caller_frame.id());
@@ -1358,7 +1358,7 @@ void Runtime1::patch_code(JavaThread* current, C1StubId stub_id) {
    // Make sure the nmethod is invalidated, i.e. made not entrant.
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    if (nm != nullptr) {
      nm->make_not_entrant("C1 deoptimize for patching");
      nm->make_not_entrant(nmethod::ChangeReason::C1_deoptimize_for_patching);
    }
  }

@@ -1486,7 +1486,7 @@ JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* current))

  nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
  assert (nm != nullptr, "no more nmethod?");
  nm->make_not_entrant("C1 predicate failed trap");
  nm->make_not_entrant(nmethod::ChangeReason::C1_predicate_failed_trap);

  methodHandle m(current, nm->method());
  MethodData* mdo = m->method_data();

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -187,13 +187,7 @@ class ValueNumberingVisitor: public InstructionVisitor {
  void do_Convert (Convert* x) { /* nothing to do */ }
  void do_NullCheck (NullCheck* x) { /* nothing to do */ }
  void do_TypeCast (TypeCast* x) { /* nothing to do */ }
  void do_NewInstance (NewInstance* x) {
    ciInstanceKlass* c = x->klass();
    if (c != nullptr && !c->is_initialized() &&
        (!c->is_loaded() || c->has_class_initializer())) {
      kill_memory();
    }
  }
  void do_NewInstance (NewInstance* x) { /* nothing to do */ }
  void do_NewTypeArray (NewTypeArray* x) { /* nothing to do */ }
  void do_NewObjectArray (NewObjectArray* x) { /* nothing to do */ }
  void do_NewMultiArray (NewMultiArray* x) { /* nothing to do */ }

@@ -110,12 +110,24 @@ const char* CDSConfig::default_archive_path() {
  // before CDSConfig::ergo_initialize() is called.
  assert(_cds_ergo_initialize_started, "sanity");
  if (_default_archive_path == nullptr) {
    char jvm_path[JVM_MAXPATHLEN];
    os::jvm_path(jvm_path, sizeof(jvm_path));
    char *end = strrchr(jvm_path, *os::file_separator());
    if (end != nullptr) *end = '\0';
    stringStream tmp;
    tmp.print("%s%sclasses", jvm_path, os::file_separator());
    if (is_vm_statically_linked()) {
      // It's easier to form the path using JAVA_HOME as os::jvm_path
      // gives the path to the launcher executable on static JDK.
      const char* subdir = WINDOWS_ONLY("bin") NOT_WINDOWS("lib");
      tmp.print("%s%s%s%s%s%sclasses",
                Arguments::get_java_home(), os::file_separator(),
                subdir, os::file_separator(),
                Abstract_VM_Version::vm_variant(), os::file_separator());
    } else {
      // Assume .jsa is in the same directory where libjvm resides on
      // non-static JDK.
      char jvm_path[JVM_MAXPATHLEN];
      os::jvm_path(jvm_path, sizeof(jvm_path));
      char *end = strrchr(jvm_path, *os::file_separator());
      if (end != nullptr) *end = '\0';
      tmp.print("%s%sclasses", jvm_path, os::file_separator());
    }
#ifdef _LP64
    if (!UseCompressedOops) {
      tmp.print_raw("_nocoops");
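
The branch above distinguishes two default archive locations. A rough standalone sketch of the resulting layout (the paths and the statically_linked flag are illustrative assumptions, not JBR's actual configuration):

    #include <cstdio>

    int main() {
      char buf[512];
      const bool statically_linked = false;  // assumption for the demo
      const bool compressed_oops   = true;
      const char* suffix = compressed_oops ? "" : "_nocoops";
      if (statically_linked) {
        // os::jvm_path() would point at the launcher, so build from JAVA_HOME
        // (the subdirectory is "bin" on Windows, "lib" elsewhere).
        snprintf(buf, sizeof(buf), "%s/lib/%s/classes%s", "/opt/jdk", "server", suffix);
      } else {
        // The archive sits next to libjvm.
        snprintf(buf, sizeof(buf), "%s/classes%s", "/opt/jdk/lib/server", suffix);
      }
      printf("%s\n", buf);  // /opt/jdk/lib/server/classes
      return 0;
    }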

@@ -147,7 +147,7 @@
  product(bool, AOTVerifyTrainingData, trueInDebug, DIAGNOSTIC, \
          "Verify archived training data") \
  \
  product(bool, AOTCompileEagerly, false, EXPERIMENTAL, \
  product(bool, AOTCompileEagerly, false, DIAGNOSTIC, \
          "Compile methods as soon as possible") \
  \
  /* AOT Code flags */ \

@@ -837,10 +837,11 @@ void MetaspaceShared::preload_and_dump(TRAPS) {
      struct stat st;
      if (os::stat(AOTCache, &st) != 0) {
        tty->print_cr("AOTCache creation failed: %s", AOTCache);
        vm_exit(0);
      } else {
        tty->print_cr("AOTCache creation is complete: %s " INT64_FORMAT " bytes", AOTCache, (int64_t)(st.st_size));
        vm_exit(0);
      }
      vm_direct_exit(0);
    }
  }
}

@@ -549,11 +549,6 @@ bool ciInstanceKlass::compute_has_trusted_loader() {
  return java_lang_ClassLoader::is_trusted_loader(loader_oop);
}

bool ciInstanceKlass::has_class_initializer() {
  VM_ENTRY_MARK;
  return get_instanceKlass()->class_initializer() != nullptr;
}

// ------------------------------------------------------------------
// ciInstanceKlass::find_method
//

@@ -231,8 +231,6 @@ public:
  ciInstanceKlass* unique_concrete_subklass();
  bool has_finalizable_subclass();

  bool has_class_initializer();

  bool contains_field_offset(int offset);

  // Get the instance of java.lang.Class corresponding to

@@ -802,7 +802,7 @@ class CompileReplay : public StackObj {
    // Make sure the existence of a prior compile doesn't stop this one
    nmethod* nm = (entry_bci != InvocationEntryBci) ? method->lookup_osr_nmethod_for(entry_bci, comp_level, true) : method->code();
    if (nm != nullptr) {
      nm->make_not_entrant("CI replay");
      nm->make_not_entrant(nmethod::ChangeReason::CI_replay);
    }
    replay_state = this;
    CompileBroker::compile_method(methodHandle(THREAD, method), entry_bci, comp_level,

@@ -154,6 +154,8 @@

#define JAVA_25_VERSION 69

#define JAVA_26_VERSION 70

void ClassFileParser::set_class_bad_constant_seen(short bad_constant) {
  assert((bad_constant == JVM_CONSTANT_Module ||
          bad_constant == JVM_CONSTANT_Package) && _major_version >= JAVA_9_VERSION,
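
For orientation, class-file major versions track JDK release numbers at a fixed offset of 44, which is where the 69 and 70 above come from. A trivial self-contained check:

    #include <cstdio>

    // Class-file major version = JDK release + 44 (JDK 9 -> 53, ..., 25 -> 69, 26 -> 70).
    static int classfile_major_for_jdk(int release) { return release + 44; }

    int main() {
      printf("JDK 25 -> %d, JDK 26 -> %d\n",
             classfile_major_for_jdk(25), classfile_major_for_jdk(26));  // 69, 70
      return 0;
    }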
@@ -3738,7 +3740,6 @@ void ClassFileParser::apply_parsed_class_metadata(
  _cp->set_pool_holder(this_klass);
  this_klass->set_constants(_cp);
  this_klass->set_fieldinfo_stream(_fieldinfo_stream);
  this_klass->set_fieldinfo_search_table(_fieldinfo_search_table);
  this_klass->set_fields_status(_fields_status);
  this_klass->set_methods(_methods);
  this_klass->set_inner_classes(_inner_classes);
@@ -3748,8 +3749,6 @@ void ClassFileParser::apply_parsed_class_metadata(
  this_klass->set_permitted_subclasses(_permitted_subclasses);
  this_klass->set_record_components(_record_components);

  DEBUG_ONLY(FieldInfoStream::validate_search_table(_cp, _fieldinfo_stream, _fieldinfo_search_table));

  // Delay the setting of _local_interfaces and _transitive_interfaces until after
  // initialize_supers() in fill_instance_klass(). It is because the _local_interfaces could
  // be shared with _transitive_interfaces and _transitive_interfaces may be shared with
@@ -5057,7 +5056,6 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik,
  // note that is not safe to use the fields in the parser from this point on
  assert(nullptr == _cp, "invariant");
  assert(nullptr == _fieldinfo_stream, "invariant");
  assert(nullptr == _fieldinfo_search_table, "invariant");
  assert(nullptr == _fields_status, "invariant");
  assert(nullptr == _methods, "invariant");
  assert(nullptr == _inner_classes, "invariant");
@@ -5278,7 +5276,6 @@ ClassFileParser::ClassFileParser(ClassFileStream* stream,
  _super_klass(),
  _cp(nullptr),
  _fieldinfo_stream(nullptr),
  _fieldinfo_search_table(nullptr),
  _fields_status(nullptr),
  _methods(nullptr),
  _inner_classes(nullptr),
@@ -5355,7 +5352,6 @@ void ClassFileParser::clear_class_metadata() {
  // deallocated if classfile parsing returns an error.
  _cp = nullptr;
  _fieldinfo_stream = nullptr;
  _fieldinfo_search_table = nullptr;
  _fields_status = nullptr;
  _methods = nullptr;
  _inner_classes = nullptr;
@@ -5378,7 +5374,6 @@ ClassFileParser::~ClassFileParser() {
  if (_fieldinfo_stream != nullptr) {
    MetadataFactory::free_array<u1>(_loader_data, _fieldinfo_stream);
  }
  MetadataFactory::free_array<u1>(_loader_data, _fieldinfo_search_table);

  if (_fields_status != nullptr) {
    MetadataFactory::free_array<FieldStatus>(_loader_data, _fields_status);
@@ -5779,7 +5774,6 @@ void ClassFileParser::post_process_parsed_stream(const ClassFileStream* const st
  _fieldinfo_stream =
    FieldInfoStream::create_FieldInfoStream(_temp_field_info, _java_fields_count,
                                            injected_fields_count, loader_data(), CHECK);
  _fieldinfo_search_table = FieldInfoStream::create_search_table(_cp, _fieldinfo_stream, _loader_data, CHECK);
  _fields_status =
    MetadataFactory::new_array<FieldStatus>(_loader_data, _temp_field_info->length(),
                                            FieldStatus(0), CHECK);

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -123,7 +123,6 @@ class ClassFileParser {
  const InstanceKlass* _super_klass;
  ConstantPool* _cp;
  Array<u1>* _fieldinfo_stream;
  Array<u1>* _fieldinfo_search_table;
  Array<FieldStatus>* _fields_status;
  Array<Method*>* _methods;
  Array<u2>* _inner_classes;

@@ -301,7 +301,7 @@ void FieldLayout::reconstruct_layout(const InstanceKlass* ik, bool& has_instance
  BasicType last_type;
  int last_offset = -1;
  while (ik != nullptr) {
    for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
    for (AllFieldStream fs(ik->fieldinfo_stream(), ik->constants()); !fs.done(); fs.next()) {
      BasicType type = Signature::basic_type(fs.signature());
      // distinction between static and non-static fields is missing
      if (fs.access_flags().is_static()) continue;
@@ -461,7 +461,7 @@ void FieldLayout::print(outputStream* output, bool is_static, const InstanceKlas
  bool found = false;
  const InstanceKlass* ik = super;
  while (!found && ik != nullptr) {
    for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
    for (AllFieldStream fs(ik->fieldinfo_stream(), ik->constants()); !fs.done(); fs.next()) {
      if (fs.offset() == b->offset()) {
        output->print_cr(" @%d \"%s\" %s %d/%d %s",
                         b->offset(),

@@ -967,13 +967,6 @@ void java_lang_Class::fixup_mirror(Klass* k, TRAPS) {
      Array<u1>* new_fis = FieldInfoStream::create_FieldInfoStream(fields, java_fields, injected_fields, k->class_loader_data(), CHECK);
      ik->set_fieldinfo_stream(new_fis);
      MetadataFactory::free_array<u1>(k->class_loader_data(), old_stream);

      Array<u1>* old_table = ik->fieldinfo_search_table();
      Array<u1>* search_table = FieldInfoStream::create_search_table(ik->constants(), new_fis, k->class_loader_data(), CHECK);
      ik->set_fieldinfo_search_table(search_table);
      MetadataFactory::free_array<u1>(k->class_loader_data(), old_table);

      DEBUG_ONLY(FieldInfoStream::validate_search_table(ik->constants(), new_fis, search_table));
    }
  }

@@ -32,7 +32,6 @@
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/vmClasses.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.hpp"
@@ -116,7 +115,6 @@ OopStorage* StringTable::_oop_storage;

static size_t _current_size = 0;
static volatile size_t _items_count = 0;
DEBUG_ONLY(static bool _disable_interning_during_cds_dump = false);

volatile bool _alt_hash = false;

@@ -348,10 +346,6 @@ bool StringTable::has_work() {
  return Atomic::load_acquire(&_has_work);
}

size_t StringTable::items_count_acquire() {
  return Atomic::load_acquire(&_items_count);
}

void StringTable::trigger_concurrent_work() {
  // Avoid churn on ServiceThread
  if (!has_work()) {
@@ -510,9 +504,6 @@ oop StringTable::intern(const char* utf8_string, TRAPS) {
}

oop StringTable::intern(const StringWrapper& name, TRAPS) {
  assert(!Atomic::load_acquire(&_disable_interning_during_cds_dump),
         "All threads that may intern strings should have been stopped before CDS starts copying the interned string table");

  // shared table always uses java_lang_String::hash_code
  unsigned int hash = hash_wrapped_string(name);
  oop found_string = lookup_shared(name, hash);
@@ -802,7 +793,7 @@ void StringTable::verify() {
}

// Verification and comp
class StringTable::VerifyCompStrings : StackObj {
class VerifyCompStrings : StackObj {
  static unsigned string_hash(oop const& str) {
    return java_lang_String::hash_code_noupdate(str);
  }
@@ -814,7 +805,7 @@ class StringTable::VerifyCompStrings : StackObj {
                 string_hash, string_equals> _table;
 public:
  size_t _errors;
  VerifyCompStrings() : _table(unsigned(items_count_acquire() / 8) + 1, 0 /* do not resize */), _errors(0) {}
  VerifyCompStrings() : _table(unsigned(_items_count / 8) + 1, 0 /* do not resize */), _errors(0) {}
  bool operator()(WeakHandle* val) {
    oop s = val->resolve();
    if (s == nullptr) {
@@ -948,31 +939,20 @@ oop StringTable::lookup_shared(const jchar* name, int len) {
  return _shared_table.lookup(wrapped_name, java_lang_String::hash_code(name, len), 0);
}

// This is called BEFORE we enter the CDS safepoint. We can still allocate Java object arrays to
// be used by the shared strings table.
// This is called BEFORE we enter the CDS safepoint. We can allocate heap objects.
// This should be called when we know no more strings will be added (which will be easy
// to guarantee because CDS runs with a single Java thread. See JDK-8253495.)
void StringTable::allocate_shared_strings_array(TRAPS) {
  if (!CDSConfig::is_dumping_heap()) {
    return;
  }
  assert(CDSConfig::allow_only_single_java_thread(), "No more interned strings can be added");

  CompileBroker::wait_for_no_active_tasks();

  precond(CDSConfig::allow_only_single_java_thread());

  // At this point, no more strings will be added:
  // - There's only a single Java thread (this thread). It no longer executes Java bytecodes
  //   so JIT compilation will eventually stop.
  // - CompileBroker has no more active tasks, so all JIT requests have been processed.

  // This flag will be cleared after intern table dumping has completed, so we can run the
  // compiler again (for future AOT method compilation, etc).
  DEBUG_ONLY(Atomic::release_store(&_disable_interning_during_cds_dump, true));

  if (items_count_acquire() > (size_t)max_jint) {
    fatal("Too many strings to be archived: %zu", items_count_acquire());
  if (_items_count > (size_t)max_jint) {
    fatal("Too many strings to be archived: %zu", _items_count);
  }

  int total = (int)items_count_acquire();
  int total = (int)_items_count;
  size_t single_array_size = objArrayOopDesc::object_size(total);

  log_info(aot)("allocated string table for %d strings", total);
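
The flag threaded through the hunks above is a debug-only guard: it is published with release semantics when the dump window opens and checked with acquire semantics in intern(), so a racing reader that sees the flag also sees everything written before it was set. A minimal portable sketch of that pattern using standard atomics (names are illustrative, not HotSpot's):

    #include <atomic>
    #include <cassert>

    static std::atomic<bool> g_interning_disabled{false};

    void begin_intern_table_dump() {
      // Release: a reader observing 'true' also observes all prior writes.
      g_interning_disabled.store(true, std::memory_order_release);
    }

    void end_intern_table_dump() {
      g_interning_disabled.store(false, std::memory_order_release);
    }

    void intern_string() {
      // Debug-only invariant: no thread may intern while the table is dumped.
      assert(!g_interning_disabled.load(std::memory_order_acquire) &&
             "interning is forbidden while the shared string table is being written");
      // ... normal interning path ...
    }

    int main() {
      begin_intern_table_dump();
      end_intern_table_dump();
      intern_string();  // fine here; would assert inside a dump window
      return 0;
    }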
@@ -992,7 +972,7 @@ void StringTable::allocate_shared_strings_array(TRAPS) {
    // This can only happen if you have an extremely large number of classes that
    // refer to more than 16384 * 16384 = 26M interned strings! Not a practical concern
    // but bail out for safety.
    log_error(aot)("Too many strings to be archived: %zu", items_count_acquire());
    log_error(aot)("Too many strings to be archived: %zu", _items_count);
    MetaspaceShared::unrecoverable_writing_error();
  }

@@ -1090,7 +1070,7 @@ oop StringTable::init_shared_strings_array() {

void StringTable::write_shared_table() {
  _shared_table.reset();
  CompactHashtableWriter writer((int)items_count_acquire(), ArchiveBuilder::string_stats());
  CompactHashtableWriter writer((int)_items_count, ArchiveBuilder::string_stats());

  int index = 0;
  auto copy_into_shared_table = [&] (WeakHandle* val) {
@@ -1104,8 +1084,6 @@ void StringTable::write_shared_table() {
  };
  _local_table->do_safepoint_scan(copy_into_shared_table);
  writer.dump(&_shared_table, "string");

  DEBUG_ONLY(Atomic::release_store(&_disable_interning_during_cds_dump, false));
}

void StringTable::set_shared_strings_array_index(int root_index) {

@@ -40,7 +40,7 @@ class StringTableConfig;

class StringTable : AllStatic {
  friend class StringTableConfig;
  class VerifyCompStrings;

  static volatile bool _has_work;

  // Set if one bucket is out of balance due to hash algorithm deficiency
@@ -74,7 +74,6 @@ private:

  static void item_added();
  static void item_removed();
  static size_t items_count_acquire();

  static oop intern(const StringWrapper& name, TRAPS);
  static oop do_intern(const StringWrapper& name, uintx hash, TRAPS);

@@ -289,8 +289,6 @@ bool vmIntrinsics::disabled_by_jvm_flags(vmIntrinsics::ID id) {
  case vmIntrinsics::_dsin:
  case vmIntrinsics::_dcos:
  case vmIntrinsics::_dtan:
  case vmIntrinsics::_dtanh:
  case vmIntrinsics::_dcbrt:
  case vmIntrinsics::_dlog:
  case vmIntrinsics::_dexp:
  case vmIntrinsics::_dpow:
@@ -316,6 +314,13 @@ bool vmIntrinsics::disabled_by_jvm_flags(vmIntrinsics::ID id) {
  case vmIntrinsics::_fmaF:
    if (!InlineMathNatives || !UseFMA) return true;
    break;
  case vmIntrinsics::_dtanh:
  case vmIntrinsics::_dcbrt:
    if (!InlineMathNatives || !InlineIntrinsics) return true;
#if defined(AMD64) && (defined(COMPILER1) || defined(COMPILER2))
    if (!UseLibmIntrinsic) return true;
#endif
    break;
  case vmIntrinsics::_floatToFloat16:
  case vmIntrinsics::_float16ToFloat:
    if (!InlineIntrinsics) return true;

@@ -344,7 +344,6 @@ AOTCodeCache::~AOTCodeCache() {
    _store_buffer = nullptr;
  }
  if (_table != nullptr) {
    MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
    delete _table;
    _table = nullptr;
  }
@@ -775,9 +774,6 @@ bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind
  // we need to take a lock to prevent race between compiler threads generating AOT code
  // and the main thread generating adapter
  MutexLocker ml(Compile_lock);
  if (!is_on()) {
    return false; // AOT code cache was already dumped and closed.
  }
  if (!cache->align_write()) {
    return false;
  }
@@ -1438,9 +1434,6 @@ AOTCodeAddressTable::~AOTCodeAddressTable() {
  if (_extrs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _extrs_addr);
  }
  if (_stubs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _stubs_addr);
  }
  if (_shared_blobs_addr != nullptr) {
    FREE_C_HEAP_ARRAY(address, _shared_blobs_addr);
  }
@@ -1492,7 +1485,6 @@ void AOTCodeCache::load_strings() {

int AOTCodeCache::store_strings() {
  if (_C_strings_used > 0) {
    MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
    uint offset = _write_position;
    uint length = 0;
    uint* lengths = (uint *)reserve_bytes(sizeof(uint) * _C_strings_used);
@@ -1518,17 +1510,15 @@ int AOTCodeCache::store_strings() {

const char* AOTCodeCache::add_C_string(const char* str) {
  if (is_on_for_dump() && str != nullptr) {
    MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
    AOTCodeAddressTable* table = addr_table();
    if (table != nullptr) {
      return table->add_C_string(str);
    }
    return _cache->_table->add_C_string(str);
  }
  return str;
}

const char* AOTCodeAddressTable::add_C_string(const char* str) {
  if (_extrs_complete) {
    LogStreamHandle(Trace, aot, codecache, stringtable) log; // ctor outside lock
    MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
    // Check previous strings address
    for (int i = 0; i < _C_strings_count; i++) {
      if (_C_strings_in[i] == str) {
@@ -1545,7 +1535,9 @@ const char* AOTCodeAddressTable::add_C_string(const char* str) {
      _C_strings_in[_C_strings_count] = str;
      const char* dup = os::strdup(str);
      _C_strings[_C_strings_count++] = dup;
      log_trace(aot, codecache, stringtable)("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup);
      if (log.is_enabled()) {
        log.print_cr("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup);
      }
      return dup;
    } else {
      assert(false, "Number of C strings >= MAX_STR_COUNT");

@@ -136,7 +136,6 @@ private:
 public:
  AOTCodeAddressTable() :
    _extrs_addr(nullptr),
    _stubs_addr(nullptr),
    _shared_blobs_addr(nullptr),
    _C1_blobs_addr(nullptr),
    _extrs_length(0),

@@ -160,7 +160,7 @@ CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size
    }
  } else {
    // We need unique and valid not null address
    assert(_mutable_data == blob_end(), "sanity");
    assert(_mutable_data = blob_end(), "sanity");
  }

  set_oop_maps(oop_maps);
@@ -177,7 +177,6 @@ CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t heade
  _code_offset(_content_offset),
  _data_offset(size),
  _frame_size(0),
  _mutable_data_size(0),
  S390_ONLY(_ctable_offset(0) COMMA)
  _header_size(header_size),
  _frame_complete_offset(CodeOffsets::frame_never_safe),
@@ -186,7 +185,7 @@
{
  assert(is_aligned(size, oopSize), "unaligned size");
  assert(is_aligned(header_size, oopSize), "unaligned size");
  assert(_mutable_data == blob_end(), "sanity");
  assert(_mutable_data = blob_end(), "sanity");
}

void CodeBlob::restore_mutable_data(address reloc_data) {
@@ -196,11 +195,8 @@ void CodeBlob::restore_mutable_data(address reloc_data) {
    if (_mutable_data == nullptr) {
      vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
    }
  } else {
    _mutable_data = blob_end(); // default value
  }
  if (_relocation_size > 0) {
    assert(_mutable_data_size > 0, "relocation is part of mutable data section");
    memcpy((address)relocation_begin(), reloc_data, relocation_size());
  }
}
@@ -210,8 +206,6 @@ void CodeBlob::purge() {
  if (_mutable_data != blob_end()) {
    os::free(_mutable_data);
    _mutable_data = blob_end(); // Valid not null address
    _mutable_data_size = 0;
    _relocation_size = 0;
  }
  if (_oop_maps != nullptr) {
    delete _oop_maps;

@@ -247,7 +247,7 @@ public:
  // Sizes
  int size() const { return _size; }
  int header_size() const { return _header_size; }
  int relocation_size() const { return _relocation_size; }
  int relocation_size() const { return pointer_delta_as_int((address) relocation_end(), (address) relocation_begin()); }
  int content_size() const { return pointer_delta_as_int(content_end(), content_begin()); }
  int code_size() const { return pointer_delta_as_int(code_end(), code_begin()); }

@@ -1361,7 +1361,7 @@ void CodeCache::make_marked_nmethods_deoptimized() {
  while(iter.next()) {
    nmethod* nm = iter.method();
    if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) {
      nm->make_not_entrant("marked for deoptimization");
      nm->make_not_entrant(nmethod::ChangeReason::marked_for_deoptimization);
      nm->make_deoptimized();
    }
  }

@@ -28,6 +28,7 @@
#include "code/dependencies.hpp"
#include "code/nativeInst.hpp"
#include "code/nmethod.inline.hpp"
#include "code/relocInfo.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compilationLog.hpp"
@@ -1652,6 +1653,10 @@ void nmethod::maybe_print_nmethod(const DirectiveSet* directive) {
}

void nmethod::print_nmethod(bool printmethod) {
  // Enter a critical section to prevent a race with deopts that patch code and updates the relocation info.
  // Unfortunately, we have to lock the NMethodState_lock before the tty lock due to the deadlock rules and
  // cannot lock in a more finely grained manner.
  ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
  ttyLocker ttyl;  // keep the following output all in one block
  if (xtty != nullptr) {
    xtty->begin_head("print_nmethod");
@@ -1970,14 +1975,12 @@ void nmethod::invalidate_osr_method() {
  }
}

void nmethod::log_state_change(const char* reason) const {
  assert(reason != nullptr, "Must provide a reason");

void nmethod::log_state_change(ChangeReason change_reason) const {
  if (LogCompilation) {
    if (xtty != nullptr) {
      ttyLocker ttyl;  // keep the following output all in one block
      xtty->begin_elem("make_not_entrant thread='%zu' reason='%s'",
                       os::current_thread_id(), reason);
                       os::current_thread_id(), change_reason_to_string(change_reason));
      log_identity(xtty);
      xtty->stamp();
      xtty->end_elem();
@@ -1986,7 +1989,7 @@ void nmethod::log_state_change(const char* reason) const {

  ResourceMark rm;
  stringStream ss(NEW_RESOURCE_ARRAY(char, 256), 256);
  ss.print("made not entrant: %s", reason);
  ss.print("made not entrant: %s", change_reason_to_string(change_reason));

  CompileTask::print_ul(this, ss.freeze());
  if (PrintCompilation) {
@@ -2001,9 +2004,7 @@ void nmethod::unlink_from_method() {
}

// Invalidate code
bool nmethod::make_not_entrant(const char* reason) {
  assert(reason != nullptr, "Must provide a reason");

bool nmethod::make_not_entrant(ChangeReason change_reason) {
  // This can be called while the system is already at a safepoint which is ok
  NoSafepointVerifier nsv;

@@ -2041,6 +2042,17 @@ bool nmethod::make_not_entrant(const char* reason) {
    // cache call.
    NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
                                     SharedRuntime::get_handle_wrong_method_stub());

    // Update the relocation info for the patched entry.
    // First, get the old relocation info...
    RelocIterator iter(this, verified_entry_point(), verified_entry_point() + 8);
    if (iter.next() && iter.addr() == verified_entry_point()) {
      Relocation* old_reloc = iter.reloc();
      // ...then reset the iterator to update it.
      RelocIterator iter(this, verified_entry_point(), verified_entry_point() + 8);
      relocInfo::change_reloc_info_for_address(&iter, verified_entry_point(), old_reloc->type(),
                                               relocInfo::relocType::runtime_call_type);
    }
  }

  if (update_recompile_counts()) {
@@ -2061,7 +2073,7 @@ bool nmethod::make_not_entrant(const char* reason) {
  assert(success, "Transition can't fail");

  // Log the transition once
  log_state_change(reason);
  log_state_change(change_reason);

  // Remove nmethod from method.
  unlink_from_method();
@@ -2166,7 +2178,6 @@ void nmethod::purge(bool unregister_nmethod) {
  }
  CodeCache::unregister_old_nmethod(this);

  JVMCI_ONLY( _metadata_size = 0; )
  CodeBlob::purge();
}

@@ -471,6 +471,85 @@ class nmethod : public CodeBlob {
  void oops_do_set_strong_done(nmethod* old_head);

 public:
  enum class ChangeReason : u1 {
    C1_codepatch,
    C1_deoptimize,
    C1_deoptimize_for_patching,
    C1_predicate_failed_trap,
    CI_replay,
    JVMCI_invalidate_nmethod,
    JVMCI_invalidate_nmethod_mirror,
    JVMCI_materialize_virtual_object,
    JVMCI_new_installation,
    JVMCI_register_method,
    JVMCI_replacing_with_new_code,
    JVMCI_reprofile,
    marked_for_deoptimization,
    missing_exception_handler,
    not_used,
    OSR_invalidation_back_branch,
    OSR_invalidation_for_compiling_with_C1,
    OSR_invalidation_of_lower_level,
    set_native_function,
    uncommon_trap,
    whitebox_deoptimization,
    zombie,
  };

  static const char* change_reason_to_string(ChangeReason change_reason) {
    switch (change_reason) {
      case ChangeReason::C1_codepatch:
        return "C1 code patch";
      case ChangeReason::C1_deoptimize:
        return "C1 deoptimized";
      case ChangeReason::C1_deoptimize_for_patching:
        return "C1 deoptimize for patching";
      case ChangeReason::C1_predicate_failed_trap:
        return "C1 predicate failed trap";
      case ChangeReason::CI_replay:
        return "CI replay";
      case ChangeReason::JVMCI_invalidate_nmethod:
        return "JVMCI invalidate nmethod";
      case ChangeReason::JVMCI_invalidate_nmethod_mirror:
        return "JVMCI invalidate nmethod mirror";
      case ChangeReason::JVMCI_materialize_virtual_object:
        return "JVMCI materialize virtual object";
      case ChangeReason::JVMCI_new_installation:
        return "JVMCI new installation";
      case ChangeReason::JVMCI_register_method:
        return "JVMCI register method";
      case ChangeReason::JVMCI_replacing_with_new_code:
        return "JVMCI replacing with new code";
      case ChangeReason::JVMCI_reprofile:
        return "JVMCI reprofile";
      case ChangeReason::marked_for_deoptimization:
        return "marked for deoptimization";
      case ChangeReason::missing_exception_handler:
        return "missing exception handler";
      case ChangeReason::not_used:
        return "not used";
      case ChangeReason::OSR_invalidation_back_branch:
        return "OSR invalidation back branch";
      case ChangeReason::OSR_invalidation_for_compiling_with_C1:
        return "OSR invalidation for compiling with C1";
      case ChangeReason::OSR_invalidation_of_lower_level:
        return "OSR invalidation of lower level";
      case ChangeReason::set_native_function:
        return "set native function";
      case ChangeReason::uncommon_trap:
        return "uncommon trap";
      case ChangeReason::whitebox_deoptimization:
        return "whitebox deoptimization";
      case ChangeReason::zombie:
        return "zombie";
      default: {
        assert(false, "Unhandled reason");
        return "Unknown";
      }
    }
  }

  // create nmethod with entry_bci
  static nmethod* new_nmethod(const methodHandle& method,
                              int compile_id,
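
The @@ -471 hunk above replaces free-form C-string reasons with a scoped enum plus one exhaustive string mapping, so call sites cannot pass typos and every reason has a single canonical log spelling. A self-contained sketch of the pattern (a reduced enum, not the full HotSpot list):

    #include <cstdio>

    // Scoped enum of invalidation reasons; one switch yields the log string.
    enum class ChangeReason : unsigned char { uncommon_trap, not_used, zombie };

    static const char* to_string(ChangeReason r) {
      switch (r) {
        case ChangeReason::uncommon_trap: return "uncommon trap";
        case ChangeReason::not_used:      return "not used";
        case ChangeReason::zombie:        return "zombie";
      }
      return "unknown";  // unreachable while the switch stays exhaustive
    }

    int main() {
      printf("%s\n", to_string(ChangeReason::uncommon_trap));  // prints "uncommon trap"
      return 0;
    }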
|
||||
@@ -633,8 +712,8 @@ public:
|
||||
// alive. It is used when an uncommon trap happens. Returns true
|
||||
// if this thread changed the state of the nmethod or false if
|
||||
// another thread performed the transition.
|
||||
bool make_not_entrant(const char* reason);
|
||||
bool make_not_used() { return make_not_entrant("not used"); }
|
||||
bool make_not_entrant(ChangeReason change_reason);
|
||||
bool make_not_used() { return make_not_entrant(ChangeReason::not_used); }
|
||||
|
||||
bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
|
||||
bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
|
||||
@@ -947,7 +1026,7 @@ public:
|
||||
// Logging
|
||||
void log_identity(xmlStream* log) const;
|
||||
void log_new_nmethod() const;
|
||||
void log_state_change(const char* reason) const;
|
||||
void log_state_change(ChangeReason change_reason) const;
|
||||
|
||||
// Prints block-level comments, including nmethod specific block labels:
|
||||
void print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels=true) const;
|
||||
|
||||
@@ -924,7 +924,7 @@ void CompilationPolicy::compile(const methodHandle& mh, int bci, CompLevel level
|
||||
nmethod* osr_nm = mh->lookup_osr_nmethod_for(bci, CompLevel_simple, false);
|
||||
if (osr_nm != nullptr && osr_nm->comp_level() > CompLevel_simple) {
|
||||
// Invalidate the existing OSR nmethod so that a compile at CompLevel_simple is permitted.
|
||||
osr_nm->make_not_entrant("OSR invalidation for compiling with C1");
|
||||
osr_nm->make_not_entrant(nmethod::ChangeReason::OSR_invalidation_for_compiling_with_C1);
|
||||
}
|
||||
compile(mh, bci, CompLevel_simple, THREAD);
|
||||
}
|
||||
@@ -1516,7 +1516,7 @@ void CompilationPolicy::method_back_branch_event(const methodHandle& mh, const m
|
||||
int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
|
||||
print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
|
||||
}
|
||||
nm->make_not_entrant("OSR invalidation, back branch");
|
||||
nm->make_not_entrant(nmethod::ChangeReason::OSR_invalidation_back_branch);
|
||||
}
|
||||
}
|
||||
// Fix up next_level if necessary to avoid deopts
|
||||
|
||||
@@ -1750,10 +1750,6 @@ void CompileBroker::wait_for_completion(CompileTask* task) {
|
||||
}
|
||||
}
|
||||
|
||||
void CompileBroker::wait_for_no_active_tasks() {
|
||||
CompileTask::wait_for_no_active_tasks();
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize compiler thread(s) + compiler object(s). The postcondition
|
||||
* of this function is that the compiler runtimes are initialized and that
|
||||
|
||||
@@ -383,9 +383,6 @@ public:
|
||||
static bool is_compilation_disabled_forever() {
|
||||
return _should_compile_new_jobs == shutdown_compilation;
|
||||
}
|
||||
|
||||
static void wait_for_no_active_tasks();
|
||||
|
||||
static void handle_full_code_cache(CodeBlobType code_blob_type);
|
||||
// Ensures that warning is only printed once.
|
||||
static bool should_print_compiler_warning() {
|
||||
|
||||
@@ -37,13 +37,12 @@
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
|
||||
CompileTask* CompileTask::_task_free_list = nullptr;
|
||||
int CompileTask::_active_tasks = 0;
|
||||
|
||||
/**
|
||||
* Allocate a CompileTask, from the free list if possible.
|
||||
*/
|
||||
CompileTask* CompileTask::allocate() {
|
||||
MonitorLocker locker(CompileTaskAlloc_lock);
|
||||
MutexLocker locker(CompileTaskAlloc_lock);
|
||||
CompileTask* task = nullptr;
|
||||
|
||||
if (_task_free_list != nullptr) {
|
||||
@@ -57,7 +56,6 @@ CompileTask* CompileTask::allocate() {
|
||||
}
|
||||
assert(task->is_free(), "Task must be free.");
|
||||
task->set_is_free(false);
|
||||
_active_tasks++;
|
||||
return task;
|
||||
}
|
||||
|
||||
@@ -65,7 +63,7 @@ CompileTask* CompileTask::allocate() {
|
||||
* Add a task to the free list.
|
||||
*/
|
||||
void CompileTask::free(CompileTask* task) {
|
||||
MonitorLocker locker(CompileTaskAlloc_lock);
|
||||
MutexLocker locker(CompileTaskAlloc_lock);
|
||||
if (!task->is_free()) {
|
||||
if ((task->_method_holder != nullptr && JNIHandles::is_weak_global_handle(task->_method_holder))) {
|
||||
JNIHandles::destroy_weak_global(task->_method_holder);
|
||||
@@ -81,17 +79,6 @@ void CompileTask::free(CompileTask* task) {
|
||||
task->set_is_free(true);
|
||||
task->set_next(_task_free_list);
|
||||
_task_free_list = task;
|
||||
_active_tasks--;
|
||||
if (_active_tasks == 0) {
|
||||
locker.notify_all();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void CompileTask::wait_for_no_active_tasks() {
|
||||
MonitorLocker locker(CompileTaskAlloc_lock);
|
||||
while (_active_tasks > 0) {
|
||||
locker.wait();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -83,7 +83,6 @@ class CompileTask : public CHeapObj<mtCompiler> {
|
||||
|
||||
private:
|
||||
static CompileTask* _task_free_list;
|
||||
static int _active_tasks;
|
||||
int _compile_id;
|
||||
Method* _method;
|
||||
jobject _method_holder;
|
||||
@@ -124,7 +123,6 @@ class CompileTask : public CHeapObj<mtCompiler> {
|
||||
|
||||
static CompileTask* allocate();
|
||||
static void free(CompileTask* task);
|
||||
static void wait_for_no_active_tasks();
|
||||
|
||||
int compile_id() const { return _compile_id; }
|
||||
Method* method() const { return _method; }
|
||||
|
@@ -98,15 +98,15 @@ void ParallelArguments::initialize() {
   FullGCForwarding::initialize_flags(heap_reserved_size_bytes());
 }
 
-// The alignment used for boundary between young gen and old gen
-static size_t default_gen_alignment() {
+// The alignment used for spaces in young gen and old gen
+static size_t default_space_alignment() {
   return 64 * K * HeapWordSize;
 }
 
 void ParallelArguments::initialize_alignments() {
   // Initialize card size before initializing alignments
   CardTable::initialize_card_size();
-  SpaceAlignment = GenAlignment = default_gen_alignment();
+  SpaceAlignment = default_space_alignment();
   HeapAlignment = compute_heap_alignment();
 }
 
@@ -123,9 +123,8 @@ void ParallelArguments::initialize_heap_flags_and_sizes() {
 
   // Can a page size be something else than a power of two?
   assert(is_power_of_2((intptr_t)page_sz), "must be a power of 2");
-  size_t new_alignment = align_up(page_sz, GenAlignment);
-  if (new_alignment != GenAlignment) {
-    GenAlignment = new_alignment;
+  size_t new_alignment = align_up(page_sz, SpaceAlignment);
+  if (new_alignment != SpaceAlignment) {
+    SpaceAlignment = new_alignment;
     // Redo everything from the start
     initialize_heap_flags_and_sizes_one_pass();
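The retry in the hunk above exists because the chosen page size is only known once sizing has started: if it does not divide the current SpaceAlignment, the alignment is grown to cover the page size and the whole sizing pass is redone. A sketch of that align-and-retry step (align_up_pow2 is an illustrative helper standing in for HotSpot's align_up; both arguments are assumed to be powers of two):

    #include <cassert>
    #include <cstddef>

    // Round x up to the next multiple of a power-of-two alignment.
    static size_t align_up_pow2(size_t x, size_t alignment) {
      assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of two");
      return (x + alignment - 1) & ~(alignment - 1);
    }

    // Mirrors the logic above: align the page size up to the current space
    // alignment; if that changes anything, the page size is the larger of the
    // two, so it becomes the new alignment and sizing starts over.
    size_t settle_space_alignment(size_t space_alignment, size_t page_size) {
      size_t new_alignment = align_up_pow2(page_size, space_alignment);
      if (new_alignment != space_alignment) {
        space_alignment = new_alignment;  // caller would redo flags and sizes here
      }
      return space_alignment;
    }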
@@ -29,10 +29,8 @@
 void ParallelInitLogger::print_heap() {
   log_info_p(gc, init)("Alignments:"
                        " Space " EXACTFMT ","
-                       " Generation " EXACTFMT ","
                        " Heap " EXACTFMT,
                        EXACTFMTARGS(SpaceAlignment),
-                       EXACTFMTARGS(GenAlignment),
                        EXACTFMTARGS(HeapAlignment));
   GCInitLogger::print_heap();
 }
@@ -69,8 +69,8 @@ jint ParallelScavengeHeap::initialize() {
 
   initialize_reserved_region(heap_rs);
   // Layout the reserved space for the generations.
-  ReservedSpace old_rs = heap_rs.first_part(MaxOldSize, GenAlignment);
-  ReservedSpace young_rs = heap_rs.last_part(MaxOldSize, GenAlignment);
+  ReservedSpace old_rs = heap_rs.first_part(MaxOldSize, SpaceAlignment);
+  ReservedSpace young_rs = heap_rs.last_part(MaxOldSize, SpaceAlignment);
   assert(young_rs.size() == MaxNewSize, "Didn't reserve all of the heap");
 
   PSCardTable* card_table = new PSCardTable(_reserved);
@@ -107,7 +107,7 @@ jint ParallelScavengeHeap::initialize() {
     new PSAdaptiveSizePolicy(eden_capacity,
                              initial_promo_size,
                              young_gen()->to_space()->capacity_in_bytes(),
-                             GenAlignment,
+                             SpaceAlignment,
                              max_gc_pause_sec,
                              GCTimeRatio
                              );
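The first hunk above carves one contiguous heap reservation into the two generations at an aligned offset: first_part and last_part take the same split size, so the two pieces tile the reservation exactly (the SerialHeap hunk further down repeats the same pattern with young and old swapped). A toy model of that carving, under the assumption that the split offset already sits on the alignment (Range and its methods are illustrative, not the ReservedSpace API):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Toy stand-in for a reserved address range.
    struct Range {
      uintptr_t base;
      size_t    size;
      Range first_part(size_t split) const { return {base, split}; }               // [base, base+split)
      Range last_part(size_t split)  const { return {base + split, size - split}; } // [base+split, base+size)
    };

    int main() {
      const size_t alignment = 64 * 1024;
      const size_t max_old   = 32u * 1024 * 1024;   // stand-in for MaxOldSize
      const size_t total     = 96u * 1024 * 1024;   // old part + young part

      Range heap_rs{0x10000000u, total};
      assert(max_old % alignment == 0);             // split must sit on the alignment
      Range old_rs   = heap_rs.first_part(max_old);
      Range young_rs = heap_rs.last_part(max_old);
      assert(old_rs.size + young_rs.size == total); // nothing lost at the seam
      return 0;
    }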
@@ -41,7 +41,7 @@ PSOldGen::PSOldGen(ReservedSpace rs, size_t initial_size, size_t min_size,
   _min_gen_size(min_size),
   _max_gen_size(max_size)
 {
-  initialize(rs, initial_size, GenAlignment);
+  initialize(rs, initial_size, SpaceAlignment);
 }
 
 void PSOldGen::initialize(ReservedSpace rs, size_t initial_size, size_t alignment) {
@@ -47,7 +47,7 @@ PSYoungGen::PSYoungGen(ReservedSpace rs, size_t initial_size, size_t min_size, s
   _from_counters(nullptr),
   _to_counters(nullptr)
 {
-  initialize(rs, initial_size, GenAlignment);
+  initialize(rs, initial_size, SpaceAlignment);
 }
 
 void PSYoungGen::initialize_virtual_space(ReservedSpace rs,
@@ -746,7 +746,7 @@ size_t PSYoungGen::available_to_live() {
   }
 
   size_t delta_in_bytes = unused_committed + delta_in_survivor;
-  delta_in_bytes = align_down(delta_in_bytes, GenAlignment);
+  delta_in_bytes = align_down(delta_in_bytes, SpaceAlignment);
   return delta_in_bytes;
 }
@@ -188,8 +188,8 @@ jint SerialHeap::initialize() {
 
   initialize_reserved_region(heap_rs);
 
-  ReservedSpace young_rs = heap_rs.first_part(MaxNewSize, GenAlignment);
-  ReservedSpace old_rs = heap_rs.last_part(MaxNewSize, GenAlignment);
+  ReservedSpace young_rs = heap_rs.first_part(MaxNewSize, SpaceAlignment);
+  ReservedSpace old_rs = heap_rs.last_part(MaxNewSize, SpaceAlignment);
 
   _rem_set = new CardTableRS(_reserved);
   _rem_set->initialize(young_rs.base(), old_rs.base());
@@ -35,7 +35,7 @@ extern size_t SpaceAlignment;
 
 class GCArguments {
 protected:
-  // Initialize HeapAlignment, SpaceAlignment, and extra alignments (E.g. GenAlignment)
+  // Initialize HeapAlignment, SpaceAlignment
   virtual void initialize_alignments() = 0;
   virtual void initialize_heap_flags_and_sizes();
   virtual void initialize_size_info();
@@ -42,17 +42,15 @@ size_t MaxOldSize = 0;
 // See more in JDK-8346005
 size_t OldSize = ScaleForWordSize(4*M);
 
-size_t GenAlignment = 0;
-
 size_t GenArguments::conservative_max_heap_alignment() { return (size_t)Generation::GenGrain; }
 
 static size_t young_gen_size_lower_bound() {
   // The young generation must be aligned and have room for eden + two survivors
-  return align_up(3 * SpaceAlignment, GenAlignment);
+  return 3 * SpaceAlignment;
 }
 
 static size_t old_gen_size_lower_bound() {
-  return align_up(SpaceAlignment, GenAlignment);
+  return SpaceAlignment;
 }
 
 size_t GenArguments::scale_by_NewRatio_aligned(size_t base_size, size_t alignment) {
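The simplification of the two lower bounds above is behavior-preserving because, as the initialize_alignments hunk just below shows, SpaceAlignment and GenAlignment were assigned the same value, so the align_up calls were identities. The minimum young generation is one aligned space each for eden and the two survivor spaces; a small sketch of the arithmetic (the alignment value is illustrative):

    #include <cassert>
    #include <cstddef>

    // Minimum young gen: eden + from-space + to-space, one aligned space each.
    size_t young_gen_size_lower_bound(size_t space_alignment) {
      return 3 * space_alignment;  // was align_up(3 * SpaceAlignment, GenAlignment)
    }

    // Minimum old gen: a single aligned space.
    size_t old_gen_size_lower_bound(size_t space_alignment) {
      return space_alignment;      // was align_up(SpaceAlignment, GenAlignment)
    }

    int main() {
      // With GenAlignment == SpaceAlignment, the old align_up calls changed
      // nothing, so both bounds come out the same before and after the patch.
      const size_t space_alignment = 512 * 1024;  // illustrative value
      assert(young_gen_size_lower_bound(space_alignment) == 3 * space_alignment);
      assert(old_gen_size_lower_bound(space_alignment) == space_alignment);
      return 0;
    }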
@@ -69,23 +67,20 @@ static size_t bound_minus_alignment(size_t desired_size,
 void GenArguments::initialize_alignments() {
   // Initialize card size before initializing alignments
   CardTable::initialize_card_size();
-  SpaceAlignment = GenAlignment = (size_t)Generation::GenGrain;
+  SpaceAlignment = (size_t)Generation::GenGrain;
   HeapAlignment = compute_heap_alignment();
 }
 
 void GenArguments::initialize_heap_flags_and_sizes() {
   GCArguments::initialize_heap_flags_and_sizes();
 
-  assert(GenAlignment != 0, "Generation alignment not set up properly");
-  assert(HeapAlignment >= GenAlignment,
-         "HeapAlignment: %zu less than GenAlignment: %zu",
-         HeapAlignment, GenAlignment);
-  assert(GenAlignment % SpaceAlignment == 0,
-         "GenAlignment: %zu not aligned by SpaceAlignment: %zu",
-         GenAlignment, SpaceAlignment);
-  assert(HeapAlignment % GenAlignment == 0,
-         "HeapAlignment: %zu not aligned by GenAlignment: %zu",
-         HeapAlignment, GenAlignment);
+  assert(SpaceAlignment != 0, "Generation alignment not set up properly");
+  assert(HeapAlignment >= SpaceAlignment,
+         "HeapAlignment: %zu less than SpaceAlignment: %zu",
+         HeapAlignment, SpaceAlignment);
+  assert(HeapAlignment % SpaceAlignment == 0,
+         "HeapAlignment: %zu not aligned by SpaceAlignment: %zu",
+         HeapAlignment, SpaceAlignment);
 
   // All generational heaps have a young gen; handle those flags here
 
@@ -106,7 +101,7 @@ void GenArguments::initialize_heap_flags_and_sizes() {
 
   // Make sure NewSize allows an old generation to fit even if set on the command line
   if (FLAG_IS_CMDLINE(NewSize) && NewSize >= InitialHeapSize) {
-    size_t revised_new_size = bound_minus_alignment(NewSize, InitialHeapSize, GenAlignment);
+    size_t revised_new_size = bound_minus_alignment(NewSize, InitialHeapSize, SpaceAlignment);
     log_warning(gc, ergo)("NewSize (%zuk) is equal to or greater than initial heap size (%zuk). A new "
                           "NewSize of %zuk will be used to accomodate an old generation.",
                           NewSize/K, InitialHeapSize/K, revised_new_size/K);
@@ -115,8 +110,8 @@ void GenArguments::initialize_heap_flags_and_sizes() {
 
   // Now take the actual NewSize into account. We will silently increase NewSize
   // if the user specified a smaller or unaligned value.
-  size_t bounded_new_size = bound_minus_alignment(NewSize, MaxHeapSize, GenAlignment);
-  bounded_new_size = MAX2(smallest_new_size, align_down(bounded_new_size, GenAlignment));
+  size_t bounded_new_size = bound_minus_alignment(NewSize, MaxHeapSize, SpaceAlignment);
+  bounded_new_size = MAX2(smallest_new_size, align_down(bounded_new_size, SpaceAlignment));
   if (bounded_new_size != NewSize) {
     FLAG_SET_ERGO(NewSize, bounded_new_size);
   }
@@ -125,7 +120,7 @@ void GenArguments::initialize_heap_flags_and_sizes() {
   if (!FLAG_IS_DEFAULT(MaxNewSize)) {
     if (MaxNewSize >= MaxHeapSize) {
       // Make sure there is room for an old generation
-      size_t smaller_max_new_size = MaxHeapSize - GenAlignment;
+      size_t smaller_max_new_size = MaxHeapSize - SpaceAlignment;
       if (FLAG_IS_CMDLINE(MaxNewSize)) {
         log_warning(gc, ergo)("MaxNewSize (%zuk) is equal to or greater than the entire "
                               "heap (%zuk). A new max generation size of %zuk will be used.",
@@ -137,8 +132,8 @@ void GenArguments::initialize_heap_flags_and_sizes() {
       }
     } else if (MaxNewSize < NewSize) {
       FLAG_SET_ERGO(MaxNewSize, NewSize);
-    } else if (!is_aligned(MaxNewSize, GenAlignment)) {
-      FLAG_SET_ERGO(MaxNewSize, align_down(MaxNewSize, GenAlignment));
+    } else if (!is_aligned(MaxNewSize, SpaceAlignment)) {
+      FLAG_SET_ERGO(MaxNewSize, align_down(MaxNewSize, SpaceAlignment));
     }
   }
 
@@ -166,13 +161,13 @@ void GenArguments::initialize_heap_flags_and_sizes() {
     // exceed it. Adjust New/OldSize as necessary.
     size_t calculated_size = NewSize + OldSize;
     double shrink_factor = (double) MaxHeapSize / calculated_size;
-    size_t smaller_new_size = align_down((size_t)(NewSize * shrink_factor), GenAlignment);
+    size_t smaller_new_size = align_down((size_t)(NewSize * shrink_factor), SpaceAlignment);
     FLAG_SET_ERGO(NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size));
 
     // OldSize is already aligned because above we aligned MaxHeapSize to
     // HeapAlignment, and we just made sure that NewSize is aligned to
-    // GenAlignment. In initialize_flags() we verified that HeapAlignment
-    // is a multiple of GenAlignment.
+    // SpaceAlignment. In initialize_flags() we verified that HeapAlignment
+    // is a multiple of SpaceAlignment.
     OldSize = MaxHeapSize - NewSize;
   } else {
     FLAG_SET_ERGO(MaxHeapSize, align_up(NewSize + OldSize, HeapAlignment));
@@ -200,7 +195,7 @@ void GenArguments::initialize_size_info() {
   // Determine maximum size of the young generation.
 
   if (FLAG_IS_DEFAULT(MaxNewSize)) {
-    max_young_size = scale_by_NewRatio_aligned(MaxHeapSize, GenAlignment);
+    max_young_size = scale_by_NewRatio_aligned(MaxHeapSize, SpaceAlignment);
     // Bound the maximum size by NewSize below (since it historically
     // would have been NewSize and because the NewRatio calculation could
     // yield a size that is too small) and bound it by MaxNewSize above.
@@ -229,18 +224,18 @@ void GenArguments::initialize_size_info() {
     // If NewSize is set on the command line, we should use it as
     // the initial size, but make sure it is within the heap bounds.
     initial_young_size =
-        MIN2(max_young_size, bound_minus_alignment(NewSize, InitialHeapSize, GenAlignment));
-    MinNewSize = bound_minus_alignment(initial_young_size, MinHeapSize, GenAlignment);
+        MIN2(max_young_size, bound_minus_alignment(NewSize, InitialHeapSize, SpaceAlignment));
+    MinNewSize = bound_minus_alignment(initial_young_size, MinHeapSize, SpaceAlignment);
   } else {
     // For the case where NewSize is not set on the command line, use
     // NewRatio to size the initial generation size. Use the current
     // NewSize as the floor, because if NewRatio is overly large, the resulting
     // size can be too small.
     initial_young_size =
-        clamp(scale_by_NewRatio_aligned(InitialHeapSize, GenAlignment), NewSize, max_young_size);
+        clamp(scale_by_NewRatio_aligned(InitialHeapSize, SpaceAlignment), NewSize, max_young_size);
 
     // Derive MinNewSize from MinHeapSize
-    MinNewSize = MIN2(scale_by_NewRatio_aligned(MinHeapSize, GenAlignment), initial_young_size);
+    MinNewSize = MIN2(scale_by_NewRatio_aligned(MinHeapSize, SpaceAlignment), initial_young_size);
   }
 }
 
@@ -252,7 +247,7 @@ void GenArguments::initialize_size_info() {
   // The maximum old size can be determined from the maximum young
   // and maximum heap size since no explicit flags exist
   // for setting the old generation maximum.
-  MaxOldSize = MAX2(MaxHeapSize - max_young_size, GenAlignment);
+  MaxOldSize = MAX2(MaxHeapSize - max_young_size, SpaceAlignment);
   MinOldSize = MIN3(MaxOldSize,
                     InitialHeapSize - initial_young_size,
                     MinHeapSize - MinNewSize);
@@ -315,10 +310,10 @@ void GenArguments::assert_flags() {
   assert(NewSize >= MinNewSize, "Ergonomics decided on a too small young gen size");
   assert(NewSize <= MaxNewSize, "Ergonomics decided on incompatible initial and maximum young gen sizes");
   assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young gen and heap sizes");
-  assert(NewSize % GenAlignment == 0, "NewSize alignment");
-  assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize % GenAlignment == 0, "MaxNewSize alignment");
+  assert(NewSize % SpaceAlignment == 0, "NewSize alignment");
+  assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize % SpaceAlignment == 0, "MaxNewSize alignment");
   assert(OldSize + NewSize <= MaxHeapSize, "Ergonomics decided on incompatible generation and heap sizes");
-  assert(OldSize % GenAlignment == 0, "OldSize alignment");
+  assert(OldSize % SpaceAlignment == 0, "OldSize alignment");
 }
 
 void GenArguments::assert_size_info() {
@@ -327,19 +322,19 @@ void GenArguments::assert_size_info() {
   assert(MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young and heap sizes");
   assert(MinNewSize <= NewSize, "Ergonomics decided on incompatible minimum and initial young gen sizes");
   assert(NewSize <= MaxNewSize, "Ergonomics decided on incompatible initial and maximum young gen sizes");
-  assert(MinNewSize % GenAlignment == 0, "_min_young_size alignment");
-  assert(NewSize % GenAlignment == 0, "_initial_young_size alignment");
-  assert(MaxNewSize % GenAlignment == 0, "MaxNewSize alignment");
-  assert(MinNewSize <= bound_minus_alignment(MinNewSize, MinHeapSize, GenAlignment),
+  assert(MinNewSize % SpaceAlignment == 0, "_min_young_size alignment");
+  assert(NewSize % SpaceAlignment == 0, "_initial_young_size alignment");
+  assert(MaxNewSize % SpaceAlignment == 0, "MaxNewSize alignment");
+  assert(MinNewSize <= bound_minus_alignment(MinNewSize, MinHeapSize, SpaceAlignment),
          "Ergonomics made minimum young generation larger than minimum heap");
-  assert(NewSize <= bound_minus_alignment(NewSize, InitialHeapSize, GenAlignment),
+  assert(NewSize <= bound_minus_alignment(NewSize, InitialHeapSize, SpaceAlignment),
          "Ergonomics made initial young generation larger than initial heap");
-  assert(MaxNewSize <= bound_minus_alignment(MaxNewSize, MaxHeapSize, GenAlignment),
+  assert(MaxNewSize <= bound_minus_alignment(MaxNewSize, MaxHeapSize, SpaceAlignment),
          "Ergonomics made maximum young generation lager than maximum heap");
   assert(MinOldSize <= OldSize, "Ergonomics decided on incompatible minimum and initial old gen sizes");
   assert(OldSize <= MaxOldSize, "Ergonomics decided on incompatible initial and maximum old gen sizes");
-  assert(MaxOldSize % GenAlignment == 0, "MaxOldSize alignment");
-  assert(OldSize % GenAlignment == 0, "OldSize alignment");
+  assert(MaxOldSize % SpaceAlignment == 0, "MaxOldSize alignment");
+  assert(OldSize % SpaceAlignment == 0, "OldSize alignment");
   assert(MaxHeapSize <= (MaxNewSize + MaxOldSize), "Total maximum heap sizes must be sum of generation maximum sizes");
   assert(MinNewSize + MinOldSize <= MinHeapSize, "Minimum generation sizes exceed minimum heap size");
   assert(NewSize + OldSize == InitialHeapSize, "Initial generation sizes should match initial heap size");
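scale_by_NewRatio_aligned is used throughout the hunks above but its body is not part of this diff. In OpenJDK it divides the base size by NewRatio + 1 and aligns the result down, which the sketch below assumes (names and the 64K alignment are illustrative):

    #include <cassert>
    #include <cstddef>

    static size_t align_down_pow2(size_t x, size_t alignment) {
      return x & ~(alignment - 1);  // alignment assumed to be a power of two
    }

    // Assumed shape of GenArguments::scale_by_NewRatio_aligned: the young gen
    // gets 1/(NewRatio + 1) of the base size, rounded down to the alignment.
    size_t scale_by_new_ratio_aligned(size_t base_size, size_t new_ratio, size_t alignment) {
      return align_down_pow2(base_size / (new_ratio + 1), alignment);
    }

    int main() {
      // With a 1 GiB heap and the default NewRatio = 2, young gets about a third.
      const size_t heap  = 1024u * 1024u * 1024u;
      const size_t young = scale_by_new_ratio_aligned(heap, 2, 64 * 1024);
      assert(young <= heap / 3);
      assert(young % (64 * 1024) == 0);
      return 0;
    }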
@@ -35,8 +35,6 @@ extern size_t MaxOldSize;
 
 extern size_t OldSize;
 
-extern size_t GenAlignment;
-
 class GenArguments : public GCArguments {
   friend class TestGenCollectorPolicy; // Testing
 private:
@@ -625,34 +625,6 @@ void ShenandoahBarrierC2Support::verify(RootNode* root) {
 }
 #endif
 
-bool ShenandoahBarrierC2Support::is_anti_dependent_load_at_control(PhaseIdealLoop* phase, Node* maybe_load, Node* store,
-                                                                   Node* control) {
-  return maybe_load->is_Load() && phase->C->can_alias(store->adr_type(), phase->C->get_alias_index(maybe_load->adr_type())) &&
-         phase->ctrl_or_self(maybe_load) == control;
-}
-
-void ShenandoahBarrierC2Support::maybe_push_anti_dependent_loads(PhaseIdealLoop* phase, Node* maybe_store, Node* control, Unique_Node_List &wq) {
-  if (!maybe_store->is_Store() && !maybe_store->is_LoadStore()) {
-    return;
-  }
-  Node* mem = maybe_store->in(MemNode::Memory);
-  for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
-    Node* u = mem->fast_out(i);
-    if (is_anti_dependent_load_at_control(phase, u, maybe_store, control)) {
-      wq.push(u);
-    }
-  }
-}
-
-void ShenandoahBarrierC2Support::push_data_inputs_at_control(PhaseIdealLoop* phase, Node* n, Node* ctrl, Unique_Node_List &wq) {
-  for (uint i = 0; i < n->req(); i++) {
-    Node* in = n->in(i);
-    if (in != nullptr && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
-      wq.push(in);
-    }
-  }
-}
-
 bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
   // That both nodes have the same control is not sufficient to prove
   // domination, verify that there's no path from d to n
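The helper functions in the hunk above package one idea: when a store has to stay at a given control, any load that may alias it and is pinned at that same control is anti-dependent on the store (it must read the pre-store value), so the load has to be tracked along with the store. A skeletal, non-HotSpot sketch of that worklist step (Node here is a stand-in, and the alias test is a simplification of C2's can_alias):

    #include <vector>

    // Skeletal stand-ins for C2 nodes: just enough to show the traversal.
    struct Node {
      bool is_load  = false;
      bool is_store = false;
      int  alias    = 0;            // alias class of the accessed memory
      int  ctrl     = 0;            // id of the controlling node
      Node* memory  = nullptr;      // memory state consumed by a store
      std::vector<Node*> mem_uses;  // all users of this node as a memory state
    };

    // Analogue of maybe_push_anti_dependent_loads: walk the users of the
    // store's input memory state and queue every load that (a) may alias the
    // store and (b) hangs at the same control.
    void push_anti_dependent_loads(Node* maybe_store, int control, std::vector<Node*>& wq) {
      if (!maybe_store->is_store) {
        return;
      }
      for (Node* u : maybe_store->memory->mem_uses) {
        if (u->is_load && u->alias == maybe_store->alias && u->ctrl == control) {
          wq.push_back(u);  // the real code uses a de-duplicating Unique_Node_List
        }
      }
    }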
@@ -667,9 +639,22 @@ bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node*
|
||||
if (m->is_Phi() && m->in(0)->is_Loop()) {
|
||||
assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
|
||||
} else {
|
||||
// Take anti-dependencies into account
|
||||
maybe_push_anti_dependent_loads(phase, m, c, wq);
|
||||
push_data_inputs_at_control(phase, m, c, wq);
|
||||
if (m->is_Store() || m->is_LoadStore()) {
|
||||
// Take anti-dependencies into account
|
||||
Node* mem = m->in(MemNode::Memory);
|
||||
for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
|
||||
Node* u = mem->fast_out(i);
|
||||
if (u->is_Load() && phase->C->can_alias(m->adr_type(), phase->C->get_alias_index(u->adr_type())) &&
|
||||
phase->ctrl_or_self(u) == c) {
|
||||
wq.push(u);
|
||||
}
|
||||
}
|
||||
}
|
||||
for (uint i = 0; i < m->req(); i++) {
|
||||
if (m->in(i) != nullptr && phase->ctrl_or_self(m->in(i)) == c) {
|
||||
wq.push(m->in(i));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
@@ -1021,20 +1006,7 @@ void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* lo
|
||||
phase->register_new_node(val, ctrl);
|
||||
}
|
||||
|
||||
void ShenandoahBarrierC2Support::collect_nodes_above_barrier(Unique_Node_List &nodes_above_barrier, PhaseIdealLoop* phase, Node* ctrl, Node* init_raw_mem) {
|
||||
nodes_above_barrier.clear();
|
||||
if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
|
||||
nodes_above_barrier.push(init_raw_mem);
|
||||
}
|
||||
for (uint next = 0; next < nodes_above_barrier.size(); next++) {
|
||||
Node* n = nodes_above_barrier.at(next);
|
||||
// Take anti-dependencies into account
|
||||
maybe_push_anti_dependent_loads(phase, n, ctrl, nodes_above_barrier);
|
||||
push_data_inputs_at_control(phase, n, ctrl, nodes_above_barrier);
|
||||
}
|
||||
}
|
||||
|
||||
void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& nodes_above_barrier, uint last, PhaseIdealLoop* phase) {
|
||||
void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
|
||||
Node* ctrl = phase->get_ctrl(barrier);
|
||||
Node* init_raw_mem = fixer.find_mem(ctrl, barrier);
|
||||
|
||||
@@ -1045,17 +1017,30 @@ void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const Mem
|
||||
// control will be after the expanded barrier. The raw memory (if
|
||||
// its memory is control dependent on the barrier's input control)
|
||||
// must stay above the barrier.
|
||||
collect_nodes_above_barrier(nodes_above_barrier, phase, ctrl, init_raw_mem);
|
||||
uses_to_ignore.clear();
|
||||
if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
|
||||
uses_to_ignore.push(init_raw_mem);
|
||||
}
|
||||
for (uint next = 0; next < uses_to_ignore.size(); next++) {
|
||||
Node *n = uses_to_ignore.at(next);
|
||||
for (uint i = 0; i < n->req(); i++) {
|
||||
Node* in = n->in(i);
|
||||
if (in != nullptr && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
|
||||
uses_to_ignore.push(in);
|
||||
}
|
||||
}
|
||||
}
|
||||
for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
|
||||
Node* u = ctrl->fast_out(i);
|
||||
if (u->_idx < last &&
|
||||
u != barrier &&
|
||||
!u->depends_only_on_test() && // preserve dependency on test
|
||||
!nodes_above_barrier.member(u) &&
|
||||
!uses_to_ignore.member(u) &&
|
||||
(u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
|
||||
(ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
|
||||
Node* old_c = phase->ctrl_or_self(u);
|
||||
if (old_c != ctrl ||
|
||||
Node* c = old_c;
|
||||
if (c != ctrl ||
|
||||
is_dominator_same_ctrl(old_c, barrier, u, phase) ||
|
||||
ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
|
||||
phase->igvn().rehash_node_delayed(u);
|
||||
@@ -1330,7 +1315,7 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
|
||||
|
||||
// Expand load-reference-barriers
|
||||
MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
|
||||
Unique_Node_List nodes_above_barriers;
|
||||
Unique_Node_List uses_to_ignore;
|
||||
for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
|
||||
ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
|
||||
uint last = phase->C->unique();
|
||||
@@ -1425,7 +1410,7 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
|
||||
Node* out_val = val_phi;
|
||||
phase->register_new_node(val_phi, region);
|
||||
|
||||
fix_ctrl(lrb, region, fixer, uses, nodes_above_barriers, last, phase);
|
||||
fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);
|
||||
|
||||
ctrl = orig_ctrl;
|
||||
|
||||
|
@@ -62,12 +62,8 @@ private:
                             PhaseIdealLoop* phase, int flags);
   static void call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr,
                             DecoratorSet decorators, PhaseIdealLoop* phase);
-
-  static void collect_nodes_above_barrier(Unique_Node_List &nodes_above_barrier, PhaseIdealLoop* phase, Node* ctrl,
-                                          Node* init_raw_mem);
-
   static void test_in_cset(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase);
-  static void fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& nodes_above_barrier, uint last, PhaseIdealLoop* phase);
+  static void fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase);
 
   static Node* get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* lrb);
 public:
@@ -80,11 +76,6 @@ public:
   static bool expand(Compile* C, PhaseIterGVN& igvn);
   static void pin_and_expand(PhaseIdealLoop* phase);
 
-  static void push_data_inputs_at_control(PhaseIdealLoop* phase, Node* n, Node* ctrl,
-                                          Unique_Node_List &wq);
-  static bool is_anti_dependent_load_at_control(PhaseIdealLoop* phase, Node* maybe_load, Node* store, Node* control);
-
-  static void maybe_push_anti_dependent_loads(PhaseIdealLoop* phase, Node* maybe_store, Node* control, Unique_Node_List &wq);
 #ifdef ASSERT
   static void verify(RootNode* root);
 #endif
@@ -415,6 +415,10 @@ void ShenandoahConcurrentGC::entry_reset() {
                                 msg);
     op_reset();
   }
+
+  if (heap->mode()->is_generational()) {
+    heap->old_generation()->card_scan()->mark_read_table_as_clean();
+  }
 }
 
 void ShenandoahConcurrentGC::entry_scan_remembered_set() {
@@ -640,10 +644,6 @@ void ShenandoahConcurrentGC::op_reset() {
   } else {
     _generation->prepare_gc();
   }
-
-  if (heap->mode()->is_generational()) {
-    heap->old_generation()->card_scan()->mark_read_table_as_clean();
-  }
 }
 
 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
@@ -136,15 +136,9 @@ void ShenandoahDegenGC::op_degenerated() {
       heap->set_unload_classes(_generation->heuristics()->can_unload_classes() &&
                                 (!heap->mode()->is_generational() || _generation->is_global()));
 
-      if (heap->mode()->is_generational()) {
-        // Clean the read table before swapping it. The end goal here is to have a clean
-        // write table, and to have the read table updated with the previous write table.
-        heap->old_generation()->card_scan()->mark_read_table_as_clean();
-
-        if (_generation->is_young()) {
-          // Swap remembered sets for young
-          _generation->swap_card_tables();
-        }
-      }
+      if (heap->mode()->is_generational() && _generation->is_young()) {
+        // Swap remembered sets for young
+        _generation->swap_card_tables();
+      }
 
     case _degenerated_roots:
@@ -183,29 +183,6 @@ void ShenandoahGenerationalHeap::stop() {
   regulator_thread()->stop();
 }
 
-bool ShenandoahGenerationalHeap::requires_barriers(stackChunkOop obj) const {
-  if (is_idle()) {
-    return false;
-  }
-
-  if (is_concurrent_young_mark_in_progress() && is_in_young(obj) && !marking_context()->allocated_after_mark_start(obj)) {
-    // We are marking young, this object is in young, and it is below the TAMS
-    return true;
-  }
-
-  if (is_in_old(obj)) {
-    // Card marking barriers are required for objects in the old generation
-    return true;
-  }
-
-  if (has_forwarded_objects()) {
-    // Object may have pointers that need to be updated
-    return true;
-  }
-
-  return false;
-}
-
 void ShenandoahGenerationalHeap::evacuate_collection_set(bool concurrent) {
   ShenandoahRegionIterator regions;
   ShenandoahGenerationalEvacuationTask task(this, &regions, concurrent, false /* only promote regions */);
@@ -128,8 +128,6 @@ public:
 
   void stop() override;
 
-  bool requires_barriers(stackChunkOop obj) const override;
-
   // Used for logging the result of a region transfer outside the heap lock
   struct TransferResult {
     bool success;
@@ -1452,23 +1452,27 @@ void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
   }
 }
 
-size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) const {
+size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
 
+  oop humongous_obj = cast_to_oop(start->bottom());
+  size_t size = humongous_obj->size();
+  size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
+  size_t index = start->index() + required_regions - 1;
+
   assert(!start->has_live(), "liveness must be zero");
 
-  // Do not try to get the size of this humongous object. STW collections will
-  // have already unloaded classes, so an unmarked object may have a bad klass pointer.
-  ShenandoahHeapRegion* region = start;
-  size_t index = region->index();
-  do {
-    assert(region->is_humongous(), "Expect correct humongous start or continuation");
-    assert(!region->is_cset(), "Humongous region should not be in collection set");
-    region->make_trash_immediate();
-    region = get_region(++index);
-  } while (region != nullptr && region->is_humongous_continuation());
-
-  // Return number of regions trashed
-  return index - start->index();
+  for(size_t i = 0; i < required_regions; i++) {
+    // Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
+    // as it expects that every region belongs to a humongous region starting with a humongous start region.
+    ShenandoahHeapRegion* region = get_region(index --);
+
+    assert(region->is_humongous(), "expect correct humongous start or continuation");
+    assert(!region->is_cset(), "Humongous region should not be in collection set");
+
+    region->make_trash_immediate();
+  }
+  return required_regions;
 }
 
 class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
@@ -828,7 +828,7 @@ public:
   static inline void atomic_clear_oop(narrowOop* addr, oop compare);
   static inline void atomic_clear_oop(narrowOop* addr, narrowOop compare);
 
-  size_t trash_humongous_region_at(ShenandoahHeapRegion *r) const;
+  size_t trash_humongous_region_at(ShenandoahHeapRegion *r);
 
   static inline void increase_object_age(oop obj, uint additional_age);
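The plus side of the trash_humongous_region_at hunk sizes the humongous object up front and converts bytes into a region count. Presumably ShenandoahHeapRegion::required_regions is the usual round-up division, which this sketch assumes (the 4 MiB region size is illustrative):

    #include <cassert>
    #include <cstddef>

    // Number of fixed-size regions needed to cover `bytes`, rounding up.
    size_t required_regions(size_t bytes, size_t region_size_bytes) {
      return (bytes + region_size_bytes - 1) / region_size_bytes;
    }

    int main() {
      const size_t region = 4u * 1024 * 1024;
      assert(required_regions(region, region) == 1);      // exact fit
      assert(required_regions(region + 1, region) == 2);  // spills into a second region
      // The loop in the hunk then trashes indices [start, start + required_regions),
      // walking from the tail via get_region(index--).
      return 0;
    }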
@@ -624,7 +624,7 @@ void ShenandoahDirectCardMarkRememberedSet::swap_card_tables() {
 
 #ifdef ASSERT
   CardValue* start_bp = &(_card_table->write_byte_map())[0];
-  CardValue* end_bp = &(start_bp[_card_table->last_valid_index()]);
+  CardValue* end_bp = &(new_ptr)[_card_table->last_valid_index()];
 
   while (start_bp <= end_bp) {
     assert(*start_bp == CardTable::clean_card_val(), "Should be clean: " PTR_FORMAT, p2i(start_bp));
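Generational Shenandoah double-buffers its card tables: mutators dirty the write table while the collector scans the read table from the previous cycle, and swap_card_tables() exchanges the two once the outgoing read table has been scrubbed, which is what the #ifdef ASSERT walk above verifies. An illustrative, non-HotSpot model of that double-buffering:

    #include <cassert>
    #include <cstdint>
    #include <utility>
    #include <vector>

    using CardValue = uint8_t;
    constexpr CardValue kClean = 0;

    struct RememberedSet {
      std::vector<CardValue> table_a, table_b;
      std::vector<CardValue>* write = &table_a;  // mutators mark here
      std::vector<CardValue>* read  = &table_b;  // the collector scans here

      explicit RememberedSet(size_t cards)
          : table_a(cards, kClean), table_b(cards, kClean) {}

      void swap_card_tables() {
        std::swap(read, write);
        // The old write table (this cycle's marks) is now the read table; the
        // new write table must start out fully clean, mirroring the assert loop.
        for (CardValue v : *write) {
          assert(v == kClean && "new write table should be clean");
          (void)v;
        }
      }
    };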
@@ -23,6 +23,7 @@
 
 #include "gc/z/zAllocator.hpp"
 #include "gc/z/zObjectAllocator.hpp"
+#include "gc/z/zPageAge.inline.hpp"
 
 ZAllocatorEden* ZAllocator::_eden;
 ZAllocatorForRelocation* ZAllocator::_relocation[ZAllocator::_relocation_allocators];
@@ -47,7 +48,7 @@ ZPageAge ZAllocatorForRelocation::install() {
   for (uint i = 0; i < ZAllocator::_relocation_allocators; ++i) {
     if (_relocation[i] == nullptr) {
      _relocation[i] = this;
-      return static_cast<ZPageAge>(i + 1);
+      return to_zpageage(i + 1);
    }
  }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,7 @@ class ZPage;
 
 class ZAllocator {
 public:
-  static constexpr uint _relocation_allocators = static_cast<uint>(ZPageAge::old);
+  static constexpr uint _relocation_allocators = ZPageAgeCount - 1;
 
 protected:
   ZObjectAllocator _object_allocator;
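The zAllocator hunks swap a bare static_cast for to_zpageage and derive the allocator count from ZPageAgeCount; slot i serves pages of age i + 1 because eden (age 0) has a dedicated allocator. A self-contained sketch of that checked index-to-age mapping (the enum and constants are illustrative stand-ins for ZPageAge and ZPageAgeCount):

    #include <cassert>
    #include <cstdint>

    enum class PageAge : uint8_t { eden = 0, survivor1 = 1, survivor2 = 2, old_gen = 3 };
    constexpr unsigned kPageAgeCount         = 4;                  // stand-in for ZPageAgeCount
    constexpr unsigned kRelocationAllocators = kPageAgeCount - 1;  // every age except eden

    // Checked conversion, unlike a raw static_cast.
    PageAge to_page_age(unsigned i) {
      assert(i < kPageAgeCount && "page age index out of range");
      return static_cast<PageAge>(i);
    }

    int main() {
      for (unsigned i = 0; i < kRelocationAllocators; ++i) {
        assert(static_cast<unsigned>(to_page_age(i + 1)) == i + 1);  // slot i serves age i + 1
      }
      return 0;
    }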
Some files were not shown because too many files have changed in this diff.