mirror of
https://github.com/JetBrains/JetBrainsRuntime.git
synced 2025-12-14 05:19:45 +01:00
Compare commits
153 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
08b1fa4cb3 | ||
|
|
23e1e2ff4a | ||
|
|
5a62e99523 | ||
|
|
982064e50c | ||
|
|
b52af182c4 | ||
|
|
7bc0d82450 | ||
|
|
b7fcd0b235 | ||
|
|
984d7f9cdf | ||
|
|
42d3604a31 | ||
|
|
cf78925859 | ||
|
|
ba32b78bfa | ||
|
|
547ce03016 | ||
|
|
f07f5ce984 | ||
|
|
cabd7c1f7a | ||
|
|
57266064a7 | ||
|
|
2b94b70ef5 | ||
|
|
1130c1bc33 | ||
|
|
2f63d3aee5 | ||
|
|
382f870cd5 | ||
|
|
8c760e78b9 | ||
|
|
afa52e4681 | ||
|
|
164cae469c | ||
|
|
49a82d8806 | ||
|
|
96070212ad | ||
|
|
53a83d15a1 | ||
|
|
21b72dea78 | ||
|
|
51877f568b | ||
|
|
c1deb9eebf | ||
|
|
f62f1178aa | ||
|
|
a08208283b | ||
|
|
f7cd3fad24 | ||
|
|
ff75f763c0 | ||
|
|
a16d23557b | ||
|
|
e55ddabffa | ||
|
|
9a1c1f2efb | ||
|
|
e57a214e2a | ||
|
|
2f2acb2e3f | ||
|
|
06d804a0f0 | ||
|
|
6e390ef17c | ||
|
|
9652ae9a8d | ||
|
|
59460ff700 | ||
|
|
9d060574e5 | ||
|
|
fedd0a0ee3 | ||
|
|
79497ef7f5 | ||
|
|
8416ca3104 | ||
|
|
d8c3533a91 | ||
|
|
eacfcd86d3 | ||
|
|
534a8605e5 | ||
|
|
6fe9143bbb | ||
|
|
1a01839f8c | ||
|
|
26848a7d6c | ||
|
|
0e725c6fb1 | ||
|
|
b3f56086c9 | ||
|
|
ee35f6384f | ||
|
|
12a0dd03b8 | ||
|
|
366650a438 | ||
|
|
78b1360e7d | ||
|
|
417f8ecf07 | ||
|
|
57cabc6d74 | ||
|
|
b4c4496ef8 | ||
|
|
b5334fe237 | ||
|
|
e8ef93ae9d | ||
|
|
25b22c9b55 | ||
|
|
ead4529c92 | ||
|
|
3a1887269b | ||
|
|
e7f63ba310 | ||
|
|
a0fb35c837 | ||
|
|
032ead1d90 | ||
|
|
a8b4284848 | ||
|
|
ed39e17e34 | ||
|
|
6749c62b9e | ||
|
|
9aeacf2de5 | ||
|
|
991097b7bf | ||
|
|
523a4efe1c | ||
|
|
0dd7c69b9e | ||
|
|
66535fe26d | ||
|
|
db7af2b3c3 | ||
|
|
99829950f6 | ||
|
|
0ef0986731 | ||
|
|
610a18e7b3 | ||
|
|
8d33ea7395 | ||
|
|
3c53057fa6 | ||
|
|
1fcede053c | ||
|
|
fae9c7a3f0 | ||
|
|
dd68829017 | ||
|
|
b85fe02be5 | ||
|
|
e18277b470 | ||
|
|
e5ce5c57c8 | ||
|
|
91fdd72c97 | ||
|
|
b6ec93b038 | ||
|
|
65e63b6ab4 | ||
|
|
3f0fef2c9c | ||
|
|
3e0ef832cc | ||
|
|
7b7136b4ec | ||
|
|
5886ef728f | ||
|
|
d7aa349820 | ||
|
|
3b32f6a8ec | ||
|
|
8f73357004 | ||
|
|
429158218b | ||
|
|
ef4cbec6fb | ||
|
|
e9216efefc | ||
|
|
e5196fc24d | ||
|
|
c98dffa186 | ||
|
|
7d7fc69355 | ||
|
|
42ab8fcfb9 | ||
|
|
bf7d40d048 | ||
|
|
5ae32c4c86 | ||
|
|
56ce70c5df | ||
|
|
abc76c6b5b | ||
|
|
9586817cea | ||
|
|
38b877e941 | ||
|
|
8f487d26c0 | ||
|
|
500a3a2d0a | ||
|
|
a2f99fd88b | ||
|
|
0582bd290d | ||
|
|
3ff83ec49e | ||
|
|
7c9c8ba363 | ||
|
|
ca7b885873 | ||
|
|
92be7821f5 | ||
|
|
bcf860703d | ||
|
|
d186dacdb7 | ||
|
|
ef45c8154c | ||
|
|
cd9b1bc820 | ||
|
|
fcb68ea22d | ||
|
|
eb256deb80 | ||
|
|
156187accc | ||
|
|
a377773fa7 | ||
|
|
cae1fd3385 | ||
|
|
eb8ee8bdc7 | ||
|
|
2103dc15cb | ||
|
|
1c72b350e4 | ||
|
|
52338c94f6 | ||
|
|
91f12600d2 | ||
|
|
6c616c71ec | ||
|
|
e94ad551c6 | ||
|
|
d735255919 | ||
|
|
d024f58e61 | ||
|
|
026975a1aa | ||
|
|
8adb052b46 | ||
|
|
9658cecde3 | ||
|
|
b2e7cda6a0 | ||
|
|
65fda5c02a | ||
|
|
d1b788005b | ||
|
|
bb2611ad43 | ||
|
|
e918a59b1d | ||
|
|
28acca609b | ||
|
|
029e3bf8f5 | ||
|
|
78158f30ae | ||
|
|
c793de989f | ||
|
|
15178aa298 | ||
|
|
fe3be498b8 | ||
|
|
62fde68708 | ||
|
|
af87035b71 |
@@ -1,7 +1,7 @@
|
||||
[general]
|
||||
project=jdk
|
||||
jbs=JDK
|
||||
version=25
|
||||
version=26
|
||||
|
||||
[checks]
|
||||
error=author,committer,reviewers,merge,issues,executable,symlink,message,hg-tag,whitespace,problemlists,copyright
|
||||
|
||||
4
make/autoconf/configure
vendored
4
make/autoconf/configure
vendored
@@ -1,6 +1,6 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
# Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
@@ -366,7 +366,7 @@ EOT
|
||||
|
||||
# Print additional help, e.g. a list of toolchains and JVM features.
|
||||
# This must be done by the autoconf script.
|
||||
( CONFIGURE_PRINT_ADDITIONAL_HELP=true . $generated_script PRINTF=printf )
|
||||
( CONFIGURE_PRINT_ADDITIONAL_HELP=true . $generated_script PRINTF=printf ECHO=echo )
|
||||
|
||||
cat <<EOT
|
||||
|
||||
|
||||
@@ -28,7 +28,7 @@
|
||||
################################################################################
|
||||
|
||||
# Minimum supported versions
|
||||
JTREG_MINIMUM_VERSION=7.5.1
|
||||
JTREG_MINIMUM_VERSION=7.5.2
|
||||
GTEST_MINIMUM_VERSION=1.14.0
|
||||
|
||||
################################################################################
|
||||
|
||||
@@ -26,7 +26,7 @@
|
||||
# Versions and download locations for dependencies used by GitHub Actions (GHA)
|
||||
|
||||
GTEST_VERSION=1.14.0
|
||||
JTREG_VERSION=7.5.1+1
|
||||
JTREG_VERSION=7.5.2+1
|
||||
|
||||
LINUX_X64_BOOT_JDK_EXT=tar.gz
|
||||
LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk24/1f9ff9062db4449d8ca828c504ffae90/36/GPL/openjdk-24_linux-x64_bin.tar.gz
|
||||
|
||||
@@ -1174,9 +1174,9 @@ var getJibProfilesDependencies = function (input, common) {
|
||||
jtreg: {
|
||||
server: "jpg",
|
||||
product: "jtreg",
|
||||
version: "7.5.1",
|
||||
version: "7.5.2",
|
||||
build_number: "1",
|
||||
file: "bundles/jtreg-7.5.1+1.zip",
|
||||
file: "bundles/jtreg-7.5.2+1.zip",
|
||||
environment_name: "JT_HOME",
|
||||
environment_path: input.get("jtreg", "home_path") + "/bin",
|
||||
configure_args: "--with-jtreg=" + input.get("jtreg", "home_path"),
|
||||
@@ -1192,8 +1192,8 @@ var getJibProfilesDependencies = function (input, common) {
|
||||
server: "jpg",
|
||||
product: "jcov",
|
||||
version: "3.0",
|
||||
build_number: "1",
|
||||
file: "bundles/jcov-3.0+1.zip",
|
||||
build_number: "3",
|
||||
file: "bundles/jcov-3.0+3.zip",
|
||||
environment_name: "JCOV_HOME",
|
||||
},
|
||||
|
||||
|
||||
@@ -26,17 +26,17 @@
|
||||
# Default version, product, and vendor information to use,
|
||||
# unless overridden by configure
|
||||
|
||||
DEFAULT_VERSION_FEATURE=25
|
||||
DEFAULT_VERSION_FEATURE=26
|
||||
DEFAULT_VERSION_INTERIM=0
|
||||
DEFAULT_VERSION_UPDATE=0
|
||||
DEFAULT_VERSION_PATCH=0
|
||||
DEFAULT_VERSION_EXTRA1=0
|
||||
DEFAULT_VERSION_EXTRA2=0
|
||||
DEFAULT_VERSION_EXTRA3=0
|
||||
DEFAULT_VERSION_DATE=2025-09-16
|
||||
DEFAULT_VERSION_CLASSFILE_MAJOR=69 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
|
||||
DEFAULT_VERSION_DATE=2026-03-17
|
||||
DEFAULT_VERSION_CLASSFILE_MAJOR=70 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
|
||||
DEFAULT_VERSION_CLASSFILE_MINOR=0
|
||||
DEFAULT_VERSION_DOCS_API_SINCE=11
|
||||
DEFAULT_ACCEPTABLE_BOOT_VERSIONS="24 25"
|
||||
DEFAULT_JDK_SOURCE_TARGET_VERSION=25
|
||||
DEFAULT_ACCEPTABLE_BOOT_VERSIONS="24 25 26"
|
||||
DEFAULT_JDK_SOURCE_TARGET_VERSION=26
|
||||
DEFAULT_PROMOTED_VERSION_PRE=ea
|
||||
|
||||
@@ -542,10 +542,10 @@ class Bundle {
|
||||
if (pattern != null) {
|
||||
// Perform date-time format pattern conversion which is
|
||||
// applicable to both SimpleDateFormat and j.t.f.DateTimeFormatter.
|
||||
String transPattern = translateDateFormatLetters(calendarType, pattern, this::convertDateTimePatternLetter);
|
||||
String transPattern = translateDateFormatLetters(calendarType, key, pattern, this::convertDateTimePatternLetter);
|
||||
dateTimePatterns.add(i, transPattern);
|
||||
// Additionally, perform SDF specific date-time format pattern conversion
|
||||
sdfPatterns.add(i, translateDateFormatLetters(calendarType, transPattern, this::convertSDFLetter));
|
||||
sdfPatterns.add(i, translateDateFormatLetters(calendarType, key, transPattern, this::convertSDFLetter));
|
||||
} else {
|
||||
dateTimePatterns.add(i, null);
|
||||
sdfPatterns.add(i, null);
|
||||
@@ -568,7 +568,7 @@ class Bundle {
|
||||
}
|
||||
}
|
||||
|
||||
private String translateDateFormatLetters(CalendarType calendarType, String cldrFormat, ConvertDateTimeLetters converter) {
|
||||
private String translateDateFormatLetters(CalendarType calendarType, String patternKey, String cldrFormat, ConvertDateTimeLetters converter) {
|
||||
String pattern = cldrFormat;
|
||||
int length = pattern.length();
|
||||
boolean inQuote = false;
|
||||
@@ -587,7 +587,7 @@ class Bundle {
|
||||
if (nextc == '\'') {
|
||||
i++;
|
||||
if (count != 0) {
|
||||
converter.convert(calendarType, lastLetter, count, jrePattern);
|
||||
converter.convert(calendarType, patternKey, lastLetter, count, jrePattern);
|
||||
lastLetter = 0;
|
||||
count = 0;
|
||||
}
|
||||
@@ -597,7 +597,7 @@ class Bundle {
|
||||
}
|
||||
if (!inQuote) {
|
||||
if (count != 0) {
|
||||
converter.convert(calendarType, lastLetter, count, jrePattern);
|
||||
converter.convert(calendarType, patternKey, lastLetter, count, jrePattern);
|
||||
lastLetter = 0;
|
||||
count = 0;
|
||||
}
|
||||
@@ -614,7 +614,7 @@ class Bundle {
|
||||
}
|
||||
if (!(c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z')) {
|
||||
if (count != 0) {
|
||||
converter.convert(calendarType, lastLetter, count, jrePattern);
|
||||
converter.convert(calendarType, patternKey, lastLetter, count, jrePattern);
|
||||
lastLetter = 0;
|
||||
count = 0;
|
||||
}
|
||||
@@ -627,7 +627,7 @@ class Bundle {
|
||||
count++;
|
||||
continue;
|
||||
}
|
||||
converter.convert(calendarType, lastLetter, count, jrePattern);
|
||||
converter.convert(calendarType, patternKey, lastLetter, count, jrePattern);
|
||||
lastLetter = c;
|
||||
count = 1;
|
||||
}
|
||||
@@ -637,7 +637,7 @@ class Bundle {
|
||||
}
|
||||
|
||||
if (count != 0) {
|
||||
converter.convert(calendarType, lastLetter, count, jrePattern);
|
||||
converter.convert(calendarType, patternKey, lastLetter, count, jrePattern);
|
||||
}
|
||||
if (cldrFormat.contentEquals(jrePattern)) {
|
||||
return cldrFormat;
|
||||
@@ -661,7 +661,7 @@ class Bundle {
|
||||
* on the support given by the SimpleDateFormat and the j.t.f.DateTimeFormatter
|
||||
* for date-time formatting.
|
||||
*/
|
||||
private void convertDateTimePatternLetter(CalendarType calendarType, char cldrLetter, int count, StringBuilder sb) {
|
||||
private void convertDateTimePatternLetter(CalendarType calendarType, String patternKey, char cldrLetter, int count, StringBuilder sb) {
|
||||
switch (cldrLetter) {
|
||||
case 'u':
|
||||
case 'U':
|
||||
@@ -683,7 +683,7 @@ class Bundle {
|
||||
* Perform a conversion of CLDR date-time format pattern letter which is
|
||||
* specific to the SimpleDateFormat.
|
||||
*/
|
||||
private void convertSDFLetter(CalendarType calendarType, char cldrLetter, int count, StringBuilder sb) {
|
||||
private void convertSDFLetter(CalendarType calendarType, String patternKey, char cldrLetter, int count, StringBuilder sb) {
|
||||
switch (cldrLetter) {
|
||||
case 'G':
|
||||
if (calendarType != CalendarType.GREGORIAN) {
|
||||
@@ -722,6 +722,17 @@ class Bundle {
|
||||
appendN('z', count, sb);
|
||||
break;
|
||||
|
||||
case 'y':
|
||||
// If the style is FULL/LONG for a Japanese Calendar, make the
|
||||
// count == 4 for Gan-nen
|
||||
if (calendarType == CalendarType.JAPANESE &&
|
||||
(patternKey.contains("full-") ||
|
||||
patternKey.contains("long-"))) {
|
||||
count = 4;
|
||||
}
|
||||
appendN(cldrLetter, count, sb);
|
||||
break;
|
||||
|
||||
case 'Z':
|
||||
if (count == 4 || count == 5) {
|
||||
sb.append("XXX");
|
||||
@@ -767,6 +778,7 @@ class Bundle {
|
||||
.collect(Collectors.toMap(
|
||||
e -> calendarPrefix + e.getKey(),
|
||||
e -> translateDateFormatLetters(calendarType,
|
||||
e.getKey(),
|
||||
(String)e.getValue(),
|
||||
this::convertDateTimePatternLetter)
|
||||
))
|
||||
@@ -775,7 +787,7 @@ class Bundle {
|
||||
|
||||
@FunctionalInterface
|
||||
private interface ConvertDateTimeLetters {
|
||||
void convert(CalendarType calendarType, char cldrLetter, int count, StringBuilder sb);
|
||||
void convert(CalendarType calendarType, String patternKey, char cldrLetter, int count, StringBuilder sb);
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -46,6 +46,8 @@ CLDR_GEN_DONE := $(GENSRC_DIR)/_cldr-gensrc.marker
|
||||
TZ_DATA_DIR := $(MODULE_SRC)/share/data/tzdata
|
||||
ZONENAME_TEMPLATE := $(MODULE_SRC)/share/classes/java/time/format/ZoneName.java.template
|
||||
|
||||
# The `-utf8` option is used even for US English, as some names
|
||||
# may contain non-ASCII characters, such as “Türkiye”.
|
||||
$(CLDR_GEN_DONE): $(wildcard $(CLDR_DATA_DIR)/dtd/*.dtd) \
|
||||
$(wildcard $(CLDR_DATA_DIR)/main/en*.xml) \
|
||||
$(wildcard $(CLDR_DATA_DIR)/supplemental/*.xml) \
|
||||
@@ -61,7 +63,8 @@ $(CLDR_GEN_DONE): $(wildcard $(CLDR_DATA_DIR)/dtd/*.dtd) \
|
||||
-basemodule \
|
||||
-year $(COPYRIGHT_YEAR) \
|
||||
-zntempfile $(ZONENAME_TEMPLATE) \
|
||||
-tzdatadir $(TZ_DATA_DIR))
|
||||
-tzdatadir $(TZ_DATA_DIR) \
|
||||
-utf8)
|
||||
$(TOUCH) $@
|
||||
|
||||
TARGETS += $(CLDR_GEN_DONE)
|
||||
|
||||
@@ -45,7 +45,8 @@ $(CLDR_GEN_DONE): $(wildcard $(CLDR_DATA_DIR)/dtd/*.dtd) \
|
||||
-baselocales "en-US" \
|
||||
-year $(COPYRIGHT_YEAR) \
|
||||
-o $(GENSRC_DIR) \
|
||||
-tzdatadir $(TZ_DATA_DIR))
|
||||
-tzdatadir $(TZ_DATA_DIR) \
|
||||
-utf8)
|
||||
$(TOUCH) $@
|
||||
|
||||
TARGETS += $(CLDR_GEN_DONE)
|
||||
|
||||
@@ -62,7 +62,7 @@ BUILD_JDK_JTREG_LIBRARIES_JDK_LIBS_libGetXSpace := java.base:libjava
|
||||
ifeq ($(call isTargetOs, windows), true)
|
||||
BUILD_JDK_JTREG_EXCLUDE += libDirectIO.c libInheritedChannel.c \
|
||||
libExplicitAttach.c libImplicitAttach.c \
|
||||
exelauncher.c
|
||||
exelauncher.c libFDLeaker.c exeFDLeakTester.c
|
||||
|
||||
BUILD_JDK_JTREG_EXECUTABLES_LIBS_exeNullCallerTest := $(LIBCXX)
|
||||
BUILD_JDK_JTREG_EXECUTABLES_LIBS_exerevokeall := advapi32.lib
|
||||
|
||||
@@ -187,22 +187,18 @@ public class HelloWorld {
|
||||
new Run("none", "Hello from Cupertino")
|
||||
}),
|
||||
new Paragraph("title", new Run[] {
|
||||
new Run("none", "\u53F0\u5317\u554F\u5019\u60A8\u0021")
|
||||
new Run("none", "台北問候您!")
|
||||
}),
|
||||
new Paragraph("title", new Run[] {
|
||||
new Run("none", "\u0391\u03B8\u03B7\u03BD\u03B1\u03B9\u0020" // Greek
|
||||
+ "\u03B1\u03C3\u03C0\u03B1\u03B6\u03BF\u03BD"
|
||||
+ "\u03C4\u03B1\u03B9\u0020\u03C5\u03BC\u03B1"
|
||||
+ "\u03C2\u0021")
|
||||
new Run("none", "Αθηναι ασπαζονται υμας!") // Greek
|
||||
}),
|
||||
new Paragraph("title", new Run[] {
|
||||
new Run("none", "\u6771\u4eac\u304b\u3089\u4eca\u65e5\u306f")
|
||||
new Run("none", "東京から今日は")
|
||||
}),
|
||||
new Paragraph("title", new Run[] {
|
||||
new Run("none", "\u05e9\u05dc\u05d5\u05dd \u05de\u05d9\u05e8\u05d5"
|
||||
+ "\u05e9\u05dc\u05d9\u05dd")
|
||||
new Run("none", "שלום מירושלים")
|
||||
}),
|
||||
new Paragraph("title", new Run[] {
|
||||
new Run("none", "\u0633\u0644\u0627\u0645")
|
||||
new Run("none", "سلام")
|
||||
}), };
|
||||
}
|
||||
|
||||
@@ -1888,7 +1888,7 @@ void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
|
||||
code_stub = &stub->entry();
|
||||
}
|
||||
__ relocate(relocInfo::poll_return_type);
|
||||
__ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
|
||||
__ safepoint_poll(*code_stub, true /* at_return */, true /* in_nmethod */);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3921,6 +3921,10 @@ ins_attrib ins_alignment(4); // Required alignment attribute (must
|
||||
// compute_padding() function must be
|
||||
// provided for the instruction
|
||||
|
||||
// Whether this node is expanded during code emission into a sequence of
|
||||
// instructions and the first instruction can perform an implicit null check.
|
||||
ins_attrib ins_is_late_expanded_null_check_candidate(false);
|
||||
|
||||
//----------OPERANDS-----------------------------------------------------------
|
||||
// Operand definitions must precede instruction definitions for correct parsing
|
||||
// in the ADLC because operands constitute user defined types which are used in
|
||||
|
||||
@@ -483,7 +483,7 @@ void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
|
||||
|
||||
code_stub->set_safepoint_offset(__ offset());
|
||||
__ relocate(relocInfo::poll_return_type);
|
||||
__ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
|
||||
__ safepoint_poll(*code_stub->entry(), true /* at_return */, true /* in_nmethod */);
|
||||
__ ret(lr);
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@@ -43,15 +43,15 @@ define_pd_global(intx, CompileThreshold, 1500 );
|
||||
|
||||
define_pd_global(intx, OnStackReplacePercentage, 933 );
|
||||
define_pd_global(intx, NewSizeThreadIncrease, 4*K );
|
||||
define_pd_global(intx, InitialCodeCacheSize, 160*K);
|
||||
define_pd_global(intx, ReservedCodeCacheSize, 32*M );
|
||||
define_pd_global(intx, NonProfiledCodeHeapSize, 13*M );
|
||||
define_pd_global(intx, ProfiledCodeHeapSize, 14*M );
|
||||
define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(size_t, InitialCodeCacheSize, 160*K);
|
||||
define_pd_global(size_t, ReservedCodeCacheSize, 32*M );
|
||||
define_pd_global(size_t, NonProfiledCodeHeapSize, 13*M );
|
||||
define_pd_global(size_t, ProfiledCodeHeapSize, 14*M );
|
||||
define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(bool, ProfileInterpreter, false);
|
||||
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
|
||||
define_pd_global(uintx, CodeCacheMinBlockLength, 1);
|
||||
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
|
||||
define_pd_global(size_t, CodeCacheExpansionSize, 32*K );
|
||||
define_pd_global(size_t, CodeCacheMinBlockLength, 1);
|
||||
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
|
||||
define_pd_global(bool, NeverActAsServerClassMachine, true );
|
||||
define_pd_global(uint64_t,MaxRAM, 1ULL*G);
|
||||
define_pd_global(bool, CICompileOSR, true );
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@@ -51,8 +51,8 @@ define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
|
||||
define_pd_global(intx, LoopUnrollLimit, 60);
|
||||
define_pd_global(intx, LoopPercentProfileLimit, 10);
|
||||
// InitialCodeCacheSize derived from specjbb2000 run.
|
||||
define_pd_global(intx, InitialCodeCacheSize, 2496*K); // Integral multiple of CodeCacheExpansionSize
|
||||
define_pd_global(intx, CodeCacheExpansionSize, 64*K);
|
||||
define_pd_global(size_t, InitialCodeCacheSize, 2496*K); // Integral multiple of CodeCacheExpansionSize
|
||||
define_pd_global(size_t, CodeCacheExpansionSize, 64*K);
|
||||
|
||||
// Ergonomics related flags
|
||||
define_pd_global(uint64_t,MaxRAM, 128ULL*G);
|
||||
@@ -69,12 +69,12 @@ define_pd_global(bool, SuperWordLoopUnrollAnalysis, true);
|
||||
define_pd_global(uint, SuperWordStoreToLoadForwardingFailureDetection, 8);
|
||||
define_pd_global(bool, IdealizeClearArrayNode, true);
|
||||
|
||||
define_pd_global(intx, ReservedCodeCacheSize, 48*M);
|
||||
define_pd_global(intx, NonProfiledCodeHeapSize, 21*M);
|
||||
define_pd_global(intx, ProfiledCodeHeapSize, 22*M);
|
||||
define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(uintx, CodeCacheMinBlockLength, 6);
|
||||
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
|
||||
define_pd_global(size_t, ReservedCodeCacheSize, 48*M);
|
||||
define_pd_global(size_t, NonProfiledCodeHeapSize, 21*M);
|
||||
define_pd_global(size_t, ProfiledCodeHeapSize, 22*M);
|
||||
define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(size_t, CodeCacheMinBlockLength, 6);
|
||||
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
|
||||
|
||||
// Ergonomics related flags
|
||||
define_pd_global(bool, NeverActAsServerClassMachine, false);
|
||||
|
||||
@@ -90,13 +90,15 @@ void CompiledDirectCall::set_to_interpreted(const methodHandle& callee, address
|
||||
= nativeMovConstReg_at(stub + NativeInstruction::instruction_size);
|
||||
|
||||
#ifdef ASSERT
|
||||
NativeGeneralJump* jump = nativeGeneralJump_at(method_holder->next_instruction_address());
|
||||
NativeJump* jump = MacroAssembler::codestub_branch_needs_far_jump()
|
||||
? nativeGeneralJump_at(method_holder->next_instruction_address())
|
||||
: nativeJump_at(method_holder->next_instruction_address());
|
||||
verify_mt_safe(callee, entry, method_holder, jump);
|
||||
#endif
|
||||
|
||||
// Update stub.
|
||||
method_holder->set_data((intptr_t)callee());
|
||||
NativeGeneralJump::insert_unconditional(method_holder->next_instruction_address(), entry);
|
||||
MacroAssembler::pd_patch_instruction(method_holder->next_instruction_address(), entry);
|
||||
ICache::invalidate_range(stub, to_interp_stub_size());
|
||||
// Update jump to call.
|
||||
set_destination_mt_safe(stub);
|
||||
|
||||
@@ -289,7 +289,7 @@ void DowncallLinker::StubGenerator::generate() {
|
||||
|
||||
__ verify_sve_vector_length(tmp1);
|
||||
|
||||
__ safepoint_poll(L_safepoint_poll_slow_path, true /* at_return */, true /* acquire */, false /* in_nmethod */, tmp1);
|
||||
__ safepoint_poll(L_safepoint_poll_slow_path, true /* at_return */, false /* in_nmethod */, tmp1);
|
||||
|
||||
__ ldrw(tmp1, Address(rthread, JavaThread::suspend_flags_offset()));
|
||||
__ cbnzw(tmp1, L_safepoint_poll_slow_path);
|
||||
|
||||
@@ -106,6 +106,13 @@ instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
|
||||
match(Set dst (LoadP mem));
|
||||
predicate(UseZGC && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
|
||||
effect(TEMP dst, KILL cr);
|
||||
// The main load is a candidate to implement implicit null checks, as long as
|
||||
// legitimize_address() does not require a preceding lea instruction to
|
||||
// materialize the memory operand. The absence of a preceding lea instruction
|
||||
// is guaranteed for immLoffset8 memory operands, because these do not lead to
|
||||
// out-of-range offsets (see definition of immLoffset8). Fortunately,
|
||||
// immLoffset8 memory operands are the most common ones in practice.
|
||||
ins_is_late_expanded_null_check_candidate(opnd_array(1)->opcode() == INDOFFL8);
|
||||
|
||||
ins_cost(4 * INSN_COST);
|
||||
|
||||
@@ -117,7 +124,11 @@ instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
|
||||
// Fix up any out-of-range offsets.
|
||||
assert_different_registers(rscratch2, as_Register($mem$$base));
|
||||
assert_different_registers(rscratch2, $dst$$Register);
|
||||
ref_addr = __ legitimize_address(ref_addr, 8, rscratch2);
|
||||
int size = 8;
|
||||
assert(!this->is_late_expanded_null_check_candidate() ||
|
||||
!MacroAssembler::legitimize_address_requires_lea(ref_addr, size),
|
||||
"an instruction that can be used for implicit null checking should emit the candidate memory access first");
|
||||
ref_addr = __ legitimize_address(ref_addr, size, rscratch2);
|
||||
}
|
||||
__ ldr($dst$$Register, ref_addr);
|
||||
z_load_barrier(masm, this, ref_addr, $dst$$Register, rscratch1);
|
||||
|
||||
@@ -38,7 +38,7 @@ define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap nulls
|
||||
|
||||
define_pd_global(bool, DelayCompilerStubsGeneration, COMPILER2_OR_JVMCI);
|
||||
|
||||
define_pd_global(uintx, CodeCacheSegmentSize, 64);
|
||||
define_pd_global(size_t, CodeCacheSegmentSize, 64);
|
||||
define_pd_global(intx, CodeEntryAlignment, 64);
|
||||
define_pd_global(intx, OptoLoopAlignment, 16);
|
||||
|
||||
|
||||
@@ -603,7 +603,7 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
|
||||
// the stack, will call InterpreterRuntime::at_unwind.
|
||||
Label slow_path;
|
||||
Label fast_path;
|
||||
safepoint_poll(slow_path, true /* at_return */, false /* acquire */, false /* in_nmethod */);
|
||||
safepoint_poll(slow_path, true /* at_return */, false /* in_nmethod */);
|
||||
br(Assembler::AL, fast_path);
|
||||
bind(slow_path);
|
||||
push(state);
|
||||
|
||||
@@ -553,13 +553,8 @@ address MacroAssembler::target_addr_for_insn_or_null(address insn_addr, unsigned
|
||||
return MacroAssembler::target_addr_for_insn(insn_addr, insn);
|
||||
}
|
||||
|
||||
void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp) {
|
||||
if (acquire) {
|
||||
lea(tmp, Address(rthread, JavaThread::polling_word_offset()));
|
||||
ldar(tmp, tmp);
|
||||
} else {
|
||||
ldr(tmp, Address(rthread, JavaThread::polling_word_offset()));
|
||||
}
|
||||
void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod, Register tmp) {
|
||||
ldr(tmp, Address(rthread, JavaThread::polling_word_offset()));
|
||||
if (at_return) {
|
||||
// Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
|
||||
// we may safely use the sp instead to perform the stack watermark check.
|
||||
@@ -989,11 +984,19 @@ void MacroAssembler::emit_static_call_stub() {
|
||||
mov_metadata(rmethod, nullptr);
|
||||
|
||||
// Jump to the entry point of the c2i stub.
|
||||
movptr(rscratch1, 0);
|
||||
br(rscratch1);
|
||||
if (codestub_branch_needs_far_jump()) {
|
||||
movptr(rscratch1, 0);
|
||||
br(rscratch1);
|
||||
} else {
|
||||
b(pc());
|
||||
}
|
||||
}
|
||||
|
||||
int MacroAssembler::static_call_stub_size() {
|
||||
if (!codestub_branch_needs_far_jump()) {
|
||||
// isb; movk; movz; movz; b
|
||||
return 5 * NativeInstruction::instruction_size;
|
||||
}
|
||||
// isb; movk; movz; movz; movk; movz; movz; br
|
||||
return 8 * NativeInstruction::instruction_size;
|
||||
}
|
||||
|
||||
@@ -120,7 +120,7 @@ class MacroAssembler: public Assembler {
|
||||
virtual void check_and_handle_popframe(Register java_thread);
|
||||
virtual void check_and_handle_earlyret(Register java_thread);
|
||||
|
||||
void safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp = rscratch1);
|
||||
void safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod, Register tmp = rscratch1);
|
||||
void rt_call(address dest, Register tmp = rscratch1);
|
||||
|
||||
// Load Effective Address
|
||||
@@ -129,16 +129,21 @@ class MacroAssembler: public Assembler {
|
||||
a.lea(this, r);
|
||||
}
|
||||
|
||||
// Whether materializing the given address for a LDR/STR requires an
|
||||
// additional lea instruction.
|
||||
static bool legitimize_address_requires_lea(const Address &a, int size) {
|
||||
return a.getMode() == Address::base_plus_offset &&
|
||||
!Address::offset_ok_for_immed(a.offset(), exact_log2(size));
|
||||
}
|
||||
|
||||
/* Sometimes we get misaligned loads and stores, usually from Unsafe
|
||||
accesses, and these can exceed the offset range. */
|
||||
Address legitimize_address(const Address &a, int size, Register scratch) {
|
||||
if (a.getMode() == Address::base_plus_offset) {
|
||||
if (! Address::offset_ok_for_immed(a.offset(), exact_log2(size))) {
|
||||
block_comment("legitimize_address {");
|
||||
lea(scratch, a);
|
||||
block_comment("} legitimize_address");
|
||||
return Address(scratch);
|
||||
}
|
||||
if (legitimize_address_requires_lea(a, size)) {
|
||||
block_comment("legitimize_address {");
|
||||
lea(scratch, a);
|
||||
block_comment("} legitimize_address");
|
||||
return Address(scratch);
|
||||
}
|
||||
return a;
|
||||
}
|
||||
|
||||
@@ -386,18 +386,6 @@ void NativeJump::patch_verified_entry(address entry, address verified_entry, add
|
||||
|
||||
void NativeGeneralJump::verify() { }
|
||||
|
||||
void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
|
||||
NativeGeneralJump* n_jump = (NativeGeneralJump*)code_pos;
|
||||
|
||||
CodeBuffer cb(code_pos, instruction_size);
|
||||
MacroAssembler a(&cb);
|
||||
|
||||
a.movptr(rscratch1, (uintptr_t)entry);
|
||||
a.br(rscratch1);
|
||||
|
||||
ICache::invalidate_range(code_pos, instruction_size);
|
||||
}
|
||||
|
||||
// MT-safe patching of a long jump instruction.
|
||||
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
|
||||
ShouldNotCallThis();
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2108, Red Hat Inc. All rights reserved.
|
||||
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2025, Red Hat Inc. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -383,7 +383,6 @@ public:
|
||||
address jump_destination() const;
|
||||
void set_jump_destination(address dest);
|
||||
|
||||
static void insert_unconditional(address code_pos, address entry);
|
||||
static void replace_mt_safe(address instr_addr, address code_buffer);
|
||||
static void verify();
|
||||
};
|
||||
|
||||
@@ -1877,7 +1877,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
||||
// Check for safepoint operation in progress and/or pending suspend requests.
|
||||
{
|
||||
// No need for acquire as Java threads always disarm themselves.
|
||||
__ safepoint_poll(safepoint_in_progress, true /* at_return */, false /* acquire */, false /* in_nmethod */);
|
||||
__ safepoint_poll(safepoint_in_progress, true /* at_return */, false /* in_nmethod */);
|
||||
__ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
|
||||
__ cbnzw(rscratch1, safepoint_in_progress);
|
||||
__ bind(safepoint_in_progress_done);
|
||||
|
||||
@@ -26,6 +26,13 @@
|
||||
#ifndef CPU_AARCH64_STUBDECLARATIONS_HPP
|
||||
#define CPU_AARCH64_STUBDECLARATIONS_HPP
|
||||
|
||||
#define STUBGEN_PREUNIVERSE_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(preuniverse, 0) \
|
||||
|
||||
|
||||
#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
|
||||
@@ -11653,6 +11653,10 @@ class StubGenerator: public StubCodeGenerator {
|
||||
};
|
||||
|
||||
// Initialization
|
||||
void generate_preuniverse_stubs() {
|
||||
// preuniverse stubs are not needed for aarch64
|
||||
}
|
||||
|
||||
void generate_initial_stubs() {
|
||||
// Generate initial stubs and initializes the entry points
|
||||
|
||||
@@ -11898,6 +11902,9 @@ class StubGenerator: public StubCodeGenerator {
|
||||
public:
|
||||
StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) {
|
||||
switch(blob_id) {
|
||||
case preuniverse_id:
|
||||
generate_preuniverse_stubs();
|
||||
break;
|
||||
case initial_id:
|
||||
generate_initial_stubs();
|
||||
break;
|
||||
|
||||
@@ -1018,7 +1018,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
|
||||
|
||||
Label slow_path;
|
||||
// If we need a safepoint check, generate full interpreter entry.
|
||||
__ safepoint_poll(slow_path, false /* at_return */, false /* acquire */, false /* in_nmethod */);
|
||||
__ safepoint_poll(slow_path, false /* at_return */, false /* in_nmethod */);
|
||||
|
||||
// We don't generate local frame and don't align stack because
|
||||
// we call stub code and there is no safepoint on this path.
|
||||
@@ -1065,7 +1065,7 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI
|
||||
|
||||
Label slow_path;
|
||||
// If we need a safepoint check, generate full interpreter entry.
|
||||
__ safepoint_poll(slow_path, false /* at_return */, false /* acquire */, false /* in_nmethod */);
|
||||
__ safepoint_poll(slow_path, false /* at_return */, false /* in_nmethod */);
|
||||
|
||||
// We don't generate local frame and don't align stack because
|
||||
// we call stub code and there is no safepoint on this path.
|
||||
@@ -1455,7 +1455,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
Label L, Continue;
|
||||
|
||||
// No need for acquire as Java threads always disarm themselves.
|
||||
__ safepoint_poll(L, true /* at_return */, false /* acquire */, false /* in_nmethod */);
|
||||
__ safepoint_poll(L, true /* at_return */, false /* in_nmethod */);
|
||||
__ ldrw(rscratch2, Address(rthread, JavaThread::suspend_flags_offset()));
|
||||
__ cbz(rscratch2, Continue);
|
||||
__ bind(L);
|
||||
@@ -1608,7 +1608,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
|
||||
Label slow_path;
|
||||
Label fast_path;
|
||||
__ safepoint_poll(slow_path, true /* at_return */, false /* acquire */, false /* in_nmethod */);
|
||||
__ safepoint_poll(slow_path, true /* at_return */, false /* in_nmethod */);
|
||||
__ br(Assembler::AL, fast_path);
|
||||
__ bind(slow_path);
|
||||
__ push(dtos);
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -51,7 +51,7 @@ define_pd_global(size_t, ProfiledCodeHeapSize, 14*M );
|
||||
define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(bool, ProfileInterpreter, false);
|
||||
define_pd_global(size_t, CodeCacheExpansionSize, 32*K );
|
||||
define_pd_global(uintx, CodeCacheMinBlockLength, 1);
|
||||
define_pd_global(size_t, CodeCacheMinBlockLength, 1);
|
||||
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
|
||||
define_pd_global(bool, NeverActAsServerClassMachine, true);
|
||||
define_pd_global(uint64_t, MaxRAM, 1ULL*G);
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -94,7 +94,7 @@ define_pd_global(size_t, CodeCacheExpansionSize, 32*K);
|
||||
// Ergonomics related flags
|
||||
define_pd_global(uint64_t, MaxRAM, 4ULL*G);
|
||||
#endif
|
||||
define_pd_global(uintx, CodeCacheMinBlockLength, 6);
|
||||
define_pd_global(size_t, CodeCacheMinBlockLength, 6);
|
||||
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
|
||||
|
||||
define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -36,9 +36,9 @@ define_pd_global(bool, TrapBasedNullChecks, false); // Not needed
|
||||
|
||||
define_pd_global(bool, DelayCompilerStubsGeneration, false); // No need - only few compiler's stubs
|
||||
|
||||
define_pd_global(uintx, CodeCacheSegmentSize, 64);
|
||||
define_pd_global(intx, CodeEntryAlignment, 16);
|
||||
define_pd_global(intx, OptoLoopAlignment, 16);
|
||||
define_pd_global(size_t, CodeCacheSegmentSize, 64);
|
||||
define_pd_global(intx, CodeEntryAlignment, 16);
|
||||
define_pd_global(intx, OptoLoopAlignment, 16);
|
||||
|
||||
#define DEFAULT_STACK_YELLOW_PAGES (2)
|
||||
#define DEFAULT_STACK_RED_PAGES (1)
|
||||
|
||||
@@ -26,6 +26,13 @@
|
||||
#ifndef CPU_ARM_STUBDECLARATIONS_HPP
|
||||
#define CPU_ARM_STUBDECLARATIONS_HPP
|
||||
|
||||
#define STUBGEN_PREUNIVERSE_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(preuniverse, 0) \
|
||||
|
||||
|
||||
#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
|
||||
@@ -3126,6 +3126,10 @@ class StubGenerator: public StubCodeGenerator {
|
||||
//---------------------------------------------------------------------------
|
||||
// Initialization
|
||||
|
||||
void generate_preuniverse_stubs() {
|
||||
// preuniverse stubs are not needed for arm
|
||||
}
|
||||
|
||||
void generate_initial_stubs() {
|
||||
// Generates all stubs and initializes the entry points
|
||||
|
||||
@@ -3201,6 +3205,9 @@ class StubGenerator: public StubCodeGenerator {
|
||||
public:
|
||||
StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) {
|
||||
switch(blob_id) {
|
||||
case preuniverse_id:
|
||||
generate_preuniverse_stubs();
|
||||
break;
|
||||
case initial_id:
|
||||
generate_initial_stubs();
|
||||
break;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2019 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@@ -44,17 +44,17 @@ define_pd_global(intx, CompileThreshold, 1000);
|
||||
|
||||
define_pd_global(intx, OnStackReplacePercentage, 1400);
|
||||
define_pd_global(bool, ProfileInterpreter, false);
|
||||
define_pd_global(uintx, ReservedCodeCacheSize, 32*M);
|
||||
define_pd_global(uintx, NonProfiledCodeHeapSize, 13*M );
|
||||
define_pd_global(uintx, ProfiledCodeHeapSize, 14*M );
|
||||
define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(uintx, CodeCacheExpansionSize, 32*K);
|
||||
define_pd_global(uintx, CodeCacheMinBlockLength, 1);
|
||||
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
|
||||
define_pd_global(size_t, ReservedCodeCacheSize, 32*M);
|
||||
define_pd_global(size_t, NonProfiledCodeHeapSize, 13*M );
|
||||
define_pd_global(size_t, ProfiledCodeHeapSize, 14*M );
|
||||
define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(size_t, CodeCacheExpansionSize, 32*K);
|
||||
define_pd_global(size_t, CodeCacheMinBlockLength, 1);
|
||||
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
|
||||
define_pd_global(bool, NeverActAsServerClassMachine, true);
|
||||
define_pd_global(size_t, NewSizeThreadIncrease, 16*K);
|
||||
define_pd_global(uint64_t, MaxRAM, 1ULL*G);
|
||||
define_pd_global(uintx, InitialCodeCacheSize, 160*K);
|
||||
define_pd_global(size_t, InitialCodeCacheSize, 160*K);
|
||||
#endif // !COMPILER2
|
||||
|
||||
define_pd_global(bool, UseTypeProfile, false);
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2019 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@@ -78,17 +78,17 @@ define_pd_global(uint, SuperWordStoreToLoadForwardingFailureDetection, 16);
|
||||
define_pd_global(bool, OptoScheduling, false);
|
||||
define_pd_global(bool, IdealizeClearArrayNode, true);
|
||||
|
||||
define_pd_global(uintx, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
|
||||
define_pd_global(uintx, ReservedCodeCacheSize, 48*M);
|
||||
define_pd_global(uintx, NonProfiledCodeHeapSize, 21*M);
|
||||
define_pd_global(uintx, ProfiledCodeHeapSize, 22*M);
|
||||
define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(uintx, CodeCacheExpansionSize, 64*K);
|
||||
define_pd_global(size_t, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
|
||||
define_pd_global(size_t, ReservedCodeCacheSize, 48*M);
|
||||
define_pd_global(size_t, NonProfiledCodeHeapSize, 21*M);
|
||||
define_pd_global(size_t, ProfiledCodeHeapSize, 22*M);
|
||||
define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(size_t, CodeCacheExpansionSize, 64*K);
|
||||
|
||||
// Ergonomics related flags
|
||||
define_pd_global(uint64_t, MaxRAM, 128ULL*G);
|
||||
define_pd_global(uintx, CodeCacheMinBlockLength, 6);
|
||||
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
|
||||
define_pd_global(size_t, CodeCacheMinBlockLength, 6);
|
||||
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
|
||||
|
||||
define_pd_global(bool, TrapBasedRangeChecks, true);
|
||||
|
||||
|
||||
@@ -333,9 +333,9 @@ int SaveLiveRegisters::iterate_over_register_mask(IterationAction action, int of
|
||||
}
|
||||
} else if (vm_reg->is_ConditionRegister()) {
|
||||
// NOP. Conditions registers are covered by save_LR_CR
|
||||
} else if (vm_reg->is_VectorSRegister()) {
|
||||
} else if (vm_reg->is_VectorRegister()) {
|
||||
assert(SuperwordUseVSX, "or should not reach here");
|
||||
VectorSRegister vs_reg = vm_reg->as_VectorSRegister();
|
||||
VectorSRegister vs_reg = (vm_reg->as_VectorRegister()).to_vsr();
|
||||
if (vs_reg->encoding() >= VSR32->encoding() && vs_reg->encoding() <= VSR51->encoding()) {
|
||||
reg_save_index += (2 + (reg_save_index & 1)); // 2 slots + alignment if needed
|
||||
|
||||
|
||||
@@ -141,6 +141,7 @@ instruct zLoadP(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0)
|
||||
%{
|
||||
match(Set dst (LoadP mem));
|
||||
effect(TEMP_DEF dst, KILL cr0);
|
||||
ins_is_late_expanded_null_check_candidate(true);
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
|
||||
predicate((UseZGC && n->as_Load()->barrier_data() != 0)
|
||||
@@ -160,6 +161,7 @@ instruct zLoadP_acq(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0)
|
||||
%{
|
||||
match(Set dst (LoadP mem));
|
||||
effect(TEMP_DEF dst, KILL cr0);
|
||||
ins_is_late_expanded_null_check_candidate(true);
|
||||
ins_cost(3 * MEMORY_REF_COST);
|
||||
|
||||
// Predicate on instruction order is implicitly present due to the predicate of the cheaper zLoadP operation
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@@ -59,10 +59,10 @@ define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES);
|
||||
define_pd_global(bool, VMContinuations, true);
|
||||
|
||||
// Use large code-entry alignment.
|
||||
define_pd_global(uintx, CodeCacheSegmentSize, 128);
|
||||
define_pd_global(intx, CodeEntryAlignment, 64);
|
||||
define_pd_global(intx, OptoLoopAlignment, 16);
|
||||
define_pd_global(intx, InlineSmallCode, 1500);
|
||||
define_pd_global(size_t, CodeCacheSegmentSize, 128);
|
||||
define_pd_global(intx, CodeEntryAlignment, 64);
|
||||
define_pd_global(intx, OptoLoopAlignment, 16);
|
||||
define_pd_global(intx, InlineSmallCode, 1500);
|
||||
|
||||
// Flags for template interpreter.
|
||||
define_pd_global(bool, RewriteBytecodes, true);
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -321,6 +321,7 @@ class VectorRegister {
|
||||
|
||||
// accessors
|
||||
constexpr int encoding() const { assert(is_valid(), "invalid register"); return _encoding; }
|
||||
inline VMReg as_VMReg() const;
|
||||
|
||||
// testers
|
||||
constexpr bool is_valid() const { return (0 <= _encoding && _encoding < number_of_registers); }
|
||||
@@ -392,7 +393,6 @@ class VectorSRegister {
|
||||
|
||||
// accessors
|
||||
constexpr int encoding() const { assert(is_valid(), "invalid register"); return _encoding; }
|
||||
inline VMReg as_VMReg() const;
|
||||
VectorSRegister successor() const { return VectorSRegister(encoding() + 1); }
|
||||
|
||||
// testers
|
||||
@@ -484,8 +484,8 @@ class ConcreteRegisterImpl : public AbstractRegisterImpl {
|
||||
enum {
|
||||
max_gpr = Register::number_of_registers * 2,
|
||||
max_fpr = max_gpr + FloatRegister::number_of_registers * 2,
|
||||
max_vsr = max_fpr + VectorSRegister::number_of_registers * 4,
|
||||
max_cnd = max_vsr + ConditionRegister::number_of_registers,
|
||||
max_vr = max_fpr + VectorRegister::number_of_registers * 4,
|
||||
max_cnd = max_vr + ConditionRegister::number_of_registers,
|
||||
max_spr = max_cnd + SpecialRegister::number_of_registers,
|
||||
// This number must be large enough to cover REG_COUNT (defined by c2) registers.
|
||||
// There is no requirement that any ordering here matches any ordering c2 gives
|
||||
|
||||
@@ -111,13 +111,13 @@ class RegisterSaver {
|
||||
int_reg,
|
||||
float_reg,
|
||||
special_reg,
|
||||
vs_reg
|
||||
vec_reg
|
||||
} RegisterType;
|
||||
|
||||
typedef enum {
|
||||
reg_size = 8,
|
||||
half_reg_size = reg_size / 2,
|
||||
vs_reg_size = 16
|
||||
vec_reg_size = 16
|
||||
} RegisterConstants;
|
||||
|
||||
typedef struct {
|
||||
@@ -137,8 +137,8 @@ class RegisterSaver {
|
||||
#define RegisterSaver_LiveSpecialReg(regname) \
|
||||
{ RegisterSaver::special_reg, regname->encoding(), regname->as_VMReg() }
|
||||
|
||||
#define RegisterSaver_LiveVSReg(regname) \
|
||||
{ RegisterSaver::vs_reg, regname->encoding(), regname->as_VMReg() }
|
||||
#define RegisterSaver_LiveVecReg(regname) \
|
||||
{ RegisterSaver::vec_reg, regname->encoding(), regname->as_VMReg() }
|
||||
|
||||
static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = {
|
||||
// Live registers which get spilled to the stack. Register
|
||||
@@ -220,42 +220,42 @@ static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = {
|
||||
RegisterSaver_LiveIntReg( R31 ) // must be the last register (see save/restore functions below)
|
||||
};
|
||||
|
||||
static const RegisterSaver::LiveRegType RegisterSaver_LiveVSRegs[] = {
|
||||
static const RegisterSaver::LiveRegType RegisterSaver_LiveVecRegs[] = {
|
||||
//
|
||||
// live vector scalar registers (optional, only these ones are used by C2):
|
||||
// live vector registers (optional, only these ones are used by C2):
|
||||
//
|
||||
RegisterSaver_LiveVSReg( VSR32 ),
|
||||
RegisterSaver_LiveVSReg( VSR33 ),
|
||||
RegisterSaver_LiveVSReg( VSR34 ),
|
||||
RegisterSaver_LiveVSReg( VSR35 ),
|
||||
RegisterSaver_LiveVSReg( VSR36 ),
|
||||
RegisterSaver_LiveVSReg( VSR37 ),
|
||||
RegisterSaver_LiveVSReg( VSR38 ),
|
||||
RegisterSaver_LiveVSReg( VSR39 ),
|
||||
RegisterSaver_LiveVSReg( VSR40 ),
|
||||
RegisterSaver_LiveVSReg( VSR41 ),
|
||||
RegisterSaver_LiveVSReg( VSR42 ),
|
||||
RegisterSaver_LiveVSReg( VSR43 ),
|
||||
RegisterSaver_LiveVSReg( VSR44 ),
|
||||
RegisterSaver_LiveVSReg( VSR45 ),
|
||||
RegisterSaver_LiveVSReg( VSR46 ),
|
||||
RegisterSaver_LiveVSReg( VSR47 ),
|
||||
RegisterSaver_LiveVSReg( VSR48 ),
|
||||
RegisterSaver_LiveVSReg( VSR49 ),
|
||||
RegisterSaver_LiveVSReg( VSR50 ),
|
||||
RegisterSaver_LiveVSReg( VSR51 ),
|
||||
RegisterSaver_LiveVSReg( VSR52 ),
|
||||
RegisterSaver_LiveVSReg( VSR53 ),
|
||||
RegisterSaver_LiveVSReg( VSR54 ),
|
||||
RegisterSaver_LiveVSReg( VSR55 ),
|
||||
RegisterSaver_LiveVSReg( VSR56 ),
|
||||
RegisterSaver_LiveVSReg( VSR57 ),
|
||||
RegisterSaver_LiveVSReg( VSR58 ),
|
||||
RegisterSaver_LiveVSReg( VSR59 ),
|
||||
RegisterSaver_LiveVSReg( VSR60 ),
|
||||
RegisterSaver_LiveVSReg( VSR61 ),
|
||||
RegisterSaver_LiveVSReg( VSR62 ),
|
||||
RegisterSaver_LiveVSReg( VSR63 )
|
||||
RegisterSaver_LiveVecReg( VR0 ),
|
||||
RegisterSaver_LiveVecReg( VR1 ),
|
||||
RegisterSaver_LiveVecReg( VR2 ),
|
||||
RegisterSaver_LiveVecReg( VR3 ),
|
||||
RegisterSaver_LiveVecReg( VR4 ),
|
||||
RegisterSaver_LiveVecReg( VR5 ),
|
||||
RegisterSaver_LiveVecReg( VR6 ),
|
||||
RegisterSaver_LiveVecReg( VR7 ),
|
||||
RegisterSaver_LiveVecReg( VR8 ),
|
||||
RegisterSaver_LiveVecReg( VR9 ),
|
||||
RegisterSaver_LiveVecReg( VR10 ),
|
||||
RegisterSaver_LiveVecReg( VR11 ),
|
||||
RegisterSaver_LiveVecReg( VR12 ),
|
||||
RegisterSaver_LiveVecReg( VR13 ),
|
||||
RegisterSaver_LiveVecReg( VR14 ),
|
||||
RegisterSaver_LiveVecReg( VR15 ),
|
||||
RegisterSaver_LiveVecReg( VR16 ),
|
||||
RegisterSaver_LiveVecReg( VR17 ),
|
||||
RegisterSaver_LiveVecReg( VR18 ),
|
||||
RegisterSaver_LiveVecReg( VR19 ),
|
||||
RegisterSaver_LiveVecReg( VR20 ),
|
||||
RegisterSaver_LiveVecReg( VR21 ),
|
||||
RegisterSaver_LiveVecReg( VR22 ),
|
||||
RegisterSaver_LiveVecReg( VR23 ),
|
||||
RegisterSaver_LiveVecReg( VR24 ),
|
||||
RegisterSaver_LiveVecReg( VR25 ),
|
||||
RegisterSaver_LiveVecReg( VR26 ),
|
||||
RegisterSaver_LiveVecReg( VR27 ),
|
||||
RegisterSaver_LiveVecReg( VR28 ),
|
||||
RegisterSaver_LiveVecReg( VR29 ),
|
||||
RegisterSaver_LiveVecReg( VR30 ),
|
||||
RegisterSaver_LiveVecReg( VR31 )
|
||||
};
|
||||
|
||||
|
||||
@@ -277,10 +277,10 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble
|
||||
// calculate frame size
|
||||
const int regstosave_num = sizeof(RegisterSaver_LiveRegs) /
|
||||
sizeof(RegisterSaver::LiveRegType);
|
||||
const int vsregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVSRegs) /
|
||||
const int vecregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVecRegs) /
|
||||
sizeof(RegisterSaver::LiveRegType))
|
||||
: 0;
|
||||
const int register_save_size = regstosave_num * reg_size + vsregstosave_num * vs_reg_size;
|
||||
const int register_save_size = regstosave_num * reg_size + vecregstosave_num * vec_reg_size;
|
||||
const int frame_size_in_bytes = align_up(register_save_size, frame::alignment_in_bytes)
|
||||
+ frame::native_abi_reg_args_size;
|
||||
|
||||
@@ -298,8 +298,8 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble
|
||||
|
||||
// Save some registers in the last (non-vector) slots of the new frame so we
|
||||
// can use them as scratch regs or to determine the return pc.
|
||||
__ std(R31, frame_size_in_bytes - reg_size - vsregstosave_num * vs_reg_size, R1_SP);
|
||||
__ std(R30, frame_size_in_bytes - 2*reg_size - vsregstosave_num * vs_reg_size, R1_SP);
|
||||
__ std(R31, frame_size_in_bytes - reg_size - vecregstosave_num * vec_reg_size, R1_SP);
|
||||
__ std(R30, frame_size_in_bytes - 2*reg_size - vecregstosave_num * vec_reg_size, R1_SP);
|
||||
|
||||
// save the flags
|
||||
// Do the save_LR by hand and adjust the return pc if requested.
|
||||
@@ -360,37 +360,37 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble
|
||||
// the utilized instructions (PowerArchitecturePPC64).
|
||||
assert(is_aligned(offset, StackAlignmentInBytes), "should be");
|
||||
if (PowerArchitecturePPC64 >= 10) {
|
||||
assert(is_even(vsregstosave_num), "expectation");
|
||||
for (int i = 0; i < vsregstosave_num; i += 2) {
|
||||
int reg_num = RegisterSaver_LiveVSRegs[i].reg_num;
|
||||
assert(RegisterSaver_LiveVSRegs[i + 1].reg_num == reg_num + 1, "or use other instructions!");
|
||||
assert(is_even(vecregstosave_num), "expectation");
|
||||
for (int i = 0; i < vecregstosave_num; i += 2) {
|
||||
int reg_num = RegisterSaver_LiveVecRegs[i].reg_num;
|
||||
assert(RegisterSaver_LiveVecRegs[i + 1].reg_num == reg_num + 1, "or use other instructions!");
|
||||
|
||||
__ stxvp(as_VectorSRegister(reg_num), offset, R1_SP);
|
||||
__ stxvp(as_VectorRegister(reg_num).to_vsr(), offset, R1_SP);
|
||||
// Note: The contents were read in the same order (see loadV16_Power9 node in ppc.ad).
|
||||
if (generate_oop_map) {
|
||||
map->set_callee_saved(VMRegImpl::stack2reg(offset >> 2),
|
||||
RegisterSaver_LiveVSRegs[i LITTLE_ENDIAN_ONLY(+1) ].vmreg);
|
||||
map->set_callee_saved(VMRegImpl::stack2reg((offset + vs_reg_size) >> 2),
|
||||
RegisterSaver_LiveVSRegs[i BIG_ENDIAN_ONLY(+1) ].vmreg);
|
||||
RegisterSaver_LiveVecRegs[i LITTLE_ENDIAN_ONLY(+1) ].vmreg);
|
||||
map->set_callee_saved(VMRegImpl::stack2reg((offset + vec_reg_size) >> 2),
|
||||
RegisterSaver_LiveVecRegs[i BIG_ENDIAN_ONLY(+1) ].vmreg);
|
||||
}
|
||||
offset += (2 * vs_reg_size);
|
||||
offset += (2 * vec_reg_size);
|
||||
}
|
||||
} else {
|
||||
for (int i = 0; i < vsregstosave_num; i++) {
|
||||
int reg_num = RegisterSaver_LiveVSRegs[i].reg_num;
|
||||
for (int i = 0; i < vecregstosave_num; i++) {
|
||||
int reg_num = RegisterSaver_LiveVecRegs[i].reg_num;
|
||||
|
||||
if (PowerArchitecturePPC64 >= 9) {
|
||||
__ stxv(as_VectorSRegister(reg_num), offset, R1_SP);
|
||||
__ stxv(as_VectorRegister(reg_num)->to_vsr(), offset, R1_SP);
|
||||
} else {
|
||||
__ li(R31, offset);
|
||||
__ stxvd2x(as_VectorSRegister(reg_num), R31, R1_SP);
|
||||
__ stxvd2x(as_VectorRegister(reg_num)->to_vsr(), R31, R1_SP);
|
||||
}
|
||||
// Note: The contents were read in the same order (see loadV16_Power8 / loadV16_Power9 node in ppc.ad).
|
||||
if (generate_oop_map) {
|
||||
VMReg vsr = RegisterSaver_LiveVSRegs[i].vmreg;
|
||||
VMReg vsr = RegisterSaver_LiveVecRegs[i].vmreg;
|
||||
map->set_callee_saved(VMRegImpl::stack2reg(offset >> 2), vsr);
|
||||
}
|
||||
offset += vs_reg_size;
|
||||
offset += vec_reg_size;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -411,10 +411,10 @@ void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm,
|
||||
bool save_vectors) {
|
||||
const int regstosave_num = sizeof(RegisterSaver_LiveRegs) /
|
||||
sizeof(RegisterSaver::LiveRegType);
|
||||
const int vsregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVSRegs) /
|
||||
const int vecregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVecRegs) /
|
||||
sizeof(RegisterSaver::LiveRegType))
|
||||
: 0;
|
||||
const int register_save_size = regstosave_num * reg_size + vsregstosave_num * vs_reg_size;
|
||||
const int register_save_size = regstosave_num * reg_size + vecregstosave_num * vec_reg_size;
|
||||
|
||||
const int register_save_offset = frame_size_in_bytes - register_save_size;
|
||||
|
||||
@@ -456,26 +456,26 @@ void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm,
|
||||
|
||||
assert(is_aligned(offset, StackAlignmentInBytes), "should be");
|
||||
if (PowerArchitecturePPC64 >= 10) {
|
||||
for (int i = 0; i < vsregstosave_num; i += 2) {
|
||||
int reg_num = RegisterSaver_LiveVSRegs[i].reg_num;
|
||||
assert(RegisterSaver_LiveVSRegs[i + 1].reg_num == reg_num + 1, "or use other instructions!");
|
||||
for (int i = 0; i < vecregstosave_num; i += 2) {
|
||||
int reg_num = RegisterSaver_LiveVecRegs[i].reg_num;
|
||||
assert(RegisterSaver_LiveVecRegs[i + 1].reg_num == reg_num + 1, "or use other instructions!");
|
||||
|
||||
__ lxvp(as_VectorSRegister(reg_num), offset, R1_SP);
|
||||
__ lxvp(as_VectorRegister(reg_num).to_vsr(), offset, R1_SP);
|
||||
|
||||
offset += (2 * vs_reg_size);
|
||||
offset += (2 * vec_reg_size);
|
||||
}
|
||||
} else {
|
||||
for (int i = 0; i < vsregstosave_num; i++) {
|
||||
int reg_num = RegisterSaver_LiveVSRegs[i].reg_num;
|
||||
for (int i = 0; i < vecregstosave_num; i++) {
|
||||
int reg_num = RegisterSaver_LiveVecRegs[i].reg_num;
|
||||
|
||||
if (PowerArchitecturePPC64 >= 9) {
|
||||
__ lxv(as_VectorSRegister(reg_num), offset, R1_SP);
|
||||
__ lxv(as_VectorRegister(reg_num).to_vsr(), offset, R1_SP);
|
||||
} else {
|
||||
__ li(R31, offset);
|
||||
__ lxvd2x(as_VectorSRegister(reg_num), R31, R1_SP);
|
||||
__ lxvd2x(as_VectorRegister(reg_num).to_vsr(), R31, R1_SP);
|
||||
}
|
||||
|
||||
offset += vs_reg_size;
|
||||
offset += vec_reg_size;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -486,7 +486,7 @@ void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm,
|
||||
__ mtlr(R31);
|
||||
|
||||
// restore scratch register's value
|
||||
__ ld(R31, frame_size_in_bytes - reg_size - vsregstosave_num * vs_reg_size, R1_SP);
|
||||
__ ld(R31, frame_size_in_bytes - reg_size - vecregstosave_num * vec_reg_size, R1_SP);
|
||||
|
||||
// pop the frame
|
||||
__ addi(R1_SP, R1_SP, frame_size_in_bytes);
|
||||
|
||||
@@ -26,6 +26,13 @@
|
||||
#ifndef CPU_PPC_STUBDECLARATIONS_HPP
|
||||
#define CPU_PPC_STUBDECLARATIONS_HPP
|
||||
|
||||
#define STUBGEN_PREUNIVERSE_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(preuniverse, 0) \
|
||||
|
||||
|
||||
#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
|
||||
@@ -4938,6 +4938,10 @@ void generate_lookup_secondary_supers_table_stub() {
|
||||
}
|
||||
|
||||
// Initialization
|
||||
void generate_preuniverse_stubs() {
|
||||
// preuniverse stubs are not needed for ppc
|
||||
}
|
||||
|
||||
void generate_initial_stubs() {
|
||||
// Generates all stubs and initializes the entry points
|
||||
|
||||
@@ -5067,6 +5071,9 @@ void generate_lookup_secondary_supers_table_stub() {
|
||||
public:
|
||||
StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) {
|
||||
switch(blob_id) {
|
||||
case preuniverse_id:
|
||||
generate_preuniverse_stubs();
|
||||
break;
|
||||
case initial_id:
|
||||
generate_initial_stubs();
|
||||
break;
|
||||
|
||||
@@ -47,7 +47,7 @@ void VMRegImpl::set_regName() {
|
||||
}
|
||||
|
||||
VectorSRegister vsreg = ::as_VectorSRegister(0);
|
||||
for ( ; i < ConcreteRegisterImpl::max_vsr; ) {
|
||||
for ( ; i < ConcreteRegisterImpl::max_vr; ) {
|
||||
regName[i++] = vsreg->name();
|
||||
regName[i++] = vsreg->name();
|
||||
regName[i++] = vsreg->name();
|
||||
|
||||
@@ -35,13 +35,13 @@ inline bool is_FloatRegister() {
|
||||
value() < ConcreteRegisterImpl::max_fpr;
|
||||
}
|
||||
|
||||
inline bool is_VectorSRegister() {
|
||||
inline bool is_VectorRegister() {
|
||||
return value() >= ConcreteRegisterImpl::max_fpr &&
|
||||
value() < ConcreteRegisterImpl::max_vsr;
|
||||
value() < ConcreteRegisterImpl::max_vr;
|
||||
}
|
||||
|
||||
inline bool is_ConditionRegister() {
|
||||
return value() >= ConcreteRegisterImpl::max_vsr &&
|
||||
return value() >= ConcreteRegisterImpl::max_vr &&
|
||||
value() < ConcreteRegisterImpl::max_cnd;
|
||||
}
|
||||
|
||||
@@ -60,15 +60,15 @@ inline FloatRegister as_FloatRegister() {
|
||||
return ::as_FloatRegister((value() - ConcreteRegisterImpl::max_gpr) >> 1);
|
||||
}
|
||||
|
||||
inline VectorSRegister as_VectorSRegister() {
|
||||
assert(is_VectorSRegister(), "must be");
|
||||
return ::as_VectorSRegister((value() - ConcreteRegisterImpl::max_fpr) >> 2);
|
||||
inline VectorRegister as_VectorRegister() {
|
||||
assert(is_VectorRegister(), "must be");
|
||||
return ::as_VectorRegister((value() - ConcreteRegisterImpl::max_fpr) >> 2);
|
||||
}
|
||||
|
||||
inline bool is_concrete() {
|
||||
assert(is_reg(), "must be");
|
||||
if (is_Register() || is_FloatRegister()) return is_even(value());
|
||||
if (is_VectorSRegister()) {
|
||||
if (is_VectorRegister()) {
|
||||
int base = value() - ConcreteRegisterImpl::max_fpr;
|
||||
return (base & 3) == 0;
|
||||
}
|
||||
|
||||
@@ -40,13 +40,13 @@ inline VMReg FloatRegister::as_VMReg() const {
|
||||
return VMRegImpl::as_VMReg((encoding() << 1) + ConcreteRegisterImpl::max_gpr);
|
||||
}
|
||||
|
||||
inline VMReg VectorSRegister::as_VMReg() const {
|
||||
inline VMReg VectorRegister::as_VMReg() const {
|
||||
// Four halves, multiply by 4.
|
||||
return VMRegImpl::as_VMReg((encoding() << 2) + ConcreteRegisterImpl::max_fpr);
|
||||
}
|
||||
|
||||
inline VMReg ConditionRegister::as_VMReg() const {
|
||||
return VMRegImpl::as_VMReg((encoding()) + ConcreteRegisterImpl::max_vsr);
|
||||
return VMRegImpl::as_VMReg((encoding()) + ConcreteRegisterImpl::max_vr);
|
||||
}
|
||||
|
||||
inline VMReg SpecialRegister::as_VMReg() const {
|
||||
|
||||
@@ -401,7 +401,7 @@ void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
|
||||
|
||||
code_stub->set_safepoint_offset(__ offset());
|
||||
__ relocate(relocInfo::poll_return_type);
|
||||
__ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
|
||||
__ safepoint_poll(*code_stub->entry(), true /* at_return */, true /* in_nmethod */);
|
||||
__ ret();
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@@ -43,15 +43,15 @@ define_pd_global(intx, CompileThreshold, 1500 );
|
||||
|
||||
define_pd_global(intx, OnStackReplacePercentage, 933 );
|
||||
define_pd_global(intx, NewSizeThreadIncrease, 4*K );
|
||||
define_pd_global(intx, InitialCodeCacheSize, 160*K);
|
||||
define_pd_global(intx, ReservedCodeCacheSize, 32*M );
|
||||
define_pd_global(intx, NonProfiledCodeHeapSize, 13*M );
|
||||
define_pd_global(intx, ProfiledCodeHeapSize, 14*M );
|
||||
define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(size_t, InitialCodeCacheSize, 160*K);
|
||||
define_pd_global(size_t, ReservedCodeCacheSize, 32*M );
|
||||
define_pd_global(size_t, NonProfiledCodeHeapSize, 13*M );
|
||||
define_pd_global(size_t, ProfiledCodeHeapSize, 14*M );
|
||||
define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(bool, ProfileInterpreter, false);
|
||||
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
|
||||
define_pd_global(uintx, CodeCacheMinBlockLength, 1);
|
||||
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
|
||||
define_pd_global(size_t, CodeCacheExpansionSize, 32*K );
|
||||
define_pd_global(size_t, CodeCacheMinBlockLength, 1);
|
||||
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
|
||||
define_pd_global(bool, NeverActAsServerClassMachine, true );
|
||||
define_pd_global(uint64_t, MaxRAM, 1ULL*G);
|
||||
define_pd_global(bool, CICompileOSR, true );
|
||||
|
||||
@@ -2170,15 +2170,13 @@ void C2_MacroAssembler::enc_cmove_cmp_fp(int cmpFlag, FloatRegister op1, FloatRe
|
||||
cmov_cmp_fp_le(op1, op2, dst, src, is_single);
|
||||
break;
|
||||
case BoolTest::ge:
|
||||
assert(false, "Should go to BoolTest::le case");
|
||||
ShouldNotReachHere();
|
||||
cmov_cmp_fp_ge(op1, op2, dst, src, is_single);
|
||||
break;
|
||||
case BoolTest::lt:
|
||||
cmov_cmp_fp_lt(op1, op2, dst, src, is_single);
|
||||
break;
|
||||
case BoolTest::gt:
|
||||
assert(false, "Should go to BoolTest::lt case");
|
||||
ShouldNotReachHere();
|
||||
cmov_cmp_fp_gt(op1, op2, dst, src, is_single);
|
||||
break;
|
||||
default:
|
||||
assert(false, "unsupported compare condition");
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@@ -51,8 +51,8 @@ define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
|
||||
define_pd_global(intx, LoopUnrollLimit, 60);
|
||||
define_pd_global(intx, LoopPercentProfileLimit, 10);
|
||||
// InitialCodeCacheSize derived from specjbb2000 run.
|
||||
define_pd_global(intx, InitialCodeCacheSize, 2496*K); // Integral multiple of CodeCacheExpansionSize
|
||||
define_pd_global(intx, CodeCacheExpansionSize, 64*K);
|
||||
define_pd_global(size_t, InitialCodeCacheSize, 2496*K); // Integral multiple of CodeCacheExpansionSize
|
||||
define_pd_global(size_t, CodeCacheExpansionSize, 64*K);
|
||||
|
||||
// Ergonomics related flags
|
||||
define_pd_global(uint64_t,MaxRAM, 128ULL*G);
|
||||
@@ -69,12 +69,12 @@ define_pd_global(bool, SuperWordLoopUnrollAnalysis, true);
|
||||
define_pd_global(uint, SuperWordStoreToLoadForwardingFailureDetection, 16);
|
||||
define_pd_global(bool, IdealizeClearArrayNode, true);
|
||||
|
||||
define_pd_global(intx, ReservedCodeCacheSize, 48*M);
|
||||
define_pd_global(intx, NonProfiledCodeHeapSize, 21*M);
|
||||
define_pd_global(intx, ProfiledCodeHeapSize, 22*M);
|
||||
define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(uintx, CodeCacheMinBlockLength, 6);
|
||||
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
|
||||
define_pd_global(size_t, ReservedCodeCacheSize, 48*M);
|
||||
define_pd_global(size_t, NonProfiledCodeHeapSize, 21*M);
|
||||
define_pd_global(size_t, ProfiledCodeHeapSize, 22*M);
|
||||
define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(size_t, CodeCacheMinBlockLength, 6);
|
||||
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
|
||||
|
||||
// Ergonomics related flags
|
||||
define_pd_global(bool, NeverActAsServerClassMachine, false);
|
||||
|
||||
@@ -287,7 +287,7 @@ void DowncallLinker::StubGenerator::generate() {
|
||||
__ membar(MacroAssembler::AnyAny);
|
||||
}
|
||||
|
||||
__ safepoint_poll(L_safepoint_poll_slow_path, true /* at_return */, true /* acquire */, false /* in_nmethod */);
|
||||
__ safepoint_poll(L_safepoint_poll_slow_path, true /* at_return */, false /* in_nmethod */);
|
||||
__ lwu(t0, Address(xthread, JavaThread::suspend_flags_offset()));
|
||||
__ bnez(t0, L_safepoint_poll_slow_path);
|
||||
|
||||
|
||||
@@ -96,6 +96,7 @@ instruct zLoadP(iRegPNoSp dst, memory mem, iRegPNoSp tmp, rFlagsReg cr)
|
||||
match(Set dst (LoadP mem));
|
||||
predicate(UseZGC && n->as_Load()->barrier_data() != 0);
|
||||
effect(TEMP dst, TEMP tmp, KILL cr);
|
||||
ins_is_late_expanded_null_check_candidate(true);
|
||||
|
||||
ins_cost(4 * DEFAULT_COST);
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@@ -38,7 +38,7 @@ define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap nulls
|
||||
|
||||
define_pd_global(bool, DelayCompilerStubsGeneration, COMPILER2_OR_JVMCI);
|
||||
|
||||
define_pd_global(uintx, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
|
||||
define_pd_global(size_t, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
|
||||
define_pd_global(intx, CodeEntryAlignment, 64);
|
||||
define_pd_global(intx, OptoLoopAlignment, 16);
|
||||
|
||||
|
||||
@@ -645,7 +645,7 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
|
||||
// the stack, will call InterpreterRuntime::at_unwind.
|
||||
Label slow_path;
|
||||
Label fast_path;
|
||||
safepoint_poll(slow_path, true /* at_return */, false /* acquire */, false /* in_nmethod */);
|
||||
safepoint_poll(slow_path, true /* at_return */, false /* in_nmethod */);
|
||||
j(fast_path);
|
||||
|
||||
bind(slow_path);
|
||||
|
||||
@@ -1268,12 +1268,19 @@ void MacroAssembler::cmov_gtu(Register cmp1, Register cmp2, Register dst, Regist
}

// ----------- cmove, compare float -----------
//
// For CmpF/D + CMoveI/L, ordered ones are quite straight and simple,
// so, just list behaviour of unordered ones as follow.
//
// Set dst (CMoveI (Binary cop (CmpF/D op1 op2)) (Binary dst src))
// (If one or both inputs to the compare are NaN, then)
// 1. (op1 lt op2) => true => CMove: dst = src
// 2. (op1 le op2) => true => CMove: dst = src
// 3. (op1 gt op2) => false => CMove: dst = dst
// 4. (op1 ge op2) => false => CMove: dst = dst
// 5. (op1 eq op2) => false => CMove: dst = dst
// 6. (op1 ne op2) => true => CMove: dst = src

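The six unordered cases listed above can be summed up with a small, hedged C++ sketch (an illustration, not code from this patch): a NaN operand makes every ordered comparison false, so the lt/le/ne flavours end up taking the move, while gt/ge/eq keep the old value. The sketch below models the "le" flavour.

// Hedged sketch of the "le" flavour: the move is taken when cmp1 <= cmp2
// or when either input is NaN (the unordered case).
static int cmove_le(double cmp1, double cmp2, int dst, int src) {
    if (cmp1 > cmp2) {   // false whenever an input is NaN, so unordered falls through
        return dst;      // ordered and strictly greater: keep dst
    }
    return src;          // less-or-equal, or unordered: take src (cases 1 and 2 above)
}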
// Move src to dst only if cmp1 == cmp2,
|
||||
// otherwise leave dst unchanged, including the case where one of them is NaN.
|
||||
// Clarification:
|
||||
// java code : cmp1 != cmp2 ? dst : src
|
||||
// transformed to : CMove dst, (cmp1 eq cmp2), dst, src
|
||||
void MacroAssembler::cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
|
||||
if (UseZicond) {
|
||||
if (is_single) {
|
||||
@@ -1289,7 +1296,7 @@ void MacroAssembler::cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Regi
|
||||
Label no_set;
|
||||
if (is_single) {
|
||||
// jump if cmp1 != cmp2, including the case of NaN
|
||||
// not jump (i.e. move src to dst) if cmp1 == cmp2
|
||||
// fallthrough (i.e. move src to dst) if cmp1 == cmp2
|
||||
float_bne(cmp1, cmp2, no_set);
|
||||
} else {
|
||||
double_bne(cmp1, cmp2, no_set);
|
||||
@@ -1298,11 +1305,6 @@ void MacroAssembler::cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Regi
|
||||
bind(no_set);
|
||||
}
|
||||
|
||||
// Keep dst unchanged only if cmp1 == cmp2,
|
||||
// otherwise move src to dst, including the case where one of them is NaN.
|
||||
// Clarification:
|
||||
// java code : cmp1 == cmp2 ? dst : src
|
||||
// transformed to : CMove dst, (cmp1 ne cmp2), dst, src
|
||||
void MacroAssembler::cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
|
||||
if (UseZicond) {
|
||||
if (is_single) {
|
||||
@@ -1318,7 +1320,7 @@ void MacroAssembler::cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Regi
|
||||
Label no_set;
|
||||
if (is_single) {
|
||||
// jump if cmp1 == cmp2
|
||||
// not jump (i.e. move src to dst) if cmp1 != cmp2, including the case of NaN
|
||||
// fallthrough (i.e. move src to dst) if cmp1 != cmp2, including the case of NaN
|
||||
float_beq(cmp1, cmp2, no_set);
|
||||
} else {
|
||||
double_beq(cmp1, cmp2, no_set);
|
||||
@@ -1327,14 +1329,6 @@ void MacroAssembler::cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Regi
|
||||
bind(no_set);
|
||||
}
|
||||
|
||||
// When cmp1 <= cmp2 or any of them is NaN then dst = src, otherwise, dst = dst
|
||||
// Clarification
|
||||
// scenario 1:
|
||||
// java code : cmp2 < cmp1 ? dst : src
|
||||
// transformed to : CMove dst, (cmp1 le cmp2), dst, src
|
||||
// scenario 2:
|
||||
// java code : cmp1 > cmp2 ? dst : src
|
||||
// transformed to : CMove dst, (cmp1 le cmp2), dst, src
|
||||
void MacroAssembler::cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
|
||||
if (UseZicond) {
|
||||
if (is_single) {
|
||||
@@ -1350,7 +1344,7 @@ void MacroAssembler::cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Regi
|
||||
Label no_set;
|
||||
if (is_single) {
|
||||
// jump if cmp1 > cmp2
|
||||
// not jump (i.e. move src to dst) if cmp1 <= cmp2 or either is NaN
|
||||
// fallthrough (i.e. move src to dst) if cmp1 <= cmp2 or either is NaN
|
||||
float_bgt(cmp1, cmp2, no_set);
|
||||
} else {
|
||||
double_bgt(cmp1, cmp2, no_set);
|
||||
@@ -1359,14 +1353,30 @@ void MacroAssembler::cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Regi
|
||||
bind(no_set);
|
||||
}
|
||||
|
||||
// When cmp1 < cmp2 or any of them is NaN then dst = src, otherwise, dst = dst
|
||||
// Clarification
|
||||
// scenario 1:
|
||||
// java code : cmp2 <= cmp1 ? dst : src
|
||||
// transformed to : CMove dst, (cmp1 lt cmp2), dst, src
|
||||
// scenario 2:
|
||||
// java code : cmp1 >= cmp2 ? dst : src
|
||||
// transformed to : CMove dst, (cmp1 lt cmp2), dst, src
|
||||
void MacroAssembler::cmov_cmp_fp_ge(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
|
||||
if (UseZicond) {
|
||||
if (is_single) {
|
||||
fle_s(t0, cmp2, cmp1);
|
||||
} else {
|
||||
fle_d(t0, cmp2, cmp1);
|
||||
}
|
||||
czero_nez(dst, dst, t0);
|
||||
czero_eqz(t0 , src, t0);
|
||||
orr(dst, dst, t0);
|
||||
return;
|
||||
}
|
||||
Label no_set;
|
||||
if (is_single) {
|
||||
// jump if cmp1 < cmp2 or either is NaN
|
||||
// fallthrough (i.e. move src to dst) if cmp1 >= cmp2
|
||||
float_blt(cmp1, cmp2, no_set, false, true);
|
||||
} else {
|
||||
double_blt(cmp1, cmp2, no_set, false, true);
|
||||
}
|
||||
mv(dst, src);
|
||||
bind(no_set);
|
||||
}
|
||||
|
||||
void MacroAssembler::cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
|
||||
if (UseZicond) {
|
||||
if (is_single) {
|
||||
@@ -1382,7 +1392,7 @@ void MacroAssembler::cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Regi
|
||||
Label no_set;
|
||||
if (is_single) {
|
||||
// jump if cmp1 >= cmp2
|
||||
// not jump (i.e. move src to dst) if cmp1 < cmp2 or either is NaN
|
||||
// fallthrough (i.e. move src to dst) if cmp1 < cmp2 or either is NaN
|
||||
float_bge(cmp1, cmp2, no_set);
|
||||
} else {
|
||||
double_bge(cmp1, cmp2, no_set);
|
||||
@@ -1391,6 +1401,30 @@ void MacroAssembler::cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Regi
|
||||
bind(no_set);
|
||||
}
|
||||
|
||||
void MacroAssembler::cmov_cmp_fp_gt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
if (UseZicond) {
if (is_single) {
flt_s(t0, cmp2, cmp1);
} else {
flt_d(t0, cmp2, cmp1);
}
czero_nez(dst, dst, t0);
czero_eqz(t0 , src, t0);
orr(dst, dst, t0);
return;
}
Label no_set;
if (is_single) {
// jump if cmp1 <= cmp2 or either is NaN
// fallthrough (i.e. move src to dst) if cmp1 > cmp2
float_ble(cmp1, cmp2, no_set, false, true);
} else {
double_ble(cmp1, cmp2, no_set, false, true);
}
mv(dst, src);
bind(no_set);
}

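For readers not familiar with the Zicond extension used in the branch-free paths above, the czero_nez / czero_eqz / orr triple is a conditional select. A hedged C++ model of what the three instructions compute (illustrative only, not part of the patch):

#include <cstdint>

// czero.nez rd, rs1, rs2  =>  rd = (rs2 != 0) ? 0 : rs1
// czero.eqz rd, rs1, rs2  =>  rd = (rs2 == 0) ? 0 : rs1
// Combined with orr this yields dst = (cond != 0) ? src : dst, with no branch.
static uint64_t zicond_select(uint64_t cond, uint64_t dst, uint64_t src) {
    uint64_t keep_dst = (cond != 0) ? 0 : dst;  // czero_nez(dst, dst, t0)
    uint64_t take_src = (cond == 0) ? 0 : src;  // czero_eqz(t0, src, t0)
    return keep_dst | take_src;                 // orr(dst, dst, t0)
}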
// Float compare branch instructions
|
||||
|
||||
#define INSN(NAME, FLOATCMP, BRANCH) \
|
||||
@@ -3739,11 +3773,8 @@ void MacroAssembler::check_klass_subtype(Register sub_klass,
|
||||
bind(L_failure);
|
||||
}
|
||||
|
||||
void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp_reg) {
|
||||
void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod, Register tmp_reg) {
|
||||
ld(tmp_reg, Address(xthread, JavaThread::polling_word_offset()));
|
||||
if (acquire) {
|
||||
membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
|
||||
}
|
||||
if (at_return) {
|
||||
bgtu(in_nmethod ? sp : fp, tmp_reg, slow_path, /* is_far */ true);
|
||||
} else {
|
||||
|
||||
@@ -44,7 +44,7 @@ class MacroAssembler: public Assembler {
|
||||
|
||||
MacroAssembler(CodeBuffer* code) : Assembler(code) {}
|
||||
|
||||
void safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp_reg = t0);
|
||||
void safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod, Register tmp_reg = t0);
|
||||
|
||||
// Alignment
|
||||
int align(int modulus, int extra_offset = 0);
|
||||
@@ -660,7 +660,9 @@ class MacroAssembler: public Assembler {
|
||||
void cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
|
||||
void cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
|
||||
void cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
|
||||
void cmov_cmp_fp_ge(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
|
||||
void cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
|
||||
void cmov_cmp_fp_gt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
|
||||
|
||||
public:
|
||||
// We try to follow risc-v asm mnemonics.
|
||||
|
||||
@@ -1493,7 +1493,7 @@ void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
|
||||
code_stub = &stub->entry();
|
||||
}
|
||||
__ relocate(relocInfo::poll_return_type);
|
||||
__ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
|
||||
__ safepoint_poll(*code_stub, true /* at_return */, true /* in_nmethod */);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2619,6 +2619,10 @@ ins_attrib ins_alignment(4); // Required alignment attribute (must
|
||||
// compute_padding() function must be
|
||||
// provided for the instruction
|
||||
|
||||
// Whether this node is expanded during code emission into a sequence of
|
||||
// instructions and the first instruction can perform an implicit null check.
|
||||
ins_attrib ins_is_late_expanded_null_check_candidate(false);
|
||||
|
||||
//----------OPERANDS-----------------------------------------------------------
|
||||
// Operand definitions must precede instruction definitions for correct parsing
|
||||
// in the ADLC because operands constitute user defined types which are used in
|
||||
|
||||
@@ -1777,15 +1777,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
||||
|
||||
// check for safepoint operation in progress and/or pending suspend requests
|
||||
{
|
||||
// We need an acquire here to ensure that any subsequent load of the
|
||||
// global SafepointSynchronize::_state flag is ordered after this load
|
||||
// of the thread-local polling word. We don't want this poll to
|
||||
// return false (i.e. not safepointing) and a later poll of the global
|
||||
// SafepointSynchronize::_state spuriously to return true.
|
||||
// This is to avoid a race when we're in a native->Java transition
|
||||
// racing the code which wakes up from a safepoint.
|
||||
|
||||
__ safepoint_poll(safepoint_in_progress, true /* at_return */, true /* acquire */, false /* in_nmethod */);
|
||||
__ safepoint_poll(safepoint_in_progress, true /* at_return */, false /* in_nmethod */);
|
||||
__ lwu(t0, Address(xthread, JavaThread::suspend_flags_offset()));
|
||||
__ bnez(t0, safepoint_in_progress);
|
||||
__ bind(safepoint_in_progress_done);
|
||||
|
||||
@@ -26,6 +26,13 @@
|
||||
#ifndef CPU_RISCV_STUBDECLARATIONS_HPP
|
||||
#define CPU_RISCV_STUBDECLARATIONS_HPP
|
||||
|
||||
#define STUBGEN_PREUNIVERSE_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(preuniverse, 0) \
|
||||
|
||||
|
||||
#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
|
||||
@@ -6660,6 +6660,10 @@ static const int64_t right_3_bits = right_n_bits(3);
|
||||
#undef __
|
||||
|
||||
// Initialization
|
||||
void generate_preuniverse_stubs() {
|
||||
// preuniverse stubs are not needed for riscv
|
||||
}
|
||||
|
||||
void generate_initial_stubs() {
|
||||
// Generate initial stubs and initializes the entry points
|
||||
|
||||
@@ -6815,6 +6819,9 @@ static const int64_t right_3_bits = right_n_bits(3);
|
||||
public:
|
||||
StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) {
|
||||
switch(blob_id) {
|
||||
case preuniverse_id:
|
||||
generate_preuniverse_stubs();
|
||||
break;
|
||||
case initial_id:
|
||||
generate_initial_stubs();
|
||||
break;
|
||||
|
||||
@@ -1229,15 +1229,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
{
|
||||
Label L, Continue;
|
||||
|
||||
// We need an acquire here to ensure that any subsequent load of the
|
||||
// global SafepointSynchronize::_state flag is ordered after this load
|
||||
// of the thread-local polling word. We don't want this poll to
|
||||
// return false (i.e. not safepointing) and a later poll of the global
|
||||
// SafepointSynchronize::_state spuriously to return true.
|
||||
//
|
||||
// This is to avoid a race when we're in a native->Java transition
|
||||
// racing the code which wakes up from a safepoint.
|
||||
__ safepoint_poll(L, true /* at_return */, true /* acquire */, false /* in_nmethod */);
|
||||
__ safepoint_poll(L, true /* at_return */, false /* in_nmethod */);
|
||||
__ lwu(t1, Address(xthread, JavaThread::suspend_flags_offset()));
|
||||
__ beqz(t1, Continue);
|
||||
__ bind(L);
|
||||
@@ -1388,7 +1380,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
|
||||
Label slow_path;
|
||||
Label fast_path;
|
||||
__ safepoint_poll(slow_path, true /* at_return */, false /* acquire */, false /* in_nmethod */);
|
||||
__ safepoint_poll(slow_path, true /* at_return */, false /* in_nmethod */);
|
||||
__ j(fast_path);
|
||||
|
||||
__ bind(slow_path);
|
||||
|
||||
@@ -203,15 +203,15 @@ void VM_Version::common_initialize() {
|
||||
}
|
||||
}
|
||||
|
||||
// Misc Intrinsics could depend on RVV
|
||||
// Misc Intrinsics that could depend on RVV.
|
||||
|
||||
if (UseZba || UseRVV) {
|
||||
if (!AvoidUnalignedAccesses && (UseZba || UseRVV)) {
|
||||
if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
|
||||
FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
|
||||
}
|
||||
} else {
|
||||
if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
|
||||
warning("CRC32 intrinsic requires Zba or RVV instructions (not available on this CPU)");
|
||||
warning("CRC32 intrinsic are not available on this CPU.");
|
||||
}
|
||||
FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2018 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@@ -44,17 +44,17 @@ define_pd_global(intx, CompileThreshold, 1000);
|
||||
|
||||
define_pd_global(intx, OnStackReplacePercentage, 1400);
|
||||
define_pd_global(bool, ProfileInterpreter, false);
|
||||
define_pd_global(uintx, ReservedCodeCacheSize, 32*M);
|
||||
define_pd_global(uintx, NonProfiledCodeHeapSize, 13*M);
|
||||
define_pd_global(uintx, ProfiledCodeHeapSize, 14*M);
|
||||
define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M);
|
||||
define_pd_global(uintx, CodeCacheExpansionSize, 32*K);
|
||||
define_pd_global(uintx, CodeCacheMinBlockLength, 1);
|
||||
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
|
||||
define_pd_global(size_t, ReservedCodeCacheSize, 32*M);
|
||||
define_pd_global(size_t, NonProfiledCodeHeapSize, 13*M);
|
||||
define_pd_global(size_t, ProfiledCodeHeapSize, 14*M);
|
||||
define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M);
|
||||
define_pd_global(size_t, CodeCacheExpansionSize, 32*K);
|
||||
define_pd_global(size_t, CodeCacheMinBlockLength, 1);
|
||||
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
|
||||
define_pd_global(bool, NeverActAsServerClassMachine, true);
|
||||
define_pd_global(size_t, NewSizeThreadIncrease, 16*K);
|
||||
define_pd_global(uint64_t, MaxRAM, 1ULL*G);
|
||||
define_pd_global(uintx, InitialCodeCacheSize, 160*K);
|
||||
define_pd_global(size_t, InitialCodeCacheSize, 160*K);
|
||||
#endif // !COMPILER2
|
||||
|
||||
define_pd_global(bool, UseTypeProfile, false);
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2018 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@@ -67,17 +67,17 @@ define_pd_global(uint, SuperWordStoreToLoadForwardingFailureDetection, 16);
|
||||
define_pd_global(bool, IdealizeClearArrayNode, false);
|
||||
|
||||
// InitialCodeCacheSize derived from specjbb2000 run.
|
||||
define_pd_global(uintx, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
|
||||
define_pd_global(uintx, ReservedCodeCacheSize, 48*M);
|
||||
define_pd_global(uintx, NonProfiledCodeHeapSize, 21*M);
|
||||
define_pd_global(uintx, ProfiledCodeHeapSize, 22*M);
|
||||
define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M);
|
||||
define_pd_global(uintx, CodeCacheExpansionSize, 64*K);
|
||||
define_pd_global(size_t, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
|
||||
define_pd_global(size_t, ReservedCodeCacheSize, 48*M);
|
||||
define_pd_global(size_t, NonProfiledCodeHeapSize, 21*M);
|
||||
define_pd_global(size_t, ProfiledCodeHeapSize, 22*M);
|
||||
define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M);
|
||||
define_pd_global(size_t, CodeCacheExpansionSize, 64*K);
|
||||
|
||||
// Ergonomics related flags
|
||||
define_pd_global(uint64_t, MaxRAM, 128ULL*G);
|
||||
define_pd_global(uintx, CodeCacheMinBlockLength, 4);
|
||||
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
|
||||
define_pd_global(size_t, CodeCacheMinBlockLength, 4);
|
||||
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
|
||||
|
||||
define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed on z/Architecture.
|
||||
|
||||
|
||||
@@ -410,7 +410,7 @@
|
||||
|
||||
// C2I adapter frames:
|
||||
//
|
||||
// STACK (interpreted called from compiled, on entry to frame manager):
|
||||
// STACK (interpreted called from compiled, on entry to template interpreter):
|
||||
//
|
||||
// [TOP_C2I_FRAME]
|
||||
// [JIT_FRAME]
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2018 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@@ -38,7 +38,7 @@ define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap nu
|
||||
|
||||
define_pd_global(bool, DelayCompilerStubsGeneration, COMPILER2_OR_JVMCI);
|
||||
|
||||
define_pd_global(uintx, CodeCacheSegmentSize, 256);
|
||||
define_pd_global(size_t, CodeCacheSegmentSize, 256);
|
||||
// This shall be at least 32 for proper branch target alignment.
|
||||
// Ideally, this is 256 (cache line size). This keeps code end data
|
||||
// on separate lines. But we reduced it to 64 since 256 increased
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2023 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@@ -414,7 +414,7 @@ constexpr FloatRegister Z_FARG2 = Z_F2;
|
||||
constexpr FloatRegister Z_FARG3 = Z_F4;
|
||||
constexpr FloatRegister Z_FARG4 = Z_F6;
|
||||
|
||||
// Register declarations to be used in frame manager assembly code.
|
||||
// Register declarations to be used in template interpreter assembly code.
|
||||
// Use only non-volatile registers in order to keep values across C-calls.
|
||||
|
||||
// Register to cache the integer value on top of the operand stack.
|
||||
@@ -439,7 +439,7 @@ constexpr Register Z_bcp = Z_R13;
|
||||
// Bytecode which is dispatched (short lived!).
|
||||
constexpr Register Z_bytecode = Z_R14;
|
||||
|
||||
// Temporary registers to be used within frame manager. We can use
|
||||
// Temporary registers to be used within template interpreter. We can use
|
||||
// the nonvolatile ones because the call stub has saved them.
|
||||
// Use only non-volatile registers in order to keep values across C-calls.
|
||||
constexpr Register Z_tmp_1 = Z_R10;
|
||||
|
||||
@@ -118,7 +118,7 @@ ExceptionBlob* OptoRuntime::generate_exception_blob() {
|
||||
__ z_lgr(Z_SP, saved_sp);
|
||||
|
||||
// [Z_RET] isn't null was possible in hotspot5 but not in sapjvm6.
|
||||
// C2I adapter extensions are now removed by a resize in the frame manager
|
||||
// C2I adapter extensions are now removed by a resize in the template interpreter
|
||||
// (unwind_initial_activation_pending_exception).
|
||||
#ifdef ASSERT
|
||||
__ z_ltgr(handle_exception, handle_exception);
|
||||
|
||||
@@ -2139,7 +2139,7 @@ static address gen_c2i_adapter(MacroAssembler *masm,
|
||||
Register value = Z_R12;
|
||||
|
||||
// Remember the senderSP so we can pop the interpreter arguments off of the stack.
|
||||
// In addition, frame manager expects initial_caller_sp in Z_R10.
|
||||
// In addition, template interpreter expects initial_caller_sp in Z_R10.
|
||||
__ z_lgr(sender_SP, Z_SP);
|
||||
|
||||
// This should always fit in 14 bit immediate.
|
||||
|
||||
@@ -26,6 +26,13 @@
|
||||
#ifndef CPU_S390_STUBDECLARATIONS_HPP
|
||||
#define CPU_S390_STUBDECLARATIONS_HPP
|
||||
|
||||
#define STUBGEN_PREUNIVERSE_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(preuniverse, 0) \
|
||||
|
||||
|
||||
#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
|
||||
@@ -115,7 +115,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// [SP+176] - thread : Thread*
|
||||
//
|
||||
address generate_call_stub(address& return_address) {
|
||||
// Set up a new C frame, copy Java arguments, call frame manager
|
||||
// Set up a new C frame, copy Java arguments, call template interpreter
|
||||
// or native_entry, and process result.
|
||||
|
||||
StubGenStubId stub_id = StubGenStubId::call_stub_id;
|
||||
@@ -272,10 +272,10 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
BLOCK_COMMENT("call {");
|
||||
{
|
||||
// Call frame manager or native entry.
|
||||
// Call template interpreter or native entry.
|
||||
|
||||
//
|
||||
// Register state on entry to frame manager / native entry:
|
||||
// Register state on entry to template interpreter / native entry:
|
||||
//
|
||||
// Z_ARG1 = r_top_of_arguments_addr - intptr_t *sender tos (prepushed)
|
||||
// Lesp = (SP) + copied_arguments_offset - 8
|
||||
@@ -290,7 +290,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
__ z_lgr(Z_esp, r_top_of_arguments_addr);
|
||||
|
||||
//
|
||||
// Stack on entry to frame manager / native entry:
|
||||
// Stack on entry to template interpreter / native entry:
|
||||
//
|
||||
// F0 [TOP_IJAVA_FRAME_ABI]
|
||||
// [outgoing Java arguments]
|
||||
@@ -300,7 +300,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
//
|
||||
|
||||
// Do a light-weight C-call here, r_new_arg_entry holds the address
|
||||
// of the interpreter entry point (frame manager or native entry)
|
||||
// of the interpreter entry point (template interpreter or native entry)
|
||||
// and save runtime-value of return_pc in return_address
|
||||
// (call by reference argument).
|
||||
return_address = __ call_stub(r_new_arg_entry);
|
||||
@@ -309,11 +309,11 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
{
|
||||
BLOCK_COMMENT("restore registers {");
|
||||
// Returned from frame manager or native entry.
|
||||
// Returned from template interpreter or native entry.
|
||||
// Now pop frame, process result, and return to caller.
|
||||
|
||||
//
|
||||
// Stack on exit from frame manager / native entry:
|
||||
// Stack on exit from template interpreter / native entry:
|
||||
//
|
||||
// F0 [ABI]
|
||||
// ...
|
||||
@@ -330,7 +330,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
__ pop_frame();
|
||||
|
||||
// Reload some volatile registers which we've spilled before the call
|
||||
// to frame manager / native entry.
|
||||
// to template interpreter / native entry.
|
||||
// Access all locals via frame pointer, because we know nothing about
|
||||
// the topmost frame's size.
|
||||
__ z_lg(r_arg_result_addr, result_address_offset, r_entryframe_fp);
|
||||
@@ -3283,6 +3283,10 @@ class StubGenerator: public StubCodeGenerator {
|
||||
return start;
|
||||
}
|
||||
|
||||
void generate_preuniverse_stubs() {
|
||||
// preuniverse stubs are not needed for s390
|
||||
}
|
||||
|
||||
void generate_initial_stubs() {
|
||||
// Generates all stubs and initializes the entry points.
|
||||
|
||||
@@ -3418,6 +3422,9 @@ class StubGenerator: public StubCodeGenerator {
|
||||
public:
|
||||
StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) {
|
||||
switch(blob_id) {
|
||||
case preuniverse_id:
|
||||
generate_preuniverse_stubs();
|
||||
break;
|
||||
case initial_id:
|
||||
generate_initial_stubs();
|
||||
break;
|
||||
|
||||
@@ -1217,7 +1217,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
|
||||
|
||||
// Various method entries
|
||||
|
||||
// Math function, frame manager must set up an interpreter state, etc.
|
||||
// Math function, template interpreter must set up an interpreter state, etc.
|
||||
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
|
||||
|
||||
// Decide what to do: Use same platform specific instructions and runtime calls as compilers.
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -42,15 +42,15 @@ define_pd_global(intx, CompileThreshold, 1500 );
|
||||
|
||||
define_pd_global(intx, OnStackReplacePercentage, 933 );
|
||||
define_pd_global(size_t, NewSizeThreadIncrease, 4*K );
|
||||
define_pd_global(uintx, InitialCodeCacheSize, 160*K);
|
||||
define_pd_global(uintx, ReservedCodeCacheSize, 32*M );
|
||||
define_pd_global(uintx, NonProfiledCodeHeapSize, 13*M );
|
||||
define_pd_global(uintx, ProfiledCodeHeapSize, 14*M );
|
||||
define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(size_t, InitialCodeCacheSize, 160*K);
|
||||
define_pd_global(size_t, ReservedCodeCacheSize, 32*M );
|
||||
define_pd_global(size_t, NonProfiledCodeHeapSize, 13*M );
|
||||
define_pd_global(size_t, ProfiledCodeHeapSize, 14*M );
|
||||
define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(bool, ProfileInterpreter, false);
|
||||
define_pd_global(uintx, CodeCacheExpansionSize, 32*K );
|
||||
define_pd_global(uintx, CodeCacheMinBlockLength, 1 );
|
||||
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
|
||||
define_pd_global(size_t, CodeCacheExpansionSize, 32*K );
|
||||
define_pd_global(size_t, CodeCacheMinBlockLength, 1 );
|
||||
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
|
||||
define_pd_global(bool, NeverActAsServerClassMachine, true );
|
||||
define_pd_global(uint64_t, MaxRAM, 1ULL*G);
|
||||
define_pd_global(bool, CICompileOSR, true );
|
||||
|
||||
@@ -4655,6 +4655,7 @@ static void convertF2I_slowpath(C2_MacroAssembler& masm, C2GeneralStub<Register,
__ subptr(rsp, 8);
__ movdbl(Address(rsp), src);
__ call(RuntimeAddress(target));
// APX REX2 encoding for pop(dst) increases the stub size by 1 byte.
__ pop(dst);
__ jmp(stub.continuation());
#undef __
@@ -4687,7 +4688,9 @@ void C2_MacroAssembler::convertF2I(BasicType dst_bt, BasicType src_bt, Register
}
}

auto stub = C2CodeStub::make<Register, XMMRegister, address>(dst, src, slowpath_target, 23, convertF2I_slowpath);
// Using the APX extended general purpose registers increases the instruction encoding size by 1 byte.
int max_size = 23 + (UseAPX ? 1 : 0);
auto stub = C2CodeStub::make<Register, XMMRegister, address>(dst, src, slowpath_target, max_size, convertF2I_slowpath);
jcc(Assembler::equal, stub->entry());
bind(stub->continuation());
}

@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -50,8 +50,8 @@ define_pd_global(intx, InteriorEntryAlignment, 16);
|
||||
define_pd_global(size_t, NewSizeThreadIncrease, ScaleForWordSize(4*K));
|
||||
define_pd_global(intx, LoopUnrollLimit, 60);
|
||||
// InitialCodeCacheSize derived from specjbb2000 run.
|
||||
define_pd_global(uintx, InitialCodeCacheSize, 2496*K); // Integral multiple of CodeCacheExpansionSize
|
||||
define_pd_global(uintx, CodeCacheExpansionSize, 64*K);
|
||||
define_pd_global(size_t, InitialCodeCacheSize, 2496*K); // Integral multiple of CodeCacheExpansionSize
|
||||
define_pd_global(size_t, CodeCacheExpansionSize, 64*K);
|
||||
|
||||
// Ergonomics related flags
|
||||
define_pd_global(uint64_t, MaxRAM, 128ULL*G);
|
||||
@@ -60,8 +60,8 @@ define_pd_global(intx, InteriorEntryAlignment, 4);
|
||||
define_pd_global(size_t, NewSizeThreadIncrease, 4*K);
|
||||
define_pd_global(intx, LoopUnrollLimit, 50); // Design center runs on 1.3.1
|
||||
// InitialCodeCacheSize derived from specjbb2000 run.
|
||||
define_pd_global(uintx, InitialCodeCacheSize, 2304*K); // Integral multiple of CodeCacheExpansionSize
|
||||
define_pd_global(uintx, CodeCacheExpansionSize, 32*K);
|
||||
define_pd_global(size_t, InitialCodeCacheSize, 2304*K); // Integral multiple of CodeCacheExpansionSize
|
||||
define_pd_global(size_t, CodeCacheExpansionSize, 32*K);
|
||||
|
||||
// Ergonomics related flags
|
||||
define_pd_global(uint64_t, MaxRAM, 4ULL*G);
|
||||
@@ -79,12 +79,12 @@ define_pd_global(bool, SuperWordLoopUnrollAnalysis, true);
|
||||
define_pd_global(uint, SuperWordStoreToLoadForwardingFailureDetection, 16);
|
||||
define_pd_global(bool, IdealizeClearArrayNode, true);
|
||||
|
||||
define_pd_global(uintx, ReservedCodeCacheSize, 48*M);
|
||||
define_pd_global(uintx, NonProfiledCodeHeapSize, 21*M);
|
||||
define_pd_global(uintx, ProfiledCodeHeapSize, 22*M);
|
||||
define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(uintx, CodeCacheMinBlockLength, 6);
|
||||
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
|
||||
define_pd_global(size_t, ReservedCodeCacheSize, 48*M);
|
||||
define_pd_global(size_t, NonProfiledCodeHeapSize, 21*M);
|
||||
define_pd_global(size_t, ProfiledCodeHeapSize, 22*M);
|
||||
define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(size_t, CodeCacheMinBlockLength, 6);
|
||||
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
|
||||
|
||||
define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed on x86.
|
||||
|
||||
|
||||
@@ -118,6 +118,10 @@ instruct zLoadP(rRegP dst, memory mem, rFlagsReg cr)
|
||||
predicate(UseZGC && n->as_Load()->barrier_data() != 0);
|
||||
match(Set dst (LoadP mem));
|
||||
effect(TEMP dst, KILL cr);
|
||||
// The main load is a candidate to implement implicit null checks. The
|
||||
// barrier's slow path includes an identical reload, which does not need to be
|
||||
// registered in the exception table because it is dominated by the main one.
|
||||
ins_is_late_expanded_null_check_candidate(true);
|
||||
|
||||
ins_cost(125);
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -37,7 +37,7 @@ define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap nulls
|
||||
|
||||
define_pd_global(bool, DelayCompilerStubsGeneration, COMPILER2_OR_JVMCI);
|
||||
|
||||
define_pd_global(uintx, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
|
||||
define_pd_global(size_t, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
|
||||
// See 4827828 for this change. There is no globals_core_i486.hpp. I can't
|
||||
// assign a different value for C2 without touching a number of files. Use
|
||||
// #ifdef to minimize the change as it's late in Mantis. -- FIXME.
|
||||
|
||||
@@ -26,6 +26,13 @@
|
||||
#ifndef CPU_X86_STUBDECLARATIONS_HPP
|
||||
#define CPU_X86_STUBDECLARATIONS_HPP
|
||||
|
||||
#define STUBGEN_PREUNIVERSE_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(preuniverse, 500) \
|
||||
|
||||
|
||||
#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
@@ -239,7 +246,7 @@
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(final, 31000 \
|
||||
do_arch_blob(final, 33000 \
|
||||
WINDOWS_ONLY(+22000) ZGC_ONLY(+20000)) \
|
||||
|
||||
#endif // CPU_X86_STUBDECLARATIONS_HPP
|
||||
|
||||
@@ -4049,6 +4049,11 @@ void StubGenerator::create_control_words() {
|
||||
}
|
||||
|
||||
// Initialization
|
||||
void StubGenerator::generate_preuniverse_stubs() {
|
||||
// atomic calls
|
||||
StubRoutines::_fence_entry = generate_orderaccess_fence();
|
||||
}
|
||||
|
||||
void StubGenerator::generate_initial_stubs() {
|
||||
// Generates all stubs and initializes the entry points
|
||||
|
||||
@@ -4074,9 +4079,6 @@ void StubGenerator::generate_initial_stubs() {
|
||||
// is referenced by megamorphic call
|
||||
StubRoutines::_catch_exception_entry = generate_catch_exception();
|
||||
|
||||
// atomic calls
|
||||
StubRoutines::_fence_entry = generate_orderaccess_fence();
|
||||
|
||||
// platform dependent
|
||||
StubRoutines::x86::_get_previous_sp_entry = generate_get_previous_sp();
|
||||
|
||||
@@ -4344,6 +4346,9 @@ void StubGenerator::generate_compiler_stubs() {
|
||||
|
||||
StubGenerator::StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) {
|
||||
switch(blob_id) {
|
||||
case preuniverse_id:
|
||||
generate_preuniverse_stubs();
|
||||
break;
|
||||
case initial_id:
|
||||
generate_initial_stubs();
|
||||
break;
|
||||
|
||||
@@ -634,6 +634,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
void create_control_words();
|
||||
|
||||
// Initialization
|
||||
void generate_preuniverse_stubs();
|
||||
void generate_initial_stubs();
|
||||
void generate_continuation_stubs();
|
||||
void generate_compiler_stubs();
|
||||
|
||||
@@ -465,13 +465,19 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
|
||||
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dtan));
|
||||
}
|
||||
} else if (kind == Interpreter::java_lang_math_tanh) {
|
||||
assert(StubRoutines::dtanh() != nullptr, "not initialized");
|
||||
if (StubRoutines::dtanh() != nullptr) {
|
||||
__ movdbl(xmm0, Address(rsp, wordSize));
|
||||
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dtanh())));
|
||||
} else {
|
||||
return nullptr; // Fallback to default implementation
|
||||
}
|
||||
} else if (kind == Interpreter::java_lang_math_cbrt) {
|
||||
assert(StubRoutines::dcbrt() != nullptr, "not initialized");
|
||||
__ movdbl(xmm0, Address(rsp, wordSize));
|
||||
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dcbrt())));
|
||||
if (StubRoutines::dcbrt() != nullptr) {
|
||||
__ movdbl(xmm0, Address(rsp, wordSize));
|
||||
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dcbrt())));
|
||||
} else {
|
||||
return nullptr; // Fallback to default implementation
|
||||
}
|
||||
} else if (kind == Interpreter::java_lang_math_abs) {
|
||||
assert(StubRoutines::x86::double_sign_mask() != nullptr, "not initialized");
|
||||
__ movdbl(xmm0, Address(rsp, wordSize));
|
||||
|
||||
@@ -2111,7 +2111,7 @@ bool VM_Version::is_intel_cascade_lake() {
|
||||
// has improved implementation of 64-byte load/stores and so the default
|
||||
// threshold is set to 0 for these platforms.
|
||||
int VM_Version::avx3_threshold() {
|
||||
return (is_intel_family_core() &&
|
||||
return (is_intel_server_family() &&
|
||||
supports_serialize() &&
|
||||
FLAG_IS_DEFAULT(AVX3Threshold)) ? 0 : AVX3Threshold;
|
||||
}
|
||||
|
||||
@@ -2055,6 +2055,10 @@ ins_attrib ins_alignment(1); // Required alignment attribute (must
|
||||
// compute_padding() function must be
|
||||
// provided for the instruction
|
||||
|
||||
// Whether this node is expanded during code emission into a sequence of
|
||||
// instructions and the first instruction can perform an implicit null check.
|
||||
ins_attrib ins_is_late_expanded_null_check_candidate(false);
|
||||
|
||||
//----------OPERANDS-----------------------------------------------------------
|
||||
// Operand definitions must precede instruction definitions for correct parsing
|
||||
// in the ADLC because operands constitute user defined types which are used in
|
||||
@@ -10527,7 +10531,8 @@ instruct xorI_rReg_im1_ndd(rRegI dst, rRegI src, immI_M1 imm)
// Xor Register with Immediate
instruct xorI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
%{
predicate(!UseAPX);
// Strict predicate check to make selection of xorI_rReg_im1 cost agnostic if immI src is -1.
predicate(!UseAPX && n->in(2)->bottom_type()->is_int()->get_con() != -1);
match(Set dst (XorI dst src));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
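The strict predicate added above matters because xoring with an all-ones immediate is simply a bitwise NOT, for which the dedicated xorI_rReg_im1 rule exists; a hedged illustration of the source-level pattern being steered to that rule (illustrative only, not from the patch):

// Illustration: x ^ -1 flips every bit, i.e. it is the same as ~x, so the
// matcher should pick the dedicated "xor with -1" rule rather than this one.
static int flip_all_bits(int x) {
    return x ^ -1;   // identical result to ~x
}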
@@ -10541,7 +10546,8 @@ instruct xorI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
|
||||
|
||||
instruct xorI_rReg_rReg_imm_ndd(rRegI dst, rRegI src1, immI src2, rFlagsReg cr)
|
||||
%{
|
||||
predicate(UseAPX);
|
||||
// Strict predicate check to make selection of xorI_rReg_im1_ndd cost agnostic if immI src2 is -1.
|
||||
predicate(UseAPX && n->in(2)->bottom_type()->is_int()->get_con() != -1);
|
||||
match(Set dst (XorI src1 src2));
|
||||
effect(KILL cr);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
|
||||
@@ -10559,6 +10565,7 @@ instruct xorI_rReg_mem_imm_ndd(rRegI dst, memory src1, immI src2, rFlagsReg cr)
|
||||
predicate(UseAPX);
|
||||
match(Set dst (XorI (LoadI src1) src2));
|
||||
effect(KILL cr);
|
||||
ins_cost(150);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
|
||||
|
||||
format %{ "exorl $dst, $src1, $src2\t# int ndd" %}
|
||||
@@ -11201,7 +11208,8 @@ instruct xorL_rReg_im1_ndd(rRegL dst,rRegL src, immL_M1 imm)
|
||||
// Xor Register with Immediate
|
||||
instruct xorL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
|
||||
%{
|
||||
predicate(!UseAPX);
|
||||
// Strict predicate check to make selection of xorL_rReg_im1 cost agnostic if immL32 src is -1.
|
||||
predicate(!UseAPX && n->in(2)->bottom_type()->is_long()->get_con() != -1L);
|
||||
match(Set dst (XorL dst src));
|
||||
effect(KILL cr);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
|
||||
@@ -11215,7 +11223,8 @@ instruct xorL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
|
||||
|
||||
instruct xorL_rReg_rReg_imm(rRegL dst, rRegL src1, immL32 src2, rFlagsReg cr)
|
||||
%{
|
||||
predicate(UseAPX);
|
||||
// Strict predicate check to make selection of xorL_rReg_im1_ndd cost agnostic if immL32 src2 is -1.
|
||||
predicate(UseAPX && n->in(2)->bottom_type()->is_long()->get_con() != -1L);
|
||||
match(Set dst (XorL src1 src2));
|
||||
effect(KILL cr);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
|
||||
@@ -11234,6 +11243,7 @@ instruct xorL_rReg_mem_imm(rRegL dst, memory src1, immL32 src2, rFlagsReg cr)
|
||||
match(Set dst (XorL (LoadL src1) src2));
|
||||
effect(KILL cr);
|
||||
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
|
||||
ins_cost(150);
|
||||
|
||||
format %{ "exorq $dst, $src1, $src2\t# long ndd" %}
|
||||
ins_encode %{
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@@ -38,10 +38,10 @@ define_pd_global(bool, UncommonNullCast, true);
|
||||
|
||||
define_pd_global(bool, DelayCompilerStubsGeneration, false); // Don't have compiler's stubs
|
||||
|
||||
define_pd_global(uintx, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
|
||||
define_pd_global(intx, CodeEntryAlignment, 32);
|
||||
define_pd_global(intx, OptoLoopAlignment, 16);
|
||||
define_pd_global(intx, InlineSmallCode, 1000);
|
||||
define_pd_global(size_t, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
|
||||
define_pd_global(intx, CodeEntryAlignment, 32);
|
||||
define_pd_global(intx, OptoLoopAlignment, 16);
|
||||
define_pd_global(intx, InlineSmallCode, 1000);
|
||||
|
||||
// not used, but must satisfy following constraints:
|
||||
// 1.) <VALUE> must be in the allowed range for intx *and*
|
||||
|
||||
@@ -26,6 +26,13 @@
|
||||
#ifndef CPU_ZERO_STUBDECLARATIONS_HPP
|
||||
#define CPU_ZERO_STUBDECLARATIONS_HPP
|
||||
|
||||
#define STUBGEN_PREUNIVERSE_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(preuniverse, 0) \
|
||||
|
||||
|
||||
#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
|
||||
@@ -178,6 +178,10 @@ class StubGenerator: public StubCodeGenerator {
|
||||
StubRoutines::_oop_arraycopy;
|
||||
}
|
||||
|
||||
void generate_preuniverse_stubs() {
|
||||
StubRoutines::_fence_entry = ShouldNotCallThisStub();
|
||||
}
|
||||
|
||||
void generate_initial_stubs() {
|
||||
// entry points that exist in all platforms Note: This is code
|
||||
// that could be shared among different platforms - however the
|
||||
@@ -194,7 +198,6 @@ class StubGenerator: public StubCodeGenerator {
|
||||
StubRoutines::_atomic_cmpxchg_entry = ShouldNotCallThisStub();
|
||||
StubRoutines::_atomic_cmpxchg_long_entry = ShouldNotCallThisStub();
|
||||
StubRoutines::_atomic_add_entry = ShouldNotCallThisStub();
|
||||
StubRoutines::_fence_entry = ShouldNotCallThisStub();
|
||||
}
|
||||
|
||||
void generate_continuation_stubs() {
|
||||
@@ -214,6 +217,9 @@ class StubGenerator: public StubCodeGenerator {
|
||||
public:
|
||||
StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) {
|
||||
switch(blob_id) {
|
||||
case preuniverse_id:
|
||||
generate_preuniverse_stubs();
|
||||
break;
|
||||
case initial_id:
|
||||
generate_initial_stubs();
|
||||
break;
|
||||
|
||||
@@ -1,37 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef OS_AIX_C1_GLOBALS_AIX_HPP
|
||||
#define OS_AIX_C1_GLOBALS_AIX_HPP
|
||||
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
|
||||
//
|
||||
// Sets the default values for operating system dependent flags used by the
|
||||
// client compiler. (see c1_globals.hpp)
|
||||
//
|
||||
|
||||
#endif // OS_AIX_C1_GLOBALS_AIX_HPP
|
||||
@@ -1,37 +0,0 @@
/*
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef OS_AIX_C2_GLOBALS_AIX_HPP
#define OS_AIX_C2_GLOBALS_AIX_HPP

#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

//
// Sets the default values for operating system dependent flags used by the
// server compiler. (see c2_globals.hpp)
//

#endif // OS_AIX_C2_GLOBALS_AIX_HPP
@@ -1261,69 +1261,6 @@ void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
// Nothing to do beyond of what os::print_cpu_info() does.
}

static char saved_jvm_path[MAXPATHLEN] = {0};

// Find the full path to the current module, libjvm.so.
void os::jvm_path(char *buf, jint buflen) {
// Error checking.
if (buflen < MAXPATHLEN) {
assert(false, "must use a large-enough buffer");
buf[0] = '\0';
return;
}
// Lazy resolve the path to current module.
if (saved_jvm_path[0] != 0) {
strcpy(buf, saved_jvm_path);
return;
}

Dl_info dlinfo;
int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
assert(ret != 0, "cannot locate libjvm");
char* rp = os::realpath((char *)dlinfo.dli_fname, buf, buflen);
assert(rp != nullptr, "error in realpath(): maybe the 'path' argument is too long?");

// If executing unit tests we require JAVA_HOME to point to the real JDK.
if (Arguments::executing_unit_tests()) {
// Look for JAVA_HOME in the environment.
char* java_home_var = ::getenv("JAVA_HOME");
if (java_home_var != nullptr && java_home_var[0] != 0) {

// Check the current module name "libjvm.so".
const char* p = strrchr(buf, '/');
if (p == nullptr) {
return;
}
assert(strstr(p, "/libjvm") == p, "invalid library name");

stringStream ss(buf, buflen);
rp = os::realpath(java_home_var, buf, buflen);
if (rp == nullptr) {
return;
}

assert((int)strlen(buf) < buflen, "Ran out of buffer room");
ss.print("%s/lib", buf);

if (0 == access(buf, F_OK)) {
// Use current module name "libjvm.so"
ss.print("/%s/libjvm%s", Abstract_VM_Version::vm_variant(), JNI_LIB_SUFFIX);
assert(strcmp(buf + strlen(buf) - strlen(JNI_LIB_SUFFIX), JNI_LIB_SUFFIX) == 0,
"buf has been truncated");
} else {
// Go back to path of .so
rp = os::realpath((char *)dlinfo.dli_fname, buf, buflen);
if (rp == nullptr) {
return;
}
}
}
}

strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
}

////////////////////////////////////////////////////////////////////////////////
// Virtual Memory

@@ -1,36 +0,0 @@
/*
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef OS_BSD_C1_GLOBALS_BSD_HPP
#define OS_BSD_C1_GLOBALS_BSD_HPP

#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

//
// Sets the default values for operating system dependent flags used by the
// client compiler. (see c1_globals.hpp)
//

#endif // OS_BSD_C1_GLOBALS_BSD_HPP
@@ -154,7 +154,8 @@ julong os::Bsd::available_memory() {
assert(kerr == KERN_SUCCESS,
"host_statistics64 failed - check mach_host_self() and count");
if (kerr == KERN_SUCCESS) {
available = vmstat.free_count * os::vm_page_size();
// free_count is just a lowerbound, other page categories can be freed too and make memory available
available = (vmstat.free_count + vmstat.inactive_count + vmstat.purgeable_count) * os::vm_page_size();
}
#endif
return available;
@@ -1482,83 +1483,6 @@ void os::print_memory_info(outputStream* st) {
st->cr();
}

static char saved_jvm_path[MAXPATHLEN] = {0};

// Find the full path to the current module, libjvm
void os::jvm_path(char *buf, jint buflen) {
// Error checking.
if (buflen < MAXPATHLEN) {
assert(false, "must use a large-enough buffer");
buf[0] = '\0';
return;
}
// Lazy resolve the path to current module.
if (saved_jvm_path[0] != 0) {
strcpy(buf, saved_jvm_path);
return;
}

char dli_fname[MAXPATHLEN];
dli_fname[0] = '\0';
bool ret = dll_address_to_library_name(
CAST_FROM_FN_PTR(address, os::jvm_path),
dli_fname, sizeof(dli_fname), nullptr);
assert(ret, "cannot locate libjvm");
char *rp = nullptr;
if (ret && dli_fname[0] != '\0') {
rp = os::realpath(dli_fname, buf, buflen);
}
if (rp == nullptr) {
return;
}

// If executing unit tests we require JAVA_HOME to point to the real JDK.
if (Arguments::executing_unit_tests()) {
// Look for JAVA_HOME in the environment.
char* java_home_var = ::getenv("JAVA_HOME");
if (java_home_var != nullptr && java_home_var[0] != 0) {

// Check the current module name "libjvm"
const char* p = strrchr(buf, '/');
assert(strstr(p, "/libjvm") == p, "invalid library name");

stringStream ss(buf, buflen);
rp = os::realpath(java_home_var, buf, buflen);
if (rp == nullptr) {
return;
}

assert((int)strlen(buf) < buflen, "Ran out of buffer space");
// Add the appropriate library and JVM variant subdirs
ss.print("%s/lib/%s", buf, Abstract_VM_Version::vm_variant());

if (0 != access(buf, F_OK)) {
ss.reset();
ss.print("%s/lib", buf);
}

// If the path exists within JAVA_HOME, add the JVM library name
// to complete the path to JVM being overridden. Otherwise fallback
// to the path to the current library.
if (0 == access(buf, F_OK)) {
// Use current module name "libjvm"
ss.print("/libjvm%s", JNI_LIB_SUFFIX);
assert(strcmp(buf + strlen(buf) - strlen(JNI_LIB_SUFFIX), JNI_LIB_SUFFIX) == 0,
"buf has been truncated");
} else {
// Fall back to path of current library
rp = os::realpath(dli_fname, buf, buflen);
if (rp == nullptr) {
return;
}
}
}
}

strncpy(saved_jvm_path, buf, MAXPATHLEN);
saved_jvm_path[MAXPATHLEN - 1] = '\0';
}

////////////////////////////////////////////////////////////////////////////////
// Virtual Memory

@@ -1,36 +0,0 @@
/*
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef OS_LINUX_C1_GLOBALS_LINUX_HPP
#define OS_LINUX_C1_GLOBALS_LINUX_HPP

#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

//
// Sets the default values for operating system dependent flags used by the
// client compiler. (see c1_globals.hpp)
//

#endif // OS_LINUX_C1_GLOBALS_LINUX_HPP
@@ -1,36 +0,0 @@
/*
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef OS_LINUX_C2_GLOBALS_LINUX_HPP
#define OS_LINUX_C2_GLOBALS_LINUX_HPP

#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

//
// Sets the default values for operating system dependent flags used by the
// server compiler. (see c2_globals.hpp)
//

#endif // OS_LINUX_C2_GLOBALS_LINUX_HPP
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,9 +35,6 @@
range, \
constraint) \
\
product(bool, UseOprofile, false, \
"(Deprecated) enable support for Oprofile profiler") \
\
product(bool, UseTransparentHugePages, false, \
"Use MADV_HUGEPAGE for large pages") \
\
@@ -2746,118 +2746,9 @@ void os::get_summary_cpu_info(char* cpuinfo, size_t length) {
#endif
}

static char saved_jvm_path[MAXPATHLEN] = {0};

// Find the full path to the current module, libjvm.so
void os::jvm_path(char *buf, jint buflen) {
// Error checking.
if (buflen < MAXPATHLEN) {
assert(false, "must use a large-enough buffer");
buf[0] = '\0';
return;
}
// Lazy resolve the path to current module.
if (saved_jvm_path[0] != 0) {
strcpy(buf, saved_jvm_path);
return;
}

char dli_fname[MAXPATHLEN];
dli_fname[0] = '\0';
bool ret = dll_address_to_library_name(
CAST_FROM_FN_PTR(address, os::jvm_path),
dli_fname, sizeof(dli_fname), nullptr);
assert(ret, "cannot locate libjvm");
char *rp = nullptr;
if (ret && dli_fname[0] != '\0') {
rp = os::realpath(dli_fname, buf, buflen);
}
if (rp == nullptr) {
return;
}

// If executing unit tests we require JAVA_HOME to point to the real JDK.
if (Arguments::executing_unit_tests()) {
// Look for JAVA_HOME in the environment.
char* java_home_var = ::getenv("JAVA_HOME");
if (java_home_var != nullptr && java_home_var[0] != 0) {

// Check the current module name "libjvm.so".
const char* p = strrchr(buf, '/');
if (p == nullptr) {
return;
}
assert(strstr(p, "/libjvm") == p, "invalid library name");

stringStream ss(buf, buflen);
rp = os::realpath(java_home_var, buf, buflen);
if (rp == nullptr) {
return;
}

assert((int)strlen(buf) < buflen, "Ran out of buffer room");
ss.print("%s/lib", buf);

if (0 == access(buf, F_OK)) {
// Use current module name "libjvm.so"
ss.print("/%s/libjvm%s", Abstract_VM_Version::vm_variant(), JNI_LIB_SUFFIX);
assert(strcmp(buf + strlen(buf) - strlen(JNI_LIB_SUFFIX), JNI_LIB_SUFFIX) == 0,
"buf has been truncated");
} else {
// Go back to path of .so
rp = os::realpath(dli_fname, buf, buflen);
if (rp == nullptr) {
return;
}
}
}
}

strncpy(saved_jvm_path, buf, MAXPATHLEN);
saved_jvm_path[MAXPATHLEN - 1] = '\0';
}

////////////////////////////////////////////////////////////////////////////////
// Virtual Memory

// Rationale behind this function:
// current (Mon Apr 25 20:12:18 MSD 2005) oprofile drops samples without executable
// mapping for address (see lookup_dcookie() in the kernel module), thus we cannot get
// samples for JITted code. Here we create private executable mapping over the code cache
// and then we can use standard (well, almost, as mapping can change) way to provide
// info for the reporting script by storing timestamp and location of symbol
void linux_wrap_code(char* base, size_t size) {
static volatile jint cnt = 0;

static_assert(sizeof(off_t) == 8, "Expected Large File Support in this file");

if (!UseOprofile) {
return;
}

char buf[PATH_MAX+1];
int num = Atomic::add(&cnt, 1);

snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d",
os::get_temp_directory(), os::current_process_id(), num);
unlink(buf);

int fd = ::open(buf, O_CREAT | O_RDWR, S_IRWXU);

if (fd != -1) {
off_t rv = ::lseek(fd, size-2, SEEK_SET);
if (rv != (off_t)-1) {
if (::write(fd, "", 1) == 1) {
mmap(base, size,
PROT_READ|PROT_WRITE|PROT_EXEC,
MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, fd, 0);
}
}
::close(fd);
unlink(buf);
}
}

static bool recoverable_mmap_error(int err) {
// See if the error is one we can let the caller handle. This
// list of errno values comes from JBS-6843484. I can't find a

@@ -59,6 +59,7 @@
#ifdef AIX
#include "loadlib_aix.hpp"
#include "os_aix.hpp"
#include "porting_aix.hpp"
#endif
#ifdef LINUX
#include "os_linux.hpp"
@@ -1060,6 +1061,95 @@ bool os::same_files(const char* file1, const char* file2) {
return is_same;
}

static char saved_jvm_path[MAXPATHLEN] = {0};

// Find the full path to the current module, libjvm.so
void os::jvm_path(char *buf, jint buflen) {
// Error checking.
if (buflen < MAXPATHLEN) {
assert(false, "must use a large-enough buffer");
buf[0] = '\0';
return;
}
// Lazy resolve the path to current module.
if (saved_jvm_path[0] != 0) {
strcpy(buf, saved_jvm_path);
return;
}

const char* fname;
#ifdef AIX
Dl_info dlinfo;
int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
assert(ret != 0, "cannot locate libjvm");
if (ret == 0) {
return;
}
fname = dlinfo.dli_fname;
#else
char dli_fname[MAXPATHLEN];
dli_fname[0] = '\0';
bool ret = dll_address_to_library_name(
CAST_FROM_FN_PTR(address, os::jvm_path),
dli_fname, sizeof(dli_fname), nullptr);
assert(ret, "cannot locate libjvm");
if (!ret) {
return;
}
fname = dli_fname;
#endif // AIX
char* rp = nullptr;
if (fname[0] != '\0') {
rp = os::realpath(fname, buf, buflen);
}
if (rp == nullptr) {
return;
}

// If executing unit tests we require JAVA_HOME to point to the real JDK.
if (Arguments::executing_unit_tests()) {
// Look for JAVA_HOME in the environment.
char* java_home_var = ::getenv("JAVA_HOME");
if (java_home_var != nullptr && java_home_var[0] != 0) {

// Check the current module name "libjvm.so".
const char* p = strrchr(buf, '/');
if (p == nullptr) {
return;
}
assert(strstr(p, "/libjvm") == p, "invalid library name");

stringStream ss(buf, buflen);
rp = os::realpath(java_home_var, buf, buflen);
if (rp == nullptr) {
return;
}

assert((int)strlen(buf) < buflen, "Ran out of buffer room");
ss.print("%s/lib", buf);

// If the path exists within JAVA_HOME, add the VM variant directory and JVM
// library name to complete the path to JVM being overridden. Otherwise fallback
// to the path to the current library.
if (0 == access(buf, F_OK)) {
// Use current module name "libjvm.so"
ss.print("/%s/libjvm%s", Abstract_VM_Version::vm_variant(), JNI_LIB_SUFFIX);
assert(strcmp(buf + strlen(buf) - strlen(JNI_LIB_SUFFIX), JNI_LIB_SUFFIX) == 0,
"buf has been truncated");
} else {
// Go back to path of .so
rp = os::realpath(fname, buf, buflen);
if (rp == nullptr) {
return;
}
}
}
}

strncpy(saved_jvm_path, buf, MAXPATHLEN);
saved_jvm_path[MAXPATHLEN - 1] = '\0';
}

// Called when creating the thread. The minimum stack sizes have already been calculated
size_t os::Posix::get_initial_stack_size(ThreadType thr_type, size_t req_stack_size) {
size_t stack_size;

@@ -1,36 +0,0 @@
/*
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef OS_WINDOWS_C1_GLOBALS_WINDOWS_HPP
#define OS_WINDOWS_C1_GLOBALS_WINDOWS_HPP

#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

//
// Sets the default values for operating system dependent flags used by the
// client compiler. (see c1_globals.hpp)
//

#endif // OS_WINDOWS_C1_GLOBALS_WINDOWS_HPP
@@ -1,36 +0,0 @@
/*
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef OS_WINDOWS_C2_GLOBALS_WINDOWS_HPP
#define OS_WINDOWS_C2_GLOBALS_WINDOWS_HPP

#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

//
// Sets the default values for operating system dependent flags used by the
// server compiler. (see c2_globals.hpp)
//

#endif // OS_WINDOWS_C2_GLOBALS_WINDOWS_HPP
@@ -81,14 +81,12 @@
#endif

#define SPELL_REG_SP "sp"
#define SPELL_REG_FP "fp"

#ifdef __APPLE__
// see darwin-xnu/osfmk/mach/arm/_structs.h

// 10.5 UNIX03 member name prefixes
#define DU3_PREFIX(s, m) __ ## s.__ ## m
#endif

#define context_x uc_mcontext->DU3_PREFIX(ss,x)
#define context_fp uc_mcontext->DU3_PREFIX(ss,fp)
@@ -97,6 +95,31 @@
#define context_pc uc_mcontext->DU3_PREFIX(ss,pc)
#define context_cpsr uc_mcontext->DU3_PREFIX(ss,cpsr)
#define context_esr uc_mcontext->DU3_PREFIX(es,esr)
#endif

#ifdef __FreeBSD__
# define context_x uc_mcontext.mc_gpregs.gp_x
# define context_fp context_x[REG_FP]
# define context_lr uc_mcontext.mc_gpregs.gp_lr
# define context_sp uc_mcontext.mc_gpregs.gp_sp
# define context_pc uc_mcontext.mc_gpregs.gp_elr
#endif

#ifdef __NetBSD__
# define context_x uc_mcontext.__gregs
# define context_fp uc_mcontext.__gregs[_REG_FP]
# define context_lr uc_mcontext.__gregs[_REG_LR]
# define context_sp uc_mcontext.__gregs[_REG_SP]
# define context_pc uc_mcontext.__gregs[_REG_ELR]
#endif

#ifdef __OpenBSD__
# define context_x sc_x
# define context_fp sc_x[REG_FP]
# define context_lr sc_lr
# define context_sp sc_sp
# define context_pc sc_elr
#endif

#define REG_BCP context_x[22]

@@ -497,9 +520,11 @@ int os::extra_bang_size_in_bytes() {
return 0;
}

#ifdef __APPLE__
void os::current_thread_enable_wx(WXMode mode) {
pthread_jit_write_protect_np(mode == WXExec);
}
#endif

static inline void atomic_copy64(const volatile void *src, volatile void *dst) {
*(jlong *) dst = *(const jlong *) src;

@@ -481,7 +481,3 @@ int get_legal_text(FileBuff &fbuf, char **legal_text)
*legal_text = legal_start;
return (int) (legal_end - legal_start);
}

void *operator new( size_t size, int, const char *, int ) throw() {
return ::operator new( size );
}
Some files were not shown because too many files have changed in this diff.