mirror of
https://github.com/JetBrains/JetBrainsRuntime.git
synced 2025-12-20 16:29:43 +01:00
Compare commits
209 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2d09284060 | ||
|
|
0a70da46f6 | ||
|
|
400d8cfb69 | ||
|
|
c1ad393e25 | ||
|
|
23fc21a3e2 | ||
|
|
821e9ff965 | ||
|
|
6c9999ca8d | ||
|
|
dca55b4366 | ||
|
|
b5ac8f8368 | ||
|
|
45642acf1b | ||
|
|
14c93b2afb | ||
|
|
53e77d21c2 | ||
|
|
45cf042099 | ||
|
|
5eb8774909 | ||
|
|
e72f205ae3 | ||
|
|
360777c3ad | ||
|
|
f0add88545 | ||
|
|
623164651c | ||
|
|
f88cbfb8c6 | ||
|
|
8a93658e87 | ||
|
|
6cbfc7691f | ||
|
|
0b2712400b | ||
|
|
7a7e7c9ae1 | ||
|
|
b848ddf6d3 | ||
|
|
3258e4dafa | ||
|
|
629e4ac6f4 | ||
|
|
2c0d9a79b8 | ||
|
|
2ba423db99 | ||
|
|
4f283f188c | ||
|
|
d8eb1259f4 | ||
|
|
c6da35d7c7 | ||
|
|
e5ca77838b | ||
|
|
3f20eb9435 | ||
|
|
a31e6e0d3b | ||
|
|
859830694b | ||
|
|
e67805067a | ||
|
|
00050f84d4 | ||
|
|
b446262541 | ||
|
|
0146077a51 | ||
|
|
ea5834415d | ||
|
|
c16ce929c7 | ||
|
|
17d633a8ee | ||
|
|
232b41b222 | ||
|
|
b3fab41460 | ||
|
|
e75726ee03 | ||
|
|
f3a48560b5 | ||
|
|
4e05748f08 | ||
|
|
9862f8f0d3 | ||
|
|
39306d7ab9 | ||
|
|
5e7ae28132 | ||
|
|
e4636d69e7 | ||
|
|
9a23f8aa33 | ||
|
|
fc76403b01 | ||
|
|
af18fbd42d | ||
|
|
4924b29fa5 | ||
|
|
9e2008bf5e | ||
|
|
386ad61458 | ||
|
|
94c51ce314 | ||
|
|
e9b4696acc | ||
|
|
e635330ae1 | ||
|
|
3f07710270 | ||
|
|
87d881fee0 | ||
|
|
30be94086a | ||
|
|
2241218ef6 | ||
|
|
fb99ba6ccd | ||
|
|
d02abfe765 | ||
|
|
1e357e9e97 | ||
|
|
817e3dfde9 | ||
|
|
a0dd66f92d | ||
|
|
b0b42e7eb1 | ||
|
|
81e3757688 | ||
|
|
76e79dbb3e | ||
|
|
89e77512fd | ||
|
|
a61394b1da | ||
|
|
53ebcdbd02 | ||
|
|
41d28c1838 | ||
|
|
43d4456181 | ||
|
|
8402891889 | ||
|
|
78c2d57259 | ||
|
|
b1e8c4e030 | ||
|
|
3f33eaa42a | ||
|
|
1748737b99 | ||
|
|
317788ff12 | ||
|
|
6aeabd4bfa | ||
|
|
f52d49925f | ||
|
|
45ee89c4c8 | ||
|
|
ad29642d8f | ||
|
|
ea6493c4e1 | ||
|
|
34f241317e | ||
|
|
1f47294cd3 | ||
|
|
f5187ebf7a | ||
|
|
629bf20f59 | ||
|
|
3559eeca0e | ||
|
|
ad6611a9a3 | ||
|
|
895232fc65 | ||
|
|
5141e1a4f4 | ||
|
|
01adf28c94 | ||
|
|
dc1b0b5f81 | ||
|
|
0e7bc6b092 | ||
|
|
5edeb71e3b | ||
|
|
eda1ab2143 | ||
|
|
d03e7cb87a | ||
|
|
99f90befaf | ||
|
|
fb531cdaf3 | ||
|
|
104d0cb542 | ||
|
|
4f1dcf89b8 | ||
|
|
17744fbfc0 | ||
|
|
d054865200 | ||
|
|
23c39757ec | ||
|
|
f2e56e4c18 | ||
|
|
4e9525ef36 | ||
|
|
b6319f5b42 | ||
|
|
6e2ab84154 | ||
|
|
9b12c0bb19 | ||
|
|
e65e06867e | ||
|
|
0eb2bcd260 | ||
|
|
6ec36d348b | ||
|
|
a99f340e1b | ||
|
|
d854a04231 | ||
|
|
410014377c | ||
|
|
a05d5d2514 | ||
|
|
180d8c1b57 | ||
|
|
dc6255261f | ||
|
|
650de99fc6 | ||
|
|
325cdb7fc5 | ||
|
|
c46bed7292 | ||
|
|
ae85d899d0 | ||
|
|
66d7b0ce8f | ||
|
|
431dcf84e9 | ||
|
|
692edc4879 | ||
|
|
2a1c676e0a | ||
|
|
b0bd0c398e | ||
|
|
e1d1d53cd1 | ||
|
|
aa986be752 | ||
|
|
6a6ff876c5 | ||
|
|
4b774cb46d | ||
|
|
b46aef88b3 | ||
|
|
920a99faeb | ||
|
|
74dca863c2 | ||
|
|
52aa7fe1c9 | ||
|
|
413f852bdb | ||
|
|
11aa6e10c0 | ||
|
|
54430a8722 | ||
|
|
655e9cda3f | ||
|
|
b58e3b600b | ||
|
|
8eaeb6990b | ||
|
|
b60ac710be | ||
|
|
00068a8030 | ||
|
|
1bbbce75c5 | ||
|
|
a5968f9364 | ||
|
|
d36a234c12 | ||
|
|
b6732d6048 | ||
|
|
a26221299e | ||
|
|
eef9813ad4 | ||
|
|
7f9951a934 | ||
|
|
1ae4a6c43e | ||
|
|
b2daf9de30 | ||
|
|
b99be505a5 | ||
|
|
831fe94c75 | ||
|
|
8c8d21db6f | ||
|
|
a4eb57c5ec | ||
|
|
830c4d3b19 | ||
|
|
0a557890a5 | ||
|
|
1f49edd978 | ||
|
|
786833cd1b | ||
|
|
9c91c68d1d | ||
|
|
24244e4121 | ||
|
|
3a8a6e07f2 | ||
|
|
cba09cd10d | ||
|
|
020e3f9591 | ||
|
|
35fe0b1101 | ||
|
|
c9ab330b7b | ||
|
|
3ea82b9ff9 | ||
|
|
b1c9550182 | ||
|
|
c03d445a8c | ||
|
|
b86b2cbc7d | ||
|
|
8df3f3d341 | ||
|
|
b118caf677 | ||
|
|
d34ef196c2 | ||
|
|
811591c5c3 | ||
|
|
355755d35d | ||
|
|
ac81ce51fa | ||
|
|
ed5fc9ad2d | ||
|
|
6700baa505 | ||
|
|
b83bf0717e | ||
|
|
a659479483 | ||
|
|
3500150882 | ||
|
|
7da91533aa | ||
|
|
5f083abafc | ||
|
|
b0f59f6021 | ||
|
|
2596608ba1 | ||
|
|
be8cbfa612 | ||
|
|
f3dd8daaa9 | ||
|
|
4378789029 | ||
|
|
a20b7eb943 | ||
|
|
520c092a65 | ||
|
|
4d696d0d0e | ||
|
|
ee0b8a72c6 | ||
|
|
c09167df60 | ||
|
|
674cc3eeca | ||
|
|
7e91d34f3e | ||
|
|
15f2538943 | ||
|
|
13e32bf166 | ||
|
|
6db1c4f5b9 | ||
|
|
c8b30da7ef | ||
|
|
5ec5a6ea6c | ||
|
|
8e653d394e | ||
|
|
ef7532e7e6 | ||
|
|
b19163b107 |
@@ -1,7 +1,7 @@
|
||||
# Welcome to the JDK!
|
||||
|
||||
For build instructions please see the
|
||||
[online documentation](https://openjdk.org/groups/build/doc/building.html),
|
||||
[online documentation](https://git.openjdk.org/jdk/blob/master/doc/building.md),
|
||||
or either of these files:
|
||||
|
||||
- [doc/building.html](doc/building.html) (html version)
|
||||
|
||||
@@ -185,77 +185,30 @@ endif
|
||||
|
||||
ifneq ($(filter product-bundles% legacy-bundles, $(MAKECMDGOALS)), )
|
||||
|
||||
SYMBOLS_EXCLUDE_PATTERN := %.debuginfo %.diz %.map
|
||||
|
||||
# There may be files with spaces in the names, so use ShellFindFiles
|
||||
# explicitly.
|
||||
# There may be files with spaces in the names, so use ShellFindFiles explicitly.
|
||||
ALL_JDK_FILES := $(call ShellFindFiles, $(JDK_IMAGE_DIR))
|
||||
ifneq ($(JDK_IMAGE_DIR), $(JDK_SYMBOLS_IMAGE_DIR))
|
||||
ALL_JDK_SYMBOLS_FILES := $(call ShellFindFiles, $(JDK_SYMBOLS_IMAGE_DIR))
|
||||
else
|
||||
ALL_JDK_SYMBOLS_FILES := $(ALL_JDK_FILES)
|
||||
endif
|
||||
ifneq ($(JDK_IMAGE_DIR), $(JDK_DEMOS_IMAGE_DIR))
|
||||
ALL_JDK_DEMOS_FILES := $(call ShellFindFiles, $(JDK_DEMOS_IMAGE_DIR))
|
||||
else
|
||||
ALL_JDK_DEMOS_FILES := $(ALL_JDK_FILES)
|
||||
endif
|
||||
|
||||
# Create special filter rules when dealing with unzipped .dSYM directories on
|
||||
# macosx
|
||||
ifeq ($(call isTargetOs, macosx), true)
|
||||
ifeq ($(ZIP_EXTERNAL_DEBUG_SYMBOLS), false)
|
||||
JDK_SYMBOLS_EXCLUDE_PATTERN := $(addprefix %, \
|
||||
$(call containing, .dSYM/, $(patsubst $(JDK_IMAGE_DIR)/%, %, \
|
||||
$(ALL_JDK_SYMBOLS_FILES))))
|
||||
endif
|
||||
endif
|
||||
|
||||
# Create special filter rules when dealing with debug symbols on windows
|
||||
ifeq ($(call isTargetOs, windows), true)
|
||||
ifeq ($(SHIP_DEBUG_SYMBOLS), )
|
||||
JDK_SYMBOLS_EXCLUDE_PATTERN := %.pdb
|
||||
endif
|
||||
endif
|
||||
|
||||
JDK_BUNDLE_FILES := \
|
||||
$(filter-out \
|
||||
$(JDK_SYMBOLS_EXCLUDE_PATTERN) \
|
||||
$(JDK_EXTRA_EXCLUDES) \
|
||||
$(SYMBOLS_EXCLUDE_PATTERN) \
|
||||
$(JDK_IMAGE_HOMEDIR)/demo/% \
|
||||
, \
|
||||
$(ALL_JDK_FILES) \
|
||||
)
|
||||
|
||||
JDK_SYMBOLS_BUNDLE_FILES := \
|
||||
$(call FindFiles, $(SYMBOLS_IMAGE_DIR))
|
||||
JDK_SYMBOLS_BUNDLE_FILES := $(call FindFiles, $(SYMBOLS_IMAGE_DIR))
|
||||
|
||||
TEST_DEMOS_BUNDLE_FILES := $(filter $(JDK_DEMOS_IMAGE_HOMEDIR)/demo/%, \
|
||||
$(ALL_JDK_DEMOS_FILES))
|
||||
|
||||
ALL_JRE_FILES := $(call ShellFindFiles, $(JRE_IMAGE_DIR))
|
||||
|
||||
# Create special filter rules when dealing with unzipped .dSYM directories on
|
||||
# macosx
|
||||
ifeq ($(OPENJDK_TARGET_OS), macosx)
|
||||
ifeq ($(ZIP_EXTERNAL_DEBUG_SYMBOLS), false)
|
||||
JRE_SYMBOLS_EXCLUDE_PATTERN := $(addprefix %, \
|
||||
$(call containing, .dSYM/, $(patsubst $(JRE_IMAGE_DIR)/%, %, $(ALL_JRE_FILES))))
|
||||
endif
|
||||
endif
|
||||
|
||||
# Create special filter rules when dealing with debug symbols on windows
|
||||
ifeq ($(call isTargetOs, windows), true)
|
||||
ifeq ($(SHIP_DEBUG_SYMBOLS), )
|
||||
JRE_SYMBOLS_EXCLUDE_PATTERN := %.pdb
|
||||
endif
|
||||
endif
|
||||
|
||||
JRE_BUNDLE_FILES := $(filter-out \
|
||||
$(JRE_SYMBOLS_EXCLUDE_PATTERN) \
|
||||
$(SYMBOLS_EXCLUDE_PATTERN), \
|
||||
$(ALL_JRE_FILES))
|
||||
JRE_BUNDLE_FILES := $(ALL_JRE_FILES)
|
||||
|
||||
ifeq ($(MACOSX_CODESIGN_MODE), hardened)
|
||||
# Macosx release build and code signing available.
|
||||
|
||||
@@ -218,10 +218,14 @@ ifeq ($(call isTargetOs, windows), true)
|
||||
ifeq ($(SHIP_DEBUG_SYMBOLS), )
|
||||
JMOD_FLAGS += --exclude '**{_the.*,_*.marker*,*.diz,*.pdb,*.map}'
|
||||
else
|
||||
JMOD_FLAGS += --exclude '**{_the.*,_*.marker*,*.diz,*.map}'
|
||||
JMOD_FLAGS += --exclude '**{_the.*,_*.marker*,*.map}'
|
||||
endif
|
||||
else
|
||||
ifeq ($(SHIP_DEBUG_SYMBOLS), )
|
||||
JMOD_FLAGS += --exclude '**{_the.*,_*.marker*,*.diz,*.debuginfo,*.dSYM/**,*.dSYM}'
|
||||
else
|
||||
JMOD_FLAGS += --exclude '**{_the.*,_*.marker*}'
|
||||
endif
|
||||
endif
|
||||
|
||||
# Unless we are creating a very large module, use the small tool JVM options
|
||||
|
||||
@@ -93,16 +93,19 @@ JAVADOC_DISABLED_DOCLINT_WARNINGS := missing
|
||||
JAVADOC_DISABLED_DOCLINT_PACKAGES := org.w3c.* javax.smartcardio
|
||||
|
||||
# The initial set of options for javadoc
|
||||
# -XDaccessInternalAPI is a temporary workaround, see 8373909
|
||||
JAVADOC_OPTIONS := -use -keywords -notimestamp \
|
||||
-serialwarn -encoding utf-8 -docencoding utf-8 -breakiterator \
|
||||
-splitIndex --system none -javafx --expand-requires transitive \
|
||||
--override-methods=summary
|
||||
--override-methods=summary \
|
||||
-XDaccessInternalAPI
|
||||
|
||||
# The reference options must stay stable to allow for comparisons across the
|
||||
# development cycle.
|
||||
REFERENCE_OPTIONS := -XDignore.symbol.file=true -use -keywords -notimestamp \
|
||||
-serialwarn -encoding utf-8 -breakiterator -splitIndex --system none \
|
||||
-html5 -javafx --expand-requires transitive
|
||||
-html5 -javafx --expand-requires transitive \
|
||||
-XDaccessInternalAPI
|
||||
|
||||
# Should we add DRAFT stamps to the generated javadoc?
|
||||
ifeq ($(VERSION_IS_GA), true)
|
||||
|
||||
@@ -63,7 +63,7 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
|
||||
fi
|
||||
|
||||
BASIC_LDFLAGS_JVM_ONLY=""
|
||||
LDFLAGS_LTO="-flto=auto -fuse-linker-plugin -fno-strict-aliasing"
|
||||
LDFLAGS_LTO="-flto=auto -fuse-linker-plugin -fno-strict-aliasing $DEBUG_PREFIX_CFLAGS"
|
||||
|
||||
LDFLAGS_CXX_PARTIAL_LINKING="$MACHINE_FLAG -r"
|
||||
|
||||
@@ -71,7 +71,7 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
|
||||
BASIC_LDFLAGS_JVM_ONLY="-mno-omit-leaf-frame-pointer -mstack-alignment=16 \
|
||||
-fPIC"
|
||||
|
||||
LDFLAGS_LTO="-flto=auto -fuse-linker-plugin -fno-strict-aliasing"
|
||||
LDFLAGS_LTO="-flto=auto -fuse-linker-plugin -fno-strict-aliasing $DEBUG_PREFIX_CFLAGS"
|
||||
LDFLAGS_CXX_PARTIAL_LINKING="$MACHINE_FLAG -r"
|
||||
|
||||
if test "x$OPENJDK_TARGET_OS" = xlinux; then
|
||||
|
||||
@@ -316,23 +316,36 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_DEBUG_SYMBOLS],
|
||||
AC_MSG_CHECKING([if we should add external native debug symbols to the shipped bundles])
|
||||
AC_ARG_WITH([external-symbols-in-bundles],
|
||||
[AS_HELP_STRING([--with-external-symbols-in-bundles],
|
||||
[which type of external native debug symbol information shall be shipped in product bundles (none, public, full)
|
||||
(e.g. ship full/stripped pdbs on Windows) @<:@none@:>@])])
|
||||
[which type of external native debug symbol information shall be shipped with bundles/images (none, public, full).
|
||||
@<:@none in release builds, full otherwise. --with-native-debug-symbols=external/zipped is a prerequisite. public is only supported on Windows@:>@])],
|
||||
[],
|
||||
[with_external_symbols_in_bundles=default])
|
||||
|
||||
if test "x$with_external_symbols_in_bundles" = x || test "x$with_external_symbols_in_bundles" = xnone ; then
|
||||
AC_MSG_RESULT([no])
|
||||
elif test "x$with_external_symbols_in_bundles" = xfull || test "x$with_external_symbols_in_bundles" = xpublic ; then
|
||||
if test "x$OPENJDK_TARGET_OS" != xwindows ; then
|
||||
AC_MSG_ERROR([--with-external-symbols-in-bundles currently only works on windows!])
|
||||
elif test "x$COPY_DEBUG_SYMBOLS" != xtrue ; then
|
||||
AC_MSG_ERROR([--with-external-symbols-in-bundles only works when --with-native-debug-symbols=external is used!])
|
||||
elif test "x$with_external_symbols_in_bundles" = xfull ; then
|
||||
if test "x$COPY_DEBUG_SYMBOLS" != xtrue ; then
|
||||
AC_MSG_ERROR([--with-external-symbols-in-bundles only works when --with-native-debug-symbols=external/zipped is used!])
|
||||
elif test "x$with_external_symbols_in_bundles" = xpublic && test "x$OPENJDK_TARGET_OS" != xwindows ; then
|
||||
AC_MSG_ERROR([--with-external-symbols-in-bundles=public is only supported on Windows!])
|
||||
fi
|
||||
|
||||
if test "x$with_external_symbols_in_bundles" = xfull ; then
|
||||
AC_MSG_RESULT([full])
|
||||
SHIP_DEBUG_SYMBOLS=full
|
||||
else
|
||||
AC_MSG_RESULT([public])
|
||||
SHIP_DEBUG_SYMBOLS=public
|
||||
fi
|
||||
elif test "x$with_external_symbols_in_bundles" = xdefault ; then
|
||||
if test "x$DEBUG_LEVEL" = xrelease ; then
|
||||
AC_MSG_RESULT([no (default)])
|
||||
elif test "x$COPY_DEBUG_SYMBOLS" = xtrue ; then
|
||||
AC_MSG_RESULT([full (default)])
|
||||
SHIP_DEBUG_SYMBOLS=full
|
||||
else
|
||||
AC_MSG_RESULT([no (default, native debug symbols are not external/zipped)])
|
||||
fi
|
||||
else
|
||||
AC_MSG_ERROR([$with_external_symbols_in_bundles is an unknown value for --with-external-symbols-in-bundles])
|
||||
fi
|
||||
|
||||
@@ -234,6 +234,9 @@ define SetupLinkerFlags
|
||||
ifeq ($(call isTargetOs, macosx), true)
|
||||
$1_EXTRA_LDFLAGS += -Wl,-object_path_lto,$$($1_OBJECT_DIR)/$$($1_NAME)_lto_helper.o
|
||||
endif
|
||||
ifeq ($(TOOLCHAIN_TYPE), microsoft)
|
||||
$1_EXTRA_LDFLAGS += -LTCGOUT:$$($1_OBJECT_DIR)/$$($1_NAME).iobj
|
||||
endif
|
||||
endif
|
||||
|
||||
$1_EXTRA_LDFLAGS += $$($1_LDFLAGS_$(OPENJDK_TARGET_OS_TYPE)) $$($1_LDFLAGS_$(OPENJDK_TARGET_OS)) \
|
||||
|
||||
@@ -1192,8 +1192,8 @@ var getJibProfilesDependencies = function (input, common) {
|
||||
server: "jpg",
|
||||
product: "jcov",
|
||||
version: "3.0",
|
||||
build_number: "3",
|
||||
file: "bundles/jcov-3.0+3.zip",
|
||||
build_number: "5",
|
||||
file: "bundles/jcov-3.0+5.zip",
|
||||
environment_name: "JCOV_HOME",
|
||||
},
|
||||
|
||||
|
||||
@@ -31,10 +31,9 @@ import java.util.Set;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
import java.util.stream.Collectors;
|
||||
import java.lang.reflect.Field;
|
||||
|
||||
import javax.lang.model.element.Element;
|
||||
import javax.lang.model.element.PackageElement;
|
||||
import javax.lang.model.element.TypeElement;
|
||||
|
||||
import com.sun.source.doctree.DocTree;
|
||||
import com.sun.source.doctree.LiteralTree;
|
||||
@@ -160,9 +159,10 @@ public class JSpec implements Taglet {
|
||||
if (m.find()) {
|
||||
String chapter = m.group("chapter");
|
||||
String section = m.group("section");
|
||||
String rootParent = currentPath().replaceAll("[^/]+", "..");
|
||||
|
||||
String url = String.format("%1$s/../specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
|
||||
docRoot(elem), idPrefix, chapter, section);
|
||||
String url = String.format("%1$s/specs/%2$s/%2$s-%3$s.html#%2$s-%3$s%4$s",
|
||||
rootParent, idPrefix, chapter, section);
|
||||
|
||||
sb.append("<a href=\"")
|
||||
.append(url)
|
||||
@@ -183,6 +183,22 @@ public class JSpec implements Taglet {
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
private static ThreadLocal<String> CURRENT_PATH = null;
|
||||
|
||||
private String currentPath() {
|
||||
if (CURRENT_PATH == null) {
|
||||
try {
|
||||
Field f = Class.forName("jdk.javadoc.internal.doclets.formats.html.HtmlDocletWriter")
|
||||
.getField("CURRENT_PATH");
|
||||
@SuppressWarnings("unchecked")
|
||||
ThreadLocal<String> tl = (ThreadLocal<String>) f.get(null);
|
||||
CURRENT_PATH = tl;
|
||||
} catch (ReflectiveOperationException e) {
|
||||
throw new RuntimeException("Cannot determine current path", e);
|
||||
}
|
||||
}
|
||||
return CURRENT_PATH.get();
|
||||
}
|
||||
|
||||
private String expand(List<? extends DocTree> trees) {
|
||||
return (new SimpleDocTreeVisitor<StringBuilder, StringBuilder>() {
|
||||
@@ -209,34 +225,4 @@ public class JSpec implements Taglet {
|
||||
}).visit(trees, new StringBuilder()).toString();
|
||||
}
|
||||
|
||||
private String docRoot(Element elem) {
|
||||
switch (elem.getKind()) {
|
||||
case MODULE:
|
||||
return "..";
|
||||
|
||||
case PACKAGE:
|
||||
PackageElement pe = (PackageElement)elem;
|
||||
String pkgPart = pe.getQualifiedName()
|
||||
.toString()
|
||||
.replace('.', '/')
|
||||
.replaceAll("[^/]+", "..");
|
||||
return pe.getEnclosingElement() != null
|
||||
? "../" + pkgPart
|
||||
: pkgPart;
|
||||
|
||||
case CLASS, ENUM, RECORD, INTERFACE, ANNOTATION_TYPE:
|
||||
TypeElement te = (TypeElement)elem;
|
||||
return te.getQualifiedName()
|
||||
.toString()
|
||||
.replace('.', '/')
|
||||
.replaceAll("[^/]+", "..");
|
||||
|
||||
default:
|
||||
var enclosing = elem.getEnclosingElement();
|
||||
if (enclosing == null)
|
||||
throw new IllegalArgumentException(elem.getKind().toString());
|
||||
return docRoot(enclosing);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -31,10 +31,9 @@ import java.util.Set;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
import java.util.stream.Collectors;
|
||||
import java.lang.reflect.Field;
|
||||
|
||||
import javax.lang.model.element.Element;
|
||||
import javax.lang.model.element.PackageElement;
|
||||
import javax.lang.model.element.TypeElement;
|
||||
|
||||
import com.sun.source.doctree.DocTree;
|
||||
import com.sun.source.doctree.UnknownBlockTagTree;
|
||||
@@ -68,7 +67,7 @@ public class ToolGuide implements Taglet {
|
||||
|
||||
static final String TAG_NAME = "toolGuide";
|
||||
|
||||
static final String BASE_URL = "../specs/man";
|
||||
static final String BASE_URL = "specs/man";
|
||||
|
||||
static final Pattern TAG_PATTERN = Pattern.compile("(?s)(?<name>[A-Za-z0-9]+)\\s*(?<label>.*)$");
|
||||
|
||||
@@ -119,9 +118,10 @@ public class ToolGuide implements Taglet {
|
||||
if (label.isEmpty()) {
|
||||
label = name;
|
||||
}
|
||||
String rootParent = currentPath().replaceAll("[^/]+", "..");
|
||||
|
||||
String url = String.format("%s/%s/%s.html",
|
||||
docRoot(elem), BASE_URL, name);
|
||||
rootParent, BASE_URL, name);
|
||||
|
||||
if (needComma) {
|
||||
sb.append(",\n");
|
||||
@@ -142,33 +142,21 @@ public class ToolGuide implements Taglet {
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
private String docRoot(Element elem) {
|
||||
switch (elem.getKind()) {
|
||||
case MODULE:
|
||||
return "..";
|
||||
private static ThreadLocal<String> CURRENT_PATH = null;
|
||||
|
||||
case PACKAGE:
|
||||
PackageElement pe = (PackageElement)elem;
|
||||
String pkgPart = pe.getQualifiedName()
|
||||
.toString()
|
||||
.replace('.', '/')
|
||||
.replaceAll("[^/]+", "..");
|
||||
return pe.getEnclosingElement() != null
|
||||
? "../" + pkgPart
|
||||
: pkgPart;
|
||||
|
||||
case CLASS, ENUM, RECORD, INTERFACE, ANNOTATION_TYPE:
|
||||
TypeElement te = (TypeElement)elem;
|
||||
return te.getQualifiedName()
|
||||
.toString()
|
||||
.replace('.', '/')
|
||||
.replaceAll("[^/]+", "..");
|
||||
|
||||
default:
|
||||
var enclosing = elem.getEnclosingElement();
|
||||
if (enclosing == null)
|
||||
throw new IllegalArgumentException(elem.getKind().toString());
|
||||
return docRoot(enclosing);
|
||||
private String currentPath() {
|
||||
if (CURRENT_PATH == null) {
|
||||
try {
|
||||
Field f = Class.forName("jdk.javadoc.internal.doclets.formats.html.HtmlDocletWriter")
|
||||
.getField("CURRENT_PATH");
|
||||
@SuppressWarnings("unchecked")
|
||||
ThreadLocal<String> tl = (ThreadLocal<String>) f.get(null);
|
||||
CURRENT_PATH = tl;
|
||||
} catch (ReflectiveOperationException e) {
|
||||
throw new RuntimeException("Cannot determine current path", e);
|
||||
}
|
||||
}
|
||||
return CURRENT_PATH.get();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -346,8 +346,14 @@ source %{
|
||||
}
|
||||
|
||||
bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
|
||||
// Only SVE has partial vector operations
|
||||
if (UseSVE == 0) {
|
||||
// 1. Only SVE requires partial vector operations.
|
||||
// 2. The vector size in bytes must be smaller than MaxVectorSize.
|
||||
// 3. Predicated vectors have a mask input, which guarantees that
|
||||
// out-of-bounds lanes remain inactive.
|
||||
int length_in_bytes = vt->length_in_bytes();
|
||||
if (UseSVE == 0 ||
|
||||
length_in_bytes == MaxVectorSize ||
|
||||
node->is_predicated_vector()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -370,21 +376,22 @@ source %{
|
||||
return !node->in(1)->is_Con();
|
||||
case Op_LoadVector:
|
||||
case Op_StoreVector:
|
||||
// We use NEON load/store instructions if the vector length is <= 128 bits.
|
||||
return vt->length_in_bytes() > 16;
|
||||
case Op_AddReductionVI:
|
||||
case Op_AddReductionVL:
|
||||
// We may prefer using NEON instructions rather than SVE partial operations.
|
||||
return !VM_Version::use_neon_for_vector(vt->length_in_bytes());
|
||||
// For these ops, we prefer using NEON instructions rather than SVE
|
||||
// predicated instructions for better performance.
|
||||
return !VM_Version::use_neon_for_vector(length_in_bytes);
|
||||
case Op_MinReductionV:
|
||||
case Op_MaxReductionV:
|
||||
// For BYTE/SHORT/INT/FLOAT/DOUBLE types, we may prefer using NEON
|
||||
// instructions rather than SVE partial operations.
|
||||
// For BYTE/SHORT/INT/FLOAT/DOUBLE types, we prefer using NEON
|
||||
// instructions rather than SVE predicated instructions for
|
||||
// better performance.
|
||||
return vt->element_basic_type() == T_LONG ||
|
||||
!VM_Version::use_neon_for_vector(vt->length_in_bytes());
|
||||
!VM_Version::use_neon_for_vector(length_in_bytes);
|
||||
default:
|
||||
// For other ops whose vector size is smaller than the max vector size, a
|
||||
// full-sized unpredicated operation does not impact the final vector result.
|
||||
// For other ops whose vector size is smaller than the max vector
|
||||
// size, a full-sized unpredicated operation does not impact the
|
||||
// vector result.
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -336,8 +336,14 @@ source %{
|
||||
}
|
||||
|
||||
bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
|
||||
// Only SVE has partial vector operations
|
||||
if (UseSVE == 0) {
|
||||
// 1. Only SVE requires partial vector operations.
|
||||
// 2. The vector size in bytes must be smaller than MaxVectorSize.
|
||||
// 3. Predicated vectors have a mask input, which guarantees that
|
||||
// out-of-bounds lanes remain inactive.
|
||||
int length_in_bytes = vt->length_in_bytes();
|
||||
if (UseSVE == 0 ||
|
||||
length_in_bytes == MaxVectorSize ||
|
||||
node->is_predicated_vector()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -360,21 +366,22 @@ source %{
|
||||
return !node->in(1)->is_Con();
|
||||
case Op_LoadVector:
|
||||
case Op_StoreVector:
|
||||
// We use NEON load/store instructions if the vector length is <= 128 bits.
|
||||
return vt->length_in_bytes() > 16;
|
||||
case Op_AddReductionVI:
|
||||
case Op_AddReductionVL:
|
||||
// We may prefer using NEON instructions rather than SVE partial operations.
|
||||
return !VM_Version::use_neon_for_vector(vt->length_in_bytes());
|
||||
// For these ops, we prefer using NEON instructions rather than SVE
|
||||
// predicated instructions for better performance.
|
||||
return !VM_Version::use_neon_for_vector(length_in_bytes);
|
||||
case Op_MinReductionV:
|
||||
case Op_MaxReductionV:
|
||||
// For BYTE/SHORT/INT/FLOAT/DOUBLE types, we may prefer using NEON
|
||||
// instructions rather than SVE partial operations.
|
||||
// For BYTE/SHORT/INT/FLOAT/DOUBLE types, we prefer using NEON
|
||||
// instructions rather than SVE predicated instructions for
|
||||
// better performance.
|
||||
return vt->element_basic_type() == T_LONG ||
|
||||
!VM_Version::use_neon_for_vector(vt->length_in_bytes());
|
||||
!VM_Version::use_neon_for_vector(length_in_bytes);
|
||||
default:
|
||||
// For other ops whose vector size is smaller than the max vector size, a
|
||||
// full-sized unpredicated operation does not impact the final vector result.
|
||||
// For other ops whose vector size is smaller than the max vector
|
||||
// size, a full-sized unpredicated operation does not impact the
|
||||
// vector result.
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -310,7 +310,18 @@ static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registe
|
||||
__ add(sp, sp, 32 * wordSize);
|
||||
}
|
||||
|
||||
#ifdef R18_RESERVED
|
||||
/*
|
||||
Do not modify r18_tls when restoring registers if it is a reserved register. On Windows,
|
||||
for example, r18_tls is used to store the pointer to the current thread's TEB (where TLS
|
||||
variables are stored). Therefore, modifying r18_tls would corrupt the TEB pointer.
|
||||
*/
|
||||
__ pop(RegSet::range(r0, r17), sp);
|
||||
__ ldp(zr, r19, Address(__ post(sp, 2 * wordSize)));
|
||||
__ pop(RegSet::range(r20, r29), sp);
|
||||
#else
|
||||
__ pop(RegSet::range(r0, r29), sp);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void restore_live_registers_except_r0(StubAssembler* sasm, bool restore_fpu_registers = true) {
|
||||
@@ -323,8 +334,20 @@ static void restore_live_registers_except_r0(StubAssembler* sasm, bool restore_f
|
||||
__ add(sp, sp, 32 * wordSize);
|
||||
}
|
||||
|
||||
#ifdef R18_RESERVED
|
||||
/*
|
||||
Do not modify r18_tls when restoring registers if it is a reserved register. On Windows,
|
||||
for example, r18_tls is used to store the pointer to the current thread's TEB (where TLS
|
||||
variables are stored). Therefore, modifying r18_tls would corrupt the TEB pointer.
|
||||
*/
|
||||
__ ldp(zr, r1, Address(__ post(sp, 2 * wordSize)));
|
||||
__ pop(RegSet::range(r2, r17), sp);
|
||||
__ ldp(zr, r19, Address(__ post(sp, 2 * wordSize)));
|
||||
__ pop(RegSet::range(r20, r29), sp);
|
||||
#else
|
||||
__ ldp(zr, r1, Address(__ post(sp, 16)));
|
||||
__ pop(RegSet::range(r2, r29), sp);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -5379,7 +5379,6 @@ void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
|
||||
assert (UseCompressedClassPointers, "should only be used for compressed headers");
|
||||
assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
|
||||
int index = oop_recorder()->find_index(k);
|
||||
assert(! Universe::heap()->is_in(k), "should not be an oop");
|
||||
|
||||
InstructionMark im(this);
|
||||
RelocationHolder rspec = metadata_Relocation::spec(index);
|
||||
|
||||
@@ -6335,8 +6335,36 @@ instruct loadConD_Ex(regD dst, immD src) %{
|
||||
// Prefetch instructions.
|
||||
// Must be safe to execute with invalid address (cannot fault).
|
||||
|
||||
// Special prefetch versions which use the dcbz instruction.
|
||||
instruct prefetch_alloc_zero(indirectMemory mem, iRegLsrc src) %{
|
||||
match(PrefetchAllocation (AddP mem src));
|
||||
predicate(AllocatePrefetchStyle == 3);
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
|
||||
format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many with zero" %}
|
||||
size(4);
|
||||
ins_encode %{
|
||||
__ dcbz($src$$Register, $mem$$base$$Register);
|
||||
%}
|
||||
ins_pipe(pipe_class_memory);
|
||||
%}
|
||||
|
||||
instruct prefetch_alloc_zero_no_offset(indirectMemory mem) %{
|
||||
match(PrefetchAllocation mem);
|
||||
predicate(AllocatePrefetchStyle == 3);
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
|
||||
format %{ "PREFETCH $mem, 2 \t// Prefetch write-many with zero" %}
|
||||
size(4);
|
||||
ins_encode %{
|
||||
__ dcbz($mem$$base$$Register);
|
||||
%}
|
||||
ins_pipe(pipe_class_memory);
|
||||
%}
|
||||
|
||||
instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{
|
||||
match(PrefetchAllocation (AddP mem src));
|
||||
predicate(AllocatePrefetchStyle != 3);
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
|
||||
format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many" %}
|
||||
@@ -6349,6 +6377,7 @@ instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{
|
||||
|
||||
instruct prefetch_alloc_no_offset(indirectMemory mem) %{
|
||||
match(PrefetchAllocation mem);
|
||||
predicate(AllocatePrefetchStyle != 3);
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
|
||||
format %{ "PREFETCH $mem, 2 \t// Prefetch write-many" %}
|
||||
|
||||
@@ -2662,6 +2662,9 @@ enum Nf {
|
||||
INSN(vsha2ch_vv, 0b1110111, 0b010, 0b1, 0b101110);
|
||||
INSN(vsha2cl_vv, 0b1110111, 0b010, 0b1, 0b101111);
|
||||
|
||||
// Vector GHASH (Zvkg) Extension
|
||||
INSN(vghsh_vv, 0b1110111, 0b010, 0b1, 0b101100);
|
||||
|
||||
#undef INSN
|
||||
|
||||
#define INSN(NAME, op, funct3, Vs1, funct6) \
|
||||
|
||||
@@ -2067,6 +2067,83 @@ void C2_MacroAssembler::enc_cmove_cmp_fp(int cmpFlag, FloatRegister op1, FloatRe
|
||||
}
|
||||
}
|
||||
|
||||
void C2_MacroAssembler::enc_cmove_fp_cmp(int cmpFlag, Register op1, Register op2,
|
||||
FloatRegister dst, FloatRegister src, bool is_single) {
|
||||
bool is_unsigned = (cmpFlag & unsigned_branch_mask) == unsigned_branch_mask;
|
||||
int op_select = cmpFlag & (~unsigned_branch_mask);
|
||||
|
||||
switch (op_select) {
|
||||
case BoolTest::eq:
|
||||
cmov_fp_eq(op1, op2, dst, src, is_single);
|
||||
break;
|
||||
case BoolTest::ne:
|
||||
cmov_fp_ne(op1, op2, dst, src, is_single);
|
||||
break;
|
||||
case BoolTest::le:
|
||||
if (is_unsigned) {
|
||||
cmov_fp_leu(op1, op2, dst, src, is_single);
|
||||
} else {
|
||||
cmov_fp_le(op1, op2, dst, src, is_single);
|
||||
}
|
||||
break;
|
||||
case BoolTest::ge:
|
||||
if (is_unsigned) {
|
||||
cmov_fp_geu(op1, op2, dst, src, is_single);
|
||||
} else {
|
||||
cmov_fp_ge(op1, op2, dst, src, is_single);
|
||||
}
|
||||
break;
|
||||
case BoolTest::lt:
|
||||
if (is_unsigned) {
|
||||
cmov_fp_ltu(op1, op2, dst, src, is_single);
|
||||
} else {
|
||||
cmov_fp_lt(op1, op2, dst, src, is_single);
|
||||
}
|
||||
break;
|
||||
case BoolTest::gt:
|
||||
if (is_unsigned) {
|
||||
cmov_fp_gtu(op1, op2, dst, src, is_single);
|
||||
} else {
|
||||
cmov_fp_gt(op1, op2, dst, src, is_single);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
assert(false, "unsupported compare condition");
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
}
|
||||
|
||||
void C2_MacroAssembler::enc_cmove_fp_cmp_fp(int cmpFlag,
|
||||
FloatRegister op1, FloatRegister op2,
|
||||
FloatRegister dst, FloatRegister src,
|
||||
bool cmp_single, bool cmov_single) {
|
||||
int op_select = cmpFlag & (~unsigned_branch_mask);
|
||||
|
||||
switch (op_select) {
|
||||
case BoolTest::eq:
|
||||
cmov_fp_cmp_fp_eq(op1, op2, dst, src, cmp_single, cmov_single);
|
||||
break;
|
||||
case BoolTest::ne:
|
||||
cmov_fp_cmp_fp_ne(op1, op2, dst, src, cmp_single, cmov_single);
|
||||
break;
|
||||
case BoolTest::le:
|
||||
cmov_fp_cmp_fp_le(op1, op2, dst, src, cmp_single, cmov_single);
|
||||
break;
|
||||
case BoolTest::ge:
|
||||
cmov_fp_cmp_fp_ge(op1, op2, dst, src, cmp_single, cmov_single);
|
||||
break;
|
||||
case BoolTest::lt:
|
||||
cmov_fp_cmp_fp_lt(op1, op2, dst, src, cmp_single, cmov_single);
|
||||
break;
|
||||
case BoolTest::gt:
|
||||
cmov_fp_cmp_fp_gt(op1, op2, dst, src, cmp_single, cmov_single);
|
||||
break;
|
||||
default:
|
||||
assert(false, "unsupported compare condition");
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
}
|
||||
|
||||
// Set dst to NaN if any NaN input.
|
||||
void C2_MacroAssembler::minmax_fp(FloatRegister dst, FloatRegister src1, FloatRegister src2,
|
||||
FLOAT_TYPE ft, bool is_min) {
|
||||
|
||||
@@ -132,6 +132,13 @@
|
||||
FloatRegister op1, FloatRegister op2,
|
||||
Register dst, Register src, bool is_single);
|
||||
|
||||
void enc_cmove_fp_cmp(int cmpFlag, Register op1, Register op2,
|
||||
FloatRegister dst, FloatRegister src, bool is_single);
|
||||
|
||||
void enc_cmove_fp_cmp_fp(int cmpFlag, FloatRegister op1, FloatRegister op2,
|
||||
FloatRegister dst, FloatRegister src,
|
||||
bool cmp_single, bool cmov_single);
|
||||
|
||||
void spill(Register r, bool is64, int offset) {
|
||||
is64 ? sd(r, Address(sp, offset))
|
||||
: sw(r, Address(sp, offset));
|
||||
|
||||
@@ -123,6 +123,7 @@ define_pd_global(intx, InlineSmallCode, 1000);
|
||||
product(bool, UseZvkn, false, EXPERIMENTAL, \
|
||||
"Use Zvkn group extension, Zvkned, Zvknhb, Zvkb, Zvkt") \
|
||||
product(bool, UseCtxFencei, false, EXPERIMENTAL, \
|
||||
"Use PR_RISCV_CTX_SW_FENCEI_ON to avoid explicit icache flush")
|
||||
"Use PR_RISCV_CTX_SW_FENCEI_ON to avoid explicit icache flush") \
|
||||
product(bool, UseZvkg, false, EXPERIMENTAL, "Use Zvkg instructions")
|
||||
|
||||
#endif // CPU_RISCV_GLOBALS_RISCV_HPP
|
||||
|
||||
@@ -1233,7 +1233,119 @@ void MacroAssembler::cmov_gtu(Register cmp1, Register cmp2, Register dst, Regist
|
||||
bind(no_set);
|
||||
}
|
||||
|
||||
// ----------- cmove, compare float -----------
|
||||
// ----------- cmove float/double -----------
|
||||
|
||||
void MacroAssembler::cmov_fp_eq(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single) {
|
||||
Label no_set;
|
||||
bne(cmp1, cmp2, no_set);
|
||||
if (is_single) {
|
||||
fmv_s(dst, src);
|
||||
} else {
|
||||
fmv_d(dst, src);
|
||||
}
|
||||
bind(no_set);
|
||||
}
|
||||
|
||||
void MacroAssembler::cmov_fp_ne(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single) {
|
||||
Label no_set;
|
||||
beq(cmp1, cmp2, no_set);
|
||||
if (is_single) {
|
||||
fmv_s(dst, src);
|
||||
} else {
|
||||
fmv_d(dst, src);
|
||||
}
|
||||
bind(no_set);
|
||||
}
|
||||
|
||||
void MacroAssembler::cmov_fp_le(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single) {
|
||||
Label no_set;
|
||||
bgt(cmp1, cmp2, no_set);
|
||||
if (is_single) {
|
||||
fmv_s(dst, src);
|
||||
} else {
|
||||
fmv_d(dst, src);
|
||||
}
|
||||
bind(no_set);
|
||||
}
|
||||
|
||||
void MacroAssembler::cmov_fp_leu(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single) {
|
||||
Label no_set;
|
||||
bgtu(cmp1, cmp2, no_set);
|
||||
if (is_single) {
|
||||
fmv_s(dst, src);
|
||||
} else {
|
||||
fmv_d(dst, src);
|
||||
}
|
||||
bind(no_set);
|
||||
}
|
||||
|
||||
void MacroAssembler::cmov_fp_ge(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single) {
|
||||
Label no_set;
|
||||
blt(cmp1, cmp2, no_set);
|
||||
if (is_single) {
|
||||
fmv_s(dst, src);
|
||||
} else {
|
||||
fmv_d(dst, src);
|
||||
}
|
||||
bind(no_set);
|
||||
}
|
||||
|
||||
void MacroAssembler::cmov_fp_geu(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single) {
|
||||
Label no_set;
|
||||
bltu(cmp1, cmp2, no_set);
|
||||
if (is_single) {
|
||||
fmv_s(dst, src);
|
||||
} else {
|
||||
fmv_d(dst, src);
|
||||
}
|
||||
bind(no_set);
|
||||
}
|
||||
|
||||
void MacroAssembler::cmov_fp_lt(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single) {
|
||||
Label no_set;
|
||||
bge(cmp1, cmp2, no_set);
|
||||
if (is_single) {
|
||||
fmv_s(dst, src);
|
||||
} else {
|
||||
fmv_d(dst, src);
|
||||
}
|
||||
bind(no_set);
|
||||
}
|
||||
|
||||
void MacroAssembler::cmov_fp_ltu(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single) {
|
||||
Label no_set;
|
||||
bgeu(cmp1, cmp2, no_set);
|
||||
if (is_single) {
|
||||
fmv_s(dst, src);
|
||||
} else {
|
||||
fmv_d(dst, src);
|
||||
}
|
||||
bind(no_set);
|
||||
}
|
||||
|
||||
void MacroAssembler::cmov_fp_gt(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single) {
|
||||
Label no_set;
|
||||
ble(cmp1, cmp2, no_set);
|
||||
if (is_single) {
|
||||
fmv_s(dst, src);
|
||||
} else {
|
||||
fmv_d(dst, src);
|
||||
}
|
||||
bind(no_set);
|
||||
}
|
||||
|
||||
void MacroAssembler::cmov_fp_gtu(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single) {
|
||||
Label no_set;
|
||||
bleu(cmp1, cmp2, no_set);
|
||||
if (is_single) {
|
||||
fmv_s(dst, src);
|
||||
} else {
|
||||
fmv_d(dst, src);
|
||||
}
|
||||
bind(no_set);
|
||||
}
|
||||
|
||||
// ----------- cmove, compare float/double -----------
|
||||
//
|
||||
// For CmpF/D + CMoveI/L, ordered ones are quite straight and simple,
|
||||
// so, just list behaviour of unordered ones as follow.
|
||||
@@ -1391,6 +1503,148 @@ void MacroAssembler::cmov_cmp_fp_gt(FloatRegister cmp1, FloatRegister cmp2, Regi
|
||||
bind(no_set);
|
||||
}
|
||||
|
||||
// ----------- cmove float/double, compare float/double -----------
|
||||
|
||||
// Move src to dst only if cmp1 == cmp2,
|
||||
// otherwise leave dst unchanged, including the case where one of them is NaN.
|
||||
// Clarification:
|
||||
// java code : cmp1 != cmp2 ? dst : src
|
||||
// transformed to : CMove dst, (cmp1 eq cmp2), dst, src
|
||||
void MacroAssembler::cmov_fp_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2,
|
||||
FloatRegister dst, FloatRegister src,
|
||||
bool cmp_single, bool cmov_single) {
|
||||
Label no_set;
|
||||
if (cmp_single) {
|
||||
// jump if cmp1 != cmp2, including the case of NaN
|
||||
// not jump (i.e. move src to dst) if cmp1 == cmp2
|
||||
float_bne(cmp1, cmp2, no_set);
|
||||
} else {
|
||||
double_bne(cmp1, cmp2, no_set);
|
||||
}
|
||||
if (cmov_single) {
|
||||
fmv_s(dst, src);
|
||||
} else {
|
||||
fmv_d(dst, src);
|
||||
}
|
||||
bind(no_set);
|
||||
}
|
||||
|
||||
// Keep dst unchanged only if cmp1 == cmp2,
|
||||
// otherwise move src to dst, including the case where one of them is NaN.
|
||||
// Clarification:
|
||||
// java code : cmp1 == cmp2 ? dst : src
|
||||
// transformed to : CMove dst, (cmp1 ne cmp2), dst, src
|
||||
void MacroAssembler::cmov_fp_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2,
|
||||
FloatRegister dst, FloatRegister src,
|
||||
bool cmp_single, bool cmov_single) {
|
||||
Label no_set;
|
||||
if (cmp_single) {
|
||||
// jump if cmp1 == cmp2
|
||||
// not jump (i.e. move src to dst) if cmp1 != cmp2, including the case of NaN
|
||||
float_beq(cmp1, cmp2, no_set);
|
||||
} else {
|
||||
double_beq(cmp1, cmp2, no_set);
|
||||
}
|
||||
if (cmov_single) {
|
||||
fmv_s(dst, src);
|
||||
} else {
|
||||
fmv_d(dst, src);
|
||||
}
|
||||
bind(no_set);
|
||||
}
|
||||
|
||||
// When cmp1 <= cmp2 or any of them is NaN then dst = src, otherwise, dst = dst
|
||||
// Clarification
|
||||
// scenario 1:
|
||||
// java code : cmp2 < cmp1 ? dst : src
|
||||
// transformed to : CMove dst, (cmp1 le cmp2), dst, src
|
||||
// scenario 2:
|
||||
// java code : cmp1 > cmp2 ? dst : src
|
||||
// transformed to : CMove dst, (cmp1 le cmp2), dst, src
|
||||
void MacroAssembler::cmov_fp_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2,
|
||||
FloatRegister dst, FloatRegister src,
|
||||
bool cmp_single, bool cmov_single) {
|
||||
Label no_set;
|
||||
if (cmp_single) {
|
||||
// jump if cmp1 > cmp2
|
||||
// not jump (i.e. move src to dst) if cmp1 <= cmp2 or either is NaN
|
||||
float_bgt(cmp1, cmp2, no_set);
|
||||
} else {
|
||||
double_bgt(cmp1, cmp2, no_set);
|
||||
}
|
||||
if (cmov_single) {
|
||||
fmv_s(dst, src);
|
||||
} else {
|
||||
fmv_d(dst, src);
|
||||
}
|
||||
bind(no_set);
|
||||
}
|
||||
|
||||
void MacroAssembler::cmov_fp_cmp_fp_ge(FloatRegister cmp1, FloatRegister cmp2,
|
||||
FloatRegister dst, FloatRegister src,
|
||||
bool cmp_single, bool cmov_single) {
|
||||
Label no_set;
|
||||
if (cmp_single) {
|
||||
// jump if cmp1 < cmp2 or either is NaN
|
||||
// not jump (i.e. move src to dst) if cmp1 >= cmp2
|
||||
float_blt(cmp1, cmp2, no_set, false, true);
|
||||
} else {
|
||||
double_blt(cmp1, cmp2, no_set, false, true);
|
||||
}
|
||||
if (cmov_single) {
|
||||
fmv_s(dst, src);
|
||||
} else {
|
||||
fmv_d(dst, src);
|
||||
}
|
||||
bind(no_set);
|
||||
}
|
||||
|
||||
// When cmp1 < cmp2 or any of them is NaN then dst = src, otherwise, dst = dst
|
||||
// Clarification
|
||||
// scenario 1:
|
||||
// java code : cmp2 <= cmp1 ? dst : src
|
||||
// transformed to : CMove dst, (cmp1 lt cmp2), dst, src
|
||||
// scenario 2:
|
||||
// java code : cmp1 >= cmp2 ? dst : src
|
||||
// transformed to : CMove dst, (cmp1 lt cmp2), dst, src
|
||||
void MacroAssembler::cmov_fp_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2,
|
||||
FloatRegister dst, FloatRegister src,
|
||||
bool cmp_single, bool cmov_single) {
|
||||
Label no_set;
|
||||
if (cmp_single) {
|
||||
// jump if cmp1 >= cmp2
|
||||
// not jump (i.e. move src to dst) if cmp1 < cmp2 or either is NaN
|
||||
float_bge(cmp1, cmp2, no_set);
|
||||
} else {
|
||||
double_bge(cmp1, cmp2, no_set);
|
||||
}
|
||||
if (cmov_single) {
|
||||
fmv_s(dst, src);
|
||||
} else {
|
||||
fmv_d(dst, src);
|
||||
}
|
||||
bind(no_set);
|
||||
}
|
||||
|
||||
void MacroAssembler::cmov_fp_cmp_fp_gt(FloatRegister cmp1, FloatRegister cmp2,
|
||||
FloatRegister dst, FloatRegister src,
|
||||
bool cmp_single, bool cmov_single) {
|
||||
Label no_set;
|
||||
if (cmp_single) {
|
||||
// jump if cmp1 <= cmp2 or either is NaN
|
||||
// not jump (i.e. move src to dst) if cmp1 > cmp2
|
||||
float_ble(cmp1, cmp2, no_set, false, true);
|
||||
} else {
|
||||
double_ble(cmp1, cmp2, no_set, false, true);
|
||||
}
|
||||
if (cmov_single) {
|
||||
fmv_s(dst, src);
|
||||
} else {
|
||||
fmv_d(dst, src);
|
||||
}
|
||||
bind(no_set);
|
||||
}
|
||||
|
||||
// Float compare branch instructions
|
||||
|
||||
#define INSN(NAME, FLOATCMP, BRANCH) \
|
||||
@@ -4933,7 +5187,6 @@ void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
|
||||
assert (UseCompressedClassPointers, "should only be used for compressed headers");
|
||||
assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder");
|
||||
int index = oop_recorder()->find_index(k);
|
||||
assert(!Universe::heap()->is_in(k), "should not be an oop");
|
||||
|
||||
narrowKlass nk = CompressedKlassPointers::encode(k);
|
||||
relocate(metadata_Relocation::spec(index), [&] {
|
||||
|
||||
@@ -665,6 +665,24 @@ class MacroAssembler: public Assembler {
|
||||
void cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
|
||||
void cmov_cmp_fp_gt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
|
||||
|
||||
void cmov_fp_eq(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
|
||||
void cmov_fp_ne(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
|
||||
void cmov_fp_le(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
|
||||
void cmov_fp_leu(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
|
||||
void cmov_fp_ge(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
|
||||
void cmov_fp_geu(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
|
||||
void cmov_fp_lt(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
|
||||
void cmov_fp_ltu(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
|
||||
void cmov_fp_gt(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
|
||||
void cmov_fp_gtu(Register cmp1, Register cmp2, FloatRegister dst, FloatRegister src, bool is_single);
|
||||
|
||||
void cmov_fp_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, FloatRegister dst, FloatRegister src, bool cmp_single, bool cmov_single);
|
||||
void cmov_fp_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, FloatRegister dst, FloatRegister src, bool cmp_single, bool cmov_single);
|
||||
void cmov_fp_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, FloatRegister dst, FloatRegister src, bool cmp_single, bool cmov_single);
|
||||
void cmov_fp_cmp_fp_ge(FloatRegister cmp1, FloatRegister cmp2, FloatRegister dst, FloatRegister src, bool cmp_single, bool cmov_single);
|
||||
void cmov_fp_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, FloatRegister dst, FloatRegister src, bool cmp_single, bool cmov_single);
|
||||
void cmov_fp_cmp_fp_gt(FloatRegister cmp1, FloatRegister cmp2, FloatRegister dst, FloatRegister src, bool cmp_single, bool cmov_single);
|
||||
|
||||
public:
|
||||
// We try to follow risc-v asm menomics.
|
||||
// But as we don't layout a reachable GOT,
|
||||
|
||||
@@ -1924,8 +1924,6 @@ bool Matcher::match_rule_supported(int opcode) {
|
||||
case Op_SubHF:
|
||||
return UseZfh;
|
||||
|
||||
case Op_CMoveF:
|
||||
case Op_CMoveD:
|
||||
case Op_CMoveP:
|
||||
case Op_CMoveN:
|
||||
return false;
|
||||
@@ -8544,7 +8542,7 @@ instruct convD2F_reg(fRegF dst, fRegD src) %{
|
||||
|
||||
// single <-> half precision
|
||||
|
||||
instruct convHF2F_reg_reg(fRegF dst, iRegINoSp src, iRegINoSp tmp) %{
|
||||
instruct convHF2F_reg_reg(fRegF dst, iRegIorL2I src, iRegINoSp tmp) %{
|
||||
match(Set dst (ConvHF2F src));
|
||||
effect(TEMP tmp);
|
||||
format %{ "fmv.h.x $dst, $src\t# move source from $src to $dst\n\t"
|
||||
@@ -10466,6 +10464,286 @@ instruct cmovL_cmpP(iRegLNoSp dst, iRegL src, iRegP op1, iRegP op2, cmpOpU cop)
|
||||
ins_pipe(pipe_class_compare);
|
||||
%}
|
||||
|
||||
// --------- CMoveF ---------
|
||||
|
||||
instruct cmovF_cmpI(fRegF dst, fRegF src, iRegI op1, iRegI op2, cmpOp cop) %{
|
||||
match(Set dst (CMoveF (Binary cop (CmpI op1 op2)) (Binary dst src)));
|
||||
ins_cost(ALU_COST + BRANCH_COST);
|
||||
|
||||
format %{
|
||||
"CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpI\n\t"
|
||||
%}
|
||||
|
||||
ins_encode %{
|
||||
__ enc_cmove_fp_cmp($cop$$cmpcode,
|
||||
as_Register($op1$$reg), as_Register($op2$$reg),
|
||||
as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), true /* is_single */);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_class_compare);
|
||||
%}
|
||||
|
||||
instruct cmovF_cmpU(fRegF dst, fRegF src, iRegI op1, iRegI op2, cmpOpU cop) %{
|
||||
match(Set dst (CMoveF (Binary cop (CmpU op1 op2)) (Binary dst src)));
|
||||
ins_cost(ALU_COST + BRANCH_COST);
|
||||
|
||||
format %{
|
||||
"CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpU\n\t"
|
||||
%}
|
||||
|
||||
ins_encode %{
|
||||
__ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
|
||||
as_Register($op1$$reg), as_Register($op2$$reg),
|
||||
as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), true /* is_single */);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_class_compare);
|
||||
%}
|
||||
|
||||
instruct cmovF_cmpL(fRegF dst, fRegF src, iRegL op1, iRegL op2, cmpOp cop) %{
|
||||
match(Set dst (CMoveF (Binary cop (CmpL op1 op2)) (Binary dst src)));
|
||||
ins_cost(ALU_COST + BRANCH_COST);
|
||||
|
||||
format %{
|
||||
"CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpL\n\t"
|
||||
%}
|
||||
|
||||
ins_encode %{
|
||||
__ enc_cmove_fp_cmp($cop$$cmpcode,
|
||||
as_Register($op1$$reg), as_Register($op2$$reg),
|
||||
as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), true /* is_single */);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_class_compare);
|
||||
%}
|
||||
|
||||
instruct cmovF_cmpUL(fRegF dst, fRegF src, iRegL op1, iRegL op2, cmpOpU cop) %{
|
||||
match(Set dst (CMoveF (Binary cop (CmpUL op1 op2)) (Binary dst src)));
|
||||
ins_cost(ALU_COST + BRANCH_COST);
|
||||
|
||||
format %{
|
||||
"CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpUL\n\t"
|
||||
%}
|
||||
|
||||
ins_encode %{
|
||||
__ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
|
||||
as_Register($op1$$reg), as_Register($op2$$reg),
|
||||
as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), true /* is_single */);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_class_compare);
|
||||
%}
|
||||
|
||||
instruct cmovF_cmpF(fRegF dst, fRegF src, fRegF op1, fRegF op2, cmpOp cop) %{
|
||||
match(Set dst (CMoveF (Binary cop (CmpF op1 op2)) (Binary dst src)));
|
||||
ins_cost(ALU_COST + BRANCH_COST);
|
||||
|
||||
format %{
|
||||
"CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpF\n\t"
|
||||
%}
|
||||
|
||||
ins_encode %{
|
||||
__ enc_cmove_fp_cmp_fp($cop$$cmpcode,
|
||||
as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
|
||||
as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg),
|
||||
true /* cmp_single */, true /* cmov_single */);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_class_compare);
|
||||
%}
|
||||
|
||||
instruct cmovF_cmpD(fRegF dst, fRegF src, fRegD op1, fRegD op2, cmpOp cop) %{
|
||||
match(Set dst (CMoveF (Binary cop (CmpD op1 op2)) (Binary dst src)));
|
||||
ins_cost(ALU_COST + BRANCH_COST);
|
||||
|
||||
format %{
|
||||
"CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpD\n\t"
|
||||
%}
|
||||
|
||||
ins_encode %{
|
||||
__ enc_cmove_fp_cmp_fp($cop$$cmpcode | C2_MacroAssembler::double_branch_mask,
|
||||
as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
|
||||
as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg),
|
||||
false /* cmp_single */, true /* cmov_single */);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_class_compare);
|
||||
%}
|
||||
|
||||
instruct cmovF_cmpN(fRegF dst, fRegF src, iRegN op1, iRegN op2, cmpOp cop) %{
|
||||
match(Set dst (CMoveF (Binary cop (CmpN op1 op2)) (Binary dst src)));
|
||||
ins_cost(ALU_COST + BRANCH_COST);
|
||||
|
||||
format %{
|
||||
"CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpN\n\t"
|
||||
%}
|
||||
|
||||
ins_encode %{
|
||||
__ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
|
||||
as_Register($op1$$reg), as_Register($op2$$reg),
|
||||
as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), true /* is_single */);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_class_compare);
|
||||
%}
|
||||
|
||||
instruct cmovF_cmpP(fRegF dst, fRegF src, iRegP op1, iRegP op2, cmpOp cop) %{
|
||||
match(Set dst (CMoveF (Binary cop (CmpP op1 op2)) (Binary dst src)));
|
||||
ins_cost(ALU_COST + BRANCH_COST);
|
||||
|
||||
format %{
|
||||
"CMoveF $dst, ($op1 $cop $op2), $dst, $src\t#@cmovF_cmpP\n\t"
|
||||
%}
|
||||
|
||||
ins_encode %{
|
||||
__ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
|
||||
as_Register($op1$$reg), as_Register($op2$$reg),
|
||||
as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), true /* is_single */);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_class_compare);
|
||||
%}
|
||||
|
||||
// --------- CMoveD ---------
|
||||
|
||||
instruct cmovD_cmpI(fRegD dst, fRegD src, iRegI op1, iRegI op2, cmpOp cop) %{
|
||||
match(Set dst (CMoveD (Binary cop (CmpI op1 op2)) (Binary dst src)));
|
||||
ins_cost(ALU_COST + BRANCH_COST);
|
||||
|
||||
format %{
|
||||
"CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpI\n\t"
|
||||
%}
|
||||
|
||||
ins_encode %{
|
||||
__ enc_cmove_fp_cmp($cop$$cmpcode,
|
||||
as_Register($op1$$reg), as_Register($op2$$reg),
|
||||
as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), false /* is_single */);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_class_compare);
|
||||
%}
|
||||
|
||||
instruct cmovD_cmpU(fRegD dst, fRegD src, iRegI op1, iRegI op2, cmpOpU cop) %{
|
||||
match(Set dst (CMoveD (Binary cop (CmpU op1 op2)) (Binary dst src)));
|
||||
ins_cost(ALU_COST + BRANCH_COST);
|
||||
|
||||
format %{
|
||||
"CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpU\n\t"
|
||||
%}
|
||||
|
||||
ins_encode %{
|
||||
__ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
|
||||
as_Register($op1$$reg), as_Register($op2$$reg),
|
||||
as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), false /* is_single */);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_class_compare);
|
||||
%}
|
||||
|
||||
instruct cmovD_cmpL(fRegD dst, fRegD src, iRegL op1, iRegL op2, cmpOp cop) %{
|
||||
match(Set dst (CMoveD (Binary cop (CmpL op1 op2)) (Binary dst src)));
|
||||
ins_cost(ALU_COST + BRANCH_COST);
|
||||
|
||||
format %{
|
||||
"CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpL\n\t"
|
||||
%}
|
||||
|
||||
ins_encode %{
|
||||
__ enc_cmove_fp_cmp($cop$$cmpcode,
|
||||
as_Register($op1$$reg), as_Register($op2$$reg),
|
||||
as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), false /* is_single */);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_class_compare);
|
||||
%}
|
||||
|
||||
instruct cmovD_cmpUL(fRegD dst, fRegD src, iRegL op1, iRegL op2, cmpOpU cop) %{
|
||||
match(Set dst (CMoveD (Binary cop (CmpUL op1 op2)) (Binary dst src)));
|
||||
ins_cost(ALU_COST + BRANCH_COST);
|
||||
|
||||
format %{
|
||||
"CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpUL\n\t"
|
||||
%}
|
||||
|
||||
ins_encode %{
|
||||
__ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
|
||||
as_Register($op1$$reg), as_Register($op2$$reg),
|
||||
as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), false /* is_single */);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_class_compare);
|
||||
%}
|
||||
|
||||
instruct cmovD_cmpF(fRegD dst, fRegD src, fRegF op1, fRegF op2, cmpOp cop) %{
|
||||
match(Set dst (CMoveD (Binary cop (CmpF op1 op2)) (Binary dst src)));
|
||||
ins_cost(ALU_COST + BRANCH_COST);
|
||||
|
||||
format %{
|
||||
"CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpF\n\t"
|
||||
%}
|
||||
|
||||
ins_encode %{
|
||||
__ enc_cmove_fp_cmp_fp($cop$$cmpcode,
|
||||
as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
|
||||
as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg),
|
||||
true /* cmp_single */, false /* cmov_single */);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_class_compare);
|
||||
%}
|
||||
|
||||
instruct cmovD_cmpD(fRegD dst, fRegD src, fRegD op1, fRegD op2, cmpOp cop) %{
|
||||
match(Set dst (CMoveD (Binary cop (CmpD op1 op2)) (Binary dst src)));
|
||||
ins_cost(ALU_COST + BRANCH_COST);
|
||||
|
||||
format %{
|
||||
"CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpD\n\t"
|
||||
%}
|
||||
|
||||
ins_encode %{
|
||||
__ enc_cmove_fp_cmp_fp($cop$$cmpcode | C2_MacroAssembler::double_branch_mask,
|
||||
as_FloatRegister($op1$$reg), as_FloatRegister($op2$$reg),
|
||||
as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg),
|
||||
false /* cmp_single */, false /* cmov_single */);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_class_compare);
|
||||
%}
|
||||
|
||||
instruct cmovD_cmpN(fRegD dst, fRegD src, iRegN op1, iRegN op2, cmpOp cop) %{
|
||||
match(Set dst (CMoveD (Binary cop (CmpN op1 op2)) (Binary dst src)));
|
||||
ins_cost(ALU_COST + BRANCH_COST);
|
||||
|
||||
format %{
|
||||
"CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpN\n\t"
|
||||
%}
|
||||
|
||||
ins_encode %{
|
||||
__ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
|
||||
as_Register($op1$$reg), as_Register($op2$$reg),
|
||||
as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), false /* is_single */);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_class_compare);
|
||||
%}
|
||||
|
||||
instruct cmovD_cmpP(fRegD dst, fRegD src, iRegP op1, iRegP op2, cmpOp cop) %{
|
||||
match(Set dst (CMoveD (Binary cop (CmpP op1 op2)) (Binary dst src)));
|
||||
ins_cost(ALU_COST + BRANCH_COST);
|
||||
|
||||
format %{
|
||||
"CMoveD $dst, ($op1 $cop $op2), $dst, $src\t#@cmovD_cmpP\n\t"
|
||||
%}
|
||||
|
||||
ins_encode %{
|
||||
__ enc_cmove_fp_cmp($cop$$cmpcode | C2_MacroAssembler::unsigned_branch_mask,
|
||||
as_Register($op1$$reg), as_Register($op2$$reg),
|
||||
as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg), false /* is_single */);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_class_compare);
|
||||
%}
|
||||
|
||||
// ============================================================================
|
||||
// Procedure Call/Return Instructions
|
||||
|
||||
|
||||
@@ -2493,8 +2493,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
__ vsetivli(x0, 4, Assembler::e32, Assembler::m1);
|
||||
__ vle32_v(res, from);
|
||||
|
||||
__ mv(t2, 52);
|
||||
__ blt(keylen, t2, L_aes128);
|
||||
__ mv(t2, 52); // key length could be only {11, 13, 15} * 4 = {44, 52, 60}
|
||||
__ bltu(keylen, t2, L_aes128);
|
||||
__ beq(keylen, t2, L_aes192);
|
||||
// Else we fallthrough to the biggest case (256-bit key size)
|
||||
|
||||
@@ -2572,8 +2572,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
__ vsetivli(x0, 4, Assembler::e32, Assembler::m1);
|
||||
__ vle32_v(res, from);
|
||||
|
||||
__ mv(t2, 52);
|
||||
__ blt(keylen, t2, L_aes128);
|
||||
__ mv(t2, 52); // key length could be only {11, 13, 15} * 4 = {44, 52, 60}
|
||||
__ bltu(keylen, t2, L_aes128);
|
||||
__ beq(keylen, t2, L_aes192);
|
||||
// Else we fallthrough to the biggest case (256-bit key size)
|
||||
|
||||
@@ -2606,6 +2606,454 @@ class StubGenerator: public StubCodeGenerator {
|
||||
return start;
|
||||
}
|
||||
|
||||
void cipherBlockChaining_encryptAESCrypt(int round, Register from, Register to, Register key,
|
||||
Register rvec, Register input_len) {
|
||||
const Register len = x29;
|
||||
|
||||
VectorRegister working_vregs[] = {
|
||||
v1, v2, v3, v4, v5, v6, v7, v8,
|
||||
v9, v10, v11, v12, v13, v14, v15
|
||||
};
|
||||
|
||||
const unsigned int BLOCK_SIZE = 16;
|
||||
|
||||
__ mv(len, input_len);
|
||||
// load init rvec
|
||||
__ vsetivli(x0, 4, Assembler::e32, Assembler::m1);
|
||||
__ vle32_v(v16, rvec);
|
||||
|
||||
generate_aes_loadkeys(key, working_vregs, round);
|
||||
Label L_enc_loop;
|
||||
__ bind(L_enc_loop);
|
||||
// Encrypt from source by block size
|
||||
__ vle32_v(v17, from);
|
||||
__ addi(from, from, BLOCK_SIZE);
|
||||
__ vxor_vv(v16, v16, v17);
|
||||
generate_aes_encrypt(v16, working_vregs, round);
|
||||
__ vse32_v(v16, to);
|
||||
__ addi(to, to, BLOCK_SIZE);
|
||||
__ subi(len, len, BLOCK_SIZE);
|
||||
__ bnez(len, L_enc_loop);
|
||||
|
||||
// save current rvec and return
|
||||
__ vse32_v(v16, rvec);
|
||||
__ mv(x10, input_len);
|
||||
__ leave();
|
||||
__ ret();
|
||||
}
|
||||
|
||||
// Arguments:
|
||||
//
|
||||
// Inputs:
|
||||
// c_rarg0 - source byte array address
|
||||
// c_rarg1 - destination byte array address
|
||||
// c_rarg2 - K (key) in little endian int array
|
||||
// c_rarg3 - r vector byte array address
|
||||
// c_rarg4 - input length
|
||||
//
|
||||
// Output:
|
||||
// x10 - input length
|
||||
//
|
||||
address generate_cipherBlockChaining_encryptAESCrypt() {
|
||||
assert(UseAESIntrinsics, "need AES instructions (Zvkned extension) support");
|
||||
__ align(CodeEntryAlignment);
|
||||
StubId stub_id = StubId::stubgen_cipherBlockChaining_encryptAESCrypt_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
|
||||
const Register from = c_rarg0;
|
||||
const Register to = c_rarg1;
|
||||
const Register key = c_rarg2;
|
||||
const Register rvec = c_rarg3;
|
||||
const Register input_len = c_rarg4;
|
||||
|
||||
const Register keylen = x28;
|
||||
|
||||
address start = __ pc();
|
||||
__ enter();
|
||||
|
||||
Label L_aes128, L_aes192;
|
||||
// Compute #rounds for AES based on the length of the key array
|
||||
__ lwu(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
|
||||
__ mv(t0, 52);
|
||||
__ bltu(keylen, t0, L_aes128);
|
||||
__ beq(keylen, t0, L_aes192);
|
||||
// Else we fallthrough to the biggest case (256-bit key size)
|
||||
|
||||
// Note: the following function performs key += 15*16
|
||||
cipherBlockChaining_encryptAESCrypt(15, from, to, key, rvec, input_len);
|
||||
|
||||
// Note: the following function performs key += 11*16
|
||||
__ bind(L_aes128);
|
||||
cipherBlockChaining_encryptAESCrypt(11, from, to, key, rvec, input_len);
|
||||
|
||||
// Note: the following function performs key += 13*16
|
||||
__ bind(L_aes192);
|
||||
cipherBlockChaining_encryptAESCrypt(13, from, to, key, rvec, input_len);
|
||||
|
||||
return start;
|
||||
}
|
||||
|
||||
void cipherBlockChaining_decryptAESCrypt(int round, Register from, Register to, Register key,
|
||||
Register rvec, Register input_len) {
|
||||
const Register len = x29;
|
||||
|
||||
VectorRegister working_vregs[] = {
|
||||
v1, v2, v3, v4, v5, v6, v7, v8,
|
||||
v9, v10, v11, v12, v13, v14, v15
|
||||
};
|
||||
|
||||
const unsigned int BLOCK_SIZE = 16;
|
||||
|
||||
__ mv(len, input_len);
|
||||
// load init rvec
|
||||
__ vsetivli(x0, 4, Assembler::e32, Assembler::m1);
|
||||
__ vle32_v(v16, rvec);
|
||||
|
||||
generate_aes_loadkeys(key, working_vregs, round);
|
||||
Label L_dec_loop;
|
||||
// Decrypt from source by block size
|
||||
__ bind(L_dec_loop);
|
||||
__ vle32_v(v17, from);
|
||||
__ addi(from, from, BLOCK_SIZE);
|
||||
__ vmv_v_v(v18, v17);
|
||||
generate_aes_decrypt(v17, working_vregs, round);
|
||||
__ vxor_vv(v17, v17, v16);
|
||||
__ vse32_v(v17, to);
|
||||
__ vmv_v_v(v16, v18);
|
||||
__ addi(to, to, BLOCK_SIZE);
|
||||
__ subi(len, len, BLOCK_SIZE);
|
||||
__ bnez(len, L_dec_loop);
|
||||
|
||||
// save current rvec and return
|
||||
__ vse32_v(v16, rvec);
|
||||
__ mv(x10, input_len);
|
||||
__ leave();
|
||||
__ ret();
|
||||
}
|
||||
|
||||
// Arguments:
|
||||
//
|
||||
// Inputs:
|
||||
// c_rarg0 - source byte array address
|
||||
// c_rarg1 - destination byte array address
|
||||
// c_rarg2 - K (key) in little endian int array
|
||||
// c_rarg3 - r vector byte array address
|
||||
// c_rarg4 - input length
|
||||
//
|
||||
// Output:
|
||||
// x10 - input length
|
||||
//
|
||||
address generate_cipherBlockChaining_decryptAESCrypt() {
|
||||
assert(UseAESIntrinsics, "need AES instructions (Zvkned extension) support");
|
||||
__ align(CodeEntryAlignment);
|
||||
StubId stub_id = StubId::stubgen_cipherBlockChaining_decryptAESCrypt_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
|
||||
const Register from = c_rarg0;
|
||||
const Register to = c_rarg1;
|
||||
const Register key = c_rarg2;
|
||||
const Register rvec = c_rarg3;
|
||||
const Register input_len = c_rarg4;
|
||||
|
||||
const Register keylen = x28;
|
||||
|
||||
address start = __ pc();
|
||||
__ enter();
|
||||
|
||||
Label L_aes128, L_aes192, L_aes128_loop, L_aes192_loop, L_aes256_loop;
|
||||
// Compute #rounds for AES based on the length of the key array
|
||||
__ lwu(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
|
||||
__ mv(t0, 52);
|
||||
__ bltu(keylen, t0, L_aes128);
|
||||
__ beq(keylen, t0, L_aes192);
|
||||
// Else we fallthrough to the biggest case (256-bit key size)
|
||||
|
||||
// Note: the following function performs key += 15*16
|
||||
cipherBlockChaining_decryptAESCrypt(15, from, to, key, rvec, input_len);
|
||||
|
||||
// Note: the following function performs key += 11*16
|
||||
__ bind(L_aes128);
|
||||
cipherBlockChaining_decryptAESCrypt(11, from, to, key, rvec, input_len);
|
||||
|
||||
// Note: the following function performs key += 13*16
|
||||
__ bind(L_aes192);
|
||||
cipherBlockChaining_decryptAESCrypt(13, from, to, key, rvec, input_len);
|
||||
|
||||
return start;
|
||||
}
|
||||
|
||||
// Load big-endian 128-bit from memory.
|
||||
void be_load_counter_128(Register counter_hi, Register counter_lo, Register counter) {
|
||||
__ ld(counter_lo, Address(counter, 8)); // Load 128-bits from counter
|
||||
__ ld(counter_hi, Address(counter));
|
||||
__ rev8(counter_lo, counter_lo); // Convert big-endian to little-endian
|
||||
__ rev8(counter_hi, counter_hi);
|
||||
}
|
||||
|
||||
// Little-endian 128-bit + 64-bit -> 128-bit addition.
|
||||
void add_counter_128(Register counter_hi, Register counter_lo) {
|
||||
assert_different_registers(counter_hi, counter_lo, t0);
|
||||
__ addi(counter_lo, counter_lo, 1);
|
||||
__ seqz(t0, counter_lo); // Check for result overflow
|
||||
__ add(counter_hi, counter_hi, t0); // Add 1 if overflow otherwise 0
|
||||
}
|
||||
|
||||
// Store big-endian 128-bit to memory.
|
||||
void be_store_counter_128(Register counter_hi, Register counter_lo, Register counter) {
|
||||
assert_different_registers(counter_hi, counter_lo, t0, t1);
|
||||
__ rev8(t0, counter_lo); // Convert little-endian to big-endian
|
||||
__ rev8(t1, counter_hi);
|
||||
__ sd(t0, Address(counter, 8)); // Store 128-bits to counter
|
||||
__ sd(t1, Address(counter));
|
||||
}
|
||||
|
||||
// Emit the AES/CTR crypt body for one key size, including the function
// epilogue (leave/ret) — the caller binds a label per key size and emits
// this once for each.
//
//   round               - number of AES round-key blocks: 11/13/15 for
//                         128/192/256-bit keys
//   in                  - source byte array address
//   out                 - destination byte array address
//   key                 - expanded key schedule (little-endian int array)
//   counter             - 16-byte big-endian counter block, updated in place
//   input_len           - number of bytes to process (returned in x10)
//   saved_encrypted_ctr - 16-byte buffer holding the last encrypted counter
//   used_ptr            - address of the int tracking how many bytes of
//                         saved_encrypted_ctr were already consumed
void counterMode_AESCrypt(int round, Register in, Register out, Register key, Register counter,
                          Register input_len, Register saved_encrypted_ctr, Register used_ptr) {
  // Algorithm:
  //
  //    generate_aes_loadkeys();
  //    load_counter_128(counter_hi, counter_lo, counter);
  //
  //  L_next:
  //    if (used >= BLOCK_SIZE) goto L_main_loop;
  //
  //  L_encrypt_next:
  //    *out = *in ^ saved_encrypted_ctr[used];
  //    out++; in++; used++; len--;
  //    if (len == 0) goto L_exit;
  //    goto L_next;
  //
  //  L_main_loop:
  //    if (len == 0) goto L_exit;
  //    saved_encrypted_ctr = generate_aes_encrypt(counter);
  //
  //    add_counter_128(counter_hi, counter_lo);
  //    be_store_counter_128(counter_hi, counter_lo, counter);
  //    used = 0;
  //
  //    if (len < BLOCK_SIZE) goto L_encrypt_next;
  //
  //    v_in = load_16Byte(in);
  //    v_saved_encrypted_ctr = load_16Byte(saved_encrypted_ctr);
  //    store_16Byte(out, v_in ^ v_saved_encrypted_ctr);
  //    out += BLOCK_SIZE;
  //    in += BLOCK_SIZE;
  //    len -= BLOCK_SIZE;
  //    used = BLOCK_SIZE;
  //    goto L_main_loop;
  //
  //  L_exit:
  //    store(used);
  //    result = input_len
  //    return result;

  const Register used = x28;
  const Register len = x29;
  const Register counter_hi = x30;
  const Register counter_lo = x31;
  const Register block_size = t2;

  const unsigned int BLOCK_SIZE = 16;

  // Round keys live in v1..v15; v16/v17 below are working data registers.
  VectorRegister working_vregs[] = {
    v1, v2, v3, v4, v5, v6, v7, v8,
    v9, v10, v11, v12, v13, v14, v15
  };

  // 4 x 32-bit elements = one 128-bit AES block per vector register.
  __ vsetivli(x0, 4, Assembler::e32, Assembler::m1);

  __ lwu(used, Address(used_ptr));
  __ mv(len, input_len);
  __ mv(block_size, BLOCK_SIZE);

  // load keys to working_vregs according to round
  generate_aes_loadkeys(key, working_vregs, round);

  // 128-bit big-endian load
  be_load_counter_128(counter_hi, counter_lo, counter);

  Label L_next, L_encrypt_next, L_main_loop, L_exit;
  // Check the last saved_encrypted_ctr used value, we fall through
  // to L_encrypt_next when the used value lower than block_size
  __ bind(L_next);
  __ bgeu(used, block_size, L_main_loop);

  // There is still data left fewer than block_size after L_main_loop
  // or last used, we encrypt them one by one.
  __ bind(L_encrypt_next);
  __ add(t0, saved_encrypted_ctr, used);
  __ lbu(t1, Address(t0));               // keystream byte
  __ lbu(t0, Address(in));               // plaintext/ciphertext byte
  __ xorr(t1, t1, t0);
  __ sb(t1, Address(out));
  __ addi(in, in, 1);
  __ addi(out, out, 1);
  __ addi(used, used, 1);
  __ subi(len, len, 1);
  __ beqz(len, L_exit);
  __ j(L_next);

  // We will calculate the next saved_encrypted_ctr and encrypt the blocks of data
  // one by one until there is less than a full block remaining if len not zero
  __ bind(L_main_loop);
  __ beqz(len, L_exit);
  __ vle32_v(v16, counter);

  // encrypt counter according to round
  generate_aes_encrypt(v16, working_vregs, round);

  // Stash the full keystream block so a later partial-block call can
  // resume from it via 'used'.
  __ vse32_v(v16, saved_encrypted_ctr);

  // 128-bit little-endian increment
  add_counter_128(counter_hi, counter_lo);
  // 128-bit big-endian store
  be_store_counter_128(counter_hi, counter_lo, counter);

  __ mv(used, 0);
  // Check if we have a full block_size
  __ bltu(len, block_size, L_encrypt_next);

  // We have one full block to encrypt at least
  __ vle32_v(v17, in);
  __ vxor_vv(v16, v16, v17);
  __ vse32_v(v16, out);
  __ add(out, out, block_size);
  __ add(in, in, block_size);
  __ sub(len, len, block_size);
  __ mv(used, block_size);
  __ j(L_main_loop);

  __ bind(L_exit);
  __ sw(used, Address(used_ptr));
  __ mv(x10, input_len); // return processed length
  __ leave();
  __ ret();
};
|
||||
|
||||
// CTR AES crypt.
// Arguments:
//
// Inputs:
//   c_rarg0   - source byte array address
//   c_rarg1   - destination byte array address
//   c_rarg2   - K (key) in little endian int array
//   c_rarg3   - counter vector byte array address
//   c_rarg4   - input length
//   c_rarg5   - saved encryptedCounter start
//   c_rarg6   - saved used length
//
// Output:
//   x10       - input length
//
address generate_counterMode_AESCrypt() {
  assert(UseAESCTRIntrinsics, "need AES instructions (Zvkned extension) and Zbb extension support");

  __ align(CodeEntryAlignment);
  StubId stub_id = StubId::stubgen_counterMode_AESCrypt_id;
  StubCodeMark mark(this, stub_id);

  const Register in                  = c_rarg0;
  const Register out                 = c_rarg1;
  const Register key                 = c_rarg2;
  const Register counter             = c_rarg3;
  const Register input_len           = c_rarg4;
  const Register saved_encrypted_ctr = c_rarg5;
  const Register used_len_ptr        = c_rarg6;

  const Register keylen = c_rarg7; // temporary register

  const address start = __ pc();
  __ enter();

  // Zero-length input: nothing to do, return input_len (0) immediately.
  Label L_exit;
  __ beqz(input_len, L_exit);

  Label L_aes128, L_aes192;
  // Compute #rounds for AES based on the length of the key array
  __ lwu(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
  __ mv(t0, 52); // key length could be only {11, 13, 15} * 4 = {44, 52, 60}
  __ bltu(keylen, t0, L_aes128);
  __ beq(keylen, t0, L_aes192);
  // Else we fallthrough to the biggest case (256-bit key size)

  // Each counterMode_AESCrypt body below ends with leave/ret, so at run
  // time execution never falls from one key-size body into the next —
  // the labels only select which body to enter.

  // Note: the following function performs crypt with key += 15*16
  counterMode_AESCrypt(15, in, out, key, counter, input_len, saved_encrypted_ctr, used_len_ptr);

  // Note: the following function performs crypt with key += 13*16
  __ bind(L_aes192);
  counterMode_AESCrypt(13, in, out, key, counter, input_len, saved_encrypted_ctr, used_len_ptr);

  // Note: the following function performs crypt with key += 11*16
  __ bind(L_aes128);
  counterMode_AESCrypt(11, in, out, key, counter, input_len, saved_encrypted_ctr, used_len_ptr);

  __ bind(L_exit);
  __ mv(x10, input_len);
  __ leave();
  __ ret();

  return start;
}
|
||||
|
||||
/**
 * GHASH block processing using the Zvkg vghsh instruction.
 *
 * Arguments:
 *
 * Input:
 *   c_rarg0   - current state address
 *   c_rarg1   - H key address
 *   c_rarg2   - data address
 *   c_rarg3   - number of blocks
 *
 * Output:
 *   Updated state at c_rarg0
 */
address generate_ghash_processBlocks() {
  assert(UseGHASHIntrinsics, "need GHASH instructions (Zvkg extension) and Zvbb support");

  __ align(CodeEntryAlignment);
  StubId stub_id = StubId::stubgen_ghash_processBlocks_id;
  StubCodeMark mark(this, stub_id);

  address start = __ pc();
  __ enter();

  Register state   = c_rarg0;
  Register subkeyH = c_rarg1;
  Register data    = c_rarg2;
  Register blocks  = c_rarg3;

  VectorRegister partial_hash = v1;
  VectorRegister hash_subkey  = v2;
  VectorRegister cipher_text  = v3;

  const unsigned int BLOCK_SIZE = 16;

  // Load H and the running state as 2 x 64-bit elements; vrev8 (Zvbb)
  // byte-swaps each element — presumably converting the Java-side
  // big-endian representation to the order vghsh expects.
  __ vsetivli(x0, 2, Assembler::e64, Assembler::m1);
  __ vle64_v(hash_subkey, subkeyH);
  __ vrev8_v(hash_subkey, hash_subkey);
  __ vle64_v(partial_hash, state);
  __ vrev8_v(partial_hash, partial_hash);

  // vghsh operates on SEW=32 element groups; switch before the loop.
  __ vsetivli(x0, 4, Assembler::e32, Assembler::m1);
  Label L_ghash_loop;
  __ bind(L_ghash_loop);
  __ vle32_v(cipher_text, data);
  __ addi(data, data, BLOCK_SIZE);
  // partial_hash = (partial_hash ^ cipher_text) x hash_subkey  (GF(2^128))
  __ vghsh_vv(partial_hash, hash_subkey, cipher_text);
  __ subi(blocks, blocks, 1);
  __ bnez(blocks, L_ghash_loop);

  // Swap the accumulated hash back to memory byte order and store it.
  __ vsetivli(x0, 2, Assembler::e64, Assembler::m1);
  __ vrev8_v(partial_hash, partial_hash);
  __ vse64_v(partial_hash, state);
  __ leave();
  __ ret();

  return start;
}
|
||||
|
||||
// code for comparing 8 characters of strings with Latin1 and Utf16 encoding
|
||||
void compare_string_8_x_LU(Register tmpL, Register tmpU,
|
||||
Register strL, Register strU, Label& DIFF) {
|
||||
@@ -6824,6 +7272,16 @@ static const int64_t right_3_bits = right_n_bits(3);
|
||||
if (UseAESIntrinsics) {
|
||||
StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
|
||||
StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
|
||||
StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
|
||||
StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt();
|
||||
}
|
||||
|
||||
if (UseAESCTRIntrinsics) {
|
||||
StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt();
|
||||
}
|
||||
|
||||
if (UseGHASHIntrinsics) {
|
||||
StubRoutines::_ghash_processBlocks = generate_ghash_processBlocks();
|
||||
}
|
||||
|
||||
if (UsePoly1305Intrinsics) {
|
||||
|
||||
@@ -434,6 +434,15 @@ void VM_Version::c2_initialize() {
|
||||
warning("UseAESIntrinsics enabled, but UseAES not, enabling");
|
||||
UseAES = true;
|
||||
}
|
||||
|
||||
if (FLAG_IS_DEFAULT(UseAESCTRIntrinsics) && UseZbb) {
|
||||
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, true);
|
||||
}
|
||||
|
||||
if (UseAESCTRIntrinsics && !UseZbb) {
|
||||
warning("Cannot enable UseAESCTRIntrinsics on cpu without UseZbb support.");
|
||||
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
|
||||
}
|
||||
} else {
|
||||
if (UseAES) {
|
||||
warning("AES instructions are not available on this CPU");
|
||||
@@ -443,11 +452,26 @@ void VM_Version::c2_initialize() {
|
||||
warning("AES intrinsics are not available on this CPU");
|
||||
FLAG_SET_DEFAULT(UseAESIntrinsics, false);
|
||||
}
|
||||
if (UseAESCTRIntrinsics) {
|
||||
warning("Cannot enable UseAESCTRIntrinsics on cpu without UseZvkn support.");
|
||||
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
|
||||
}
|
||||
}
|
||||
|
||||
if (UseAESCTRIntrinsics) {
|
||||
warning("AES/CTR intrinsics are not available on this CPU");
|
||||
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
|
||||
if (UseZvkg) {
|
||||
if (FLAG_IS_DEFAULT(UseGHASHIntrinsics) && UseZvbb) {
|
||||
FLAG_SET_DEFAULT(UseGHASHIntrinsics, true);
|
||||
}
|
||||
|
||||
if (UseGHASHIntrinsics && !UseZvbb) {
|
||||
warning("Cannot enable UseGHASHIntrinsics on cpu without UseZvbb support");
|
||||
FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
|
||||
}
|
||||
} else {
|
||||
if (UseGHASHIntrinsics) {
|
||||
warning("Cannot enable UseGHASHIntrinsics on cpu without UseZvkg support");
|
||||
FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -289,6 +289,8 @@ class VM_Version : public Abstract_VM_Version {
|
||||
decl(Zvfh , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT_DEP(UseZvfh, &ext_v, &ext_Zfh, nullptr)) \
|
||||
/* Shorthand for Zvkned + Zvknhb + Zvkb + Zvkt */ \
|
||||
decl(Zvkn , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT_DEP(UseZvkn, &ext_v, nullptr)) \
|
||||
/* Zvkg crypto extension for ghash and gcm */ \
|
||||
decl(Zvkg , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT_DEP(UseZvkg, &ext_v, nullptr)) \
|
||||
|
||||
#define DECLARE_RV_EXT_FEATURE(PRETTY, LINUX_BIT, FSTRING, FLAGF) \
|
||||
struct ext_##PRETTY##RVExtFeatureValue : public RVExtFeatureValue { \
|
||||
|
||||
@@ -1715,6 +1715,8 @@ bool Matcher::match_rule_supported(int opcode) {
|
||||
switch (opcode) {
|
||||
case Op_ReverseBytesI:
|
||||
case Op_ReverseBytesL:
|
||||
case Op_ReverseBytesS:
|
||||
case Op_ReverseBytesUS:
|
||||
return UseByteReverseInstruction;
|
||||
case Op_PopCountI:
|
||||
case Op_PopCountL:
|
||||
@@ -11615,6 +11617,38 @@ instruct vround2D_reg(vecX dst, vecX src, immI8 rmode) %{
|
||||
|
||||
// Byte reverse
|
||||
|
||||
instruct bytes_reverse_short(iRegI dst, iRegI src) %{
|
||||
match(Set dst (ReverseBytesS src));
|
||||
predicate(UseByteReverseInstruction);
|
||||
ins_cost(2 * DEFAULT_COST);
|
||||
size(8);
|
||||
|
||||
format %{ "LRVR $dst, $src\n\t # byte reverse int"
|
||||
"SRA $dst, 0x0010\t # right shift by 16, sign extended" %}
|
||||
|
||||
ins_encode %{
|
||||
__ z_lrvr($dst$$Register, $src$$Register);
|
||||
__ z_sra($dst$$Register, 0x0010);
|
||||
%}
|
||||
ins_pipe(pipe_class_dummy);
|
||||
%}
|
||||
|
||||
instruct bytes_reverse_unsigned_short(iRegI dst, iRegI src) %{
|
||||
match(Set dst (ReverseBytesUS src));
|
||||
predicate(UseByteReverseInstruction);
|
||||
ins_cost(2 * DEFAULT_COST);
|
||||
size(8);
|
||||
|
||||
format %{ "LRVR $dst, $src\n\t # byte reverse int"
|
||||
"SRL $dst, 0x0010\t # right shift by 16, zero extended" %}
|
||||
|
||||
ins_encode %{
|
||||
__ z_lrvr($dst$$Register, $src$$Register);
|
||||
__ z_srl($dst$$Register, 0x0010);
|
||||
%}
|
||||
ins_pipe(pipe_class_dummy);
|
||||
%}
|
||||
|
||||
instruct bytes_reverse_int(iRegI dst, iRegI src) %{
|
||||
match(Set dst (ReverseBytesI src));
|
||||
predicate(UseByteReverseInstruction); // See Matcher::match_rule_supported
|
||||
|
||||
@@ -449,8 +449,8 @@ const int FPUStateSizeInWords = 2688 / wordSize;
|
||||
// imm8[1:0] = 00 (min) / 01 (max)
|
||||
//
|
||||
// [1] https://www.intel.com/content/www/us/en/content-details/856721/intel-advanced-vector-extensions-10-2-intel-avx10-2-architecture-specification.html?wapkw=AVX10
|
||||
const int AVX10_MINMAX_MAX_COMPARE_SIGN = 0x5;
|
||||
const int AVX10_MINMAX_MIN_COMPARE_SIGN = 0x4;
|
||||
const int AVX10_2_MINMAX_MAX_COMPARE_SIGN = 0x5;
|
||||
const int AVX10_2_MINMAX_MIN_COMPARE_SIGN = 0x4;
|
||||
|
||||
// The Intel x86/Amd64 Assembler: Pure assembler doing NO optimizations on the instruction
|
||||
// level (e.g. mov rax, 0 is not translated into xor rax, rax!); i.e., what you write
|
||||
|
||||
@@ -1033,8 +1033,8 @@ void C2_MacroAssembler::vminmax_fp(int opc, BasicType elem_bt, XMMRegister dst,
|
||||
assert(opc == Op_MinV || opc == Op_MinReductionV ||
|
||||
opc == Op_MaxV || opc == Op_MaxReductionV, "sanity");
|
||||
|
||||
int imm8 = (opc == Op_MinV || opc == Op_MinReductionV) ? AVX10_MINMAX_MIN_COMPARE_SIGN
|
||||
: AVX10_MINMAX_MAX_COMPARE_SIGN;
|
||||
int imm8 = (opc == Op_MinV || opc == Op_MinReductionV) ? AVX10_2_MINMAX_MIN_COMPARE_SIGN
|
||||
: AVX10_2_MINMAX_MAX_COMPARE_SIGN;
|
||||
if (elem_bt == T_FLOAT) {
|
||||
evminmaxps(dst, mask, src1, src2, true, imm8, vlen_enc);
|
||||
} else {
|
||||
@@ -5163,7 +5163,7 @@ void C2_MacroAssembler::vector_castD2X_evex(BasicType to_elem_bt, XMMRegister ds
|
||||
}
|
||||
}
|
||||
|
||||
void C2_MacroAssembler::vector_castF2X_avx10(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, int vec_enc) {
|
||||
void C2_MacroAssembler::vector_castF2X_avx10_2(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, int vec_enc) {
|
||||
switch(to_elem_bt) {
|
||||
case T_LONG:
|
||||
evcvttps2qqs(dst, src, vec_enc);
|
||||
@@ -5183,7 +5183,7 @@ void C2_MacroAssembler::vector_castF2X_avx10(BasicType to_elem_bt, XMMRegister d
|
||||
}
|
||||
}
|
||||
|
||||
void C2_MacroAssembler::vector_castF2X_avx10(BasicType to_elem_bt, XMMRegister dst, Address src, int vec_enc) {
|
||||
void C2_MacroAssembler::vector_castF2X_avx10_2(BasicType to_elem_bt, XMMRegister dst, Address src, int vec_enc) {
|
||||
switch(to_elem_bt) {
|
||||
case T_LONG:
|
||||
evcvttps2qqs(dst, src, vec_enc);
|
||||
@@ -5203,7 +5203,7 @@ void C2_MacroAssembler::vector_castF2X_avx10(BasicType to_elem_bt, XMMRegister d
|
||||
}
|
||||
}
|
||||
|
||||
void C2_MacroAssembler::vector_castD2X_avx10(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, int vec_enc) {
|
||||
void C2_MacroAssembler::vector_castD2X_avx10_2(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, int vec_enc) {
|
||||
switch(to_elem_bt) {
|
||||
case T_LONG:
|
||||
evcvttpd2qqs(dst, src, vec_enc);
|
||||
@@ -5223,7 +5223,7 @@ void C2_MacroAssembler::vector_castD2X_avx10(BasicType to_elem_bt, XMMRegister d
|
||||
}
|
||||
}
|
||||
|
||||
void C2_MacroAssembler::vector_castD2X_avx10(BasicType to_elem_bt, XMMRegister dst, Address src, int vec_enc) {
|
||||
void C2_MacroAssembler::vector_castD2X_avx10_2(BasicType to_elem_bt, XMMRegister dst, Address src, int vec_enc) {
|
||||
switch(to_elem_bt) {
|
||||
case T_LONG:
|
||||
evcvttpd2qqs(dst, src, vec_enc);
|
||||
|
||||
@@ -347,13 +347,13 @@ public:
|
||||
XMMRegister xtmp2, XMMRegister xtmp3, XMMRegister xtmp4, XMMRegister xtmp5,
|
||||
AddressLiteral float_sign_flip, Register rscratch, int vec_enc);
|
||||
|
||||
void vector_castF2X_avx10(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, int vec_enc);
|
||||
void vector_castF2X_avx10_2(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, int vec_enc);
|
||||
|
||||
void vector_castF2X_avx10(BasicType to_elem_bt, XMMRegister dst, Address src, int vec_enc);
|
||||
void vector_castF2X_avx10_2(BasicType to_elem_bt, XMMRegister dst, Address src, int vec_enc);
|
||||
|
||||
void vector_castD2X_avx10(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, int vec_enc);
|
||||
void vector_castD2X_avx10_2(BasicType to_elem_bt, XMMRegister dst, XMMRegister src, int vec_enc);
|
||||
|
||||
void vector_castD2X_avx10(BasicType to_elem_bt, XMMRegister dst, Address src, int vec_enc);
|
||||
void vector_castD2X_avx10_2(BasicType to_elem_bt, XMMRegister dst, Address src, int vec_enc);
|
||||
|
||||
void vector_cast_double_to_int_special_cases_avx(XMMRegister dst, XMMRegister src, XMMRegister xtmp1, XMMRegister xtmp2,
|
||||
XMMRegister xtmp3, XMMRegister xtmp4, XMMRegister xtmp5, Register rscratch,
|
||||
|
||||
@@ -8899,9 +8899,9 @@ void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XM
|
||||
case T_LONG:
|
||||
evpminsq(dst, mask, nds, src, merge, vector_len); break;
|
||||
case T_FLOAT:
|
||||
evminmaxps(dst, mask, nds, src, merge, AVX10_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
|
||||
evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
|
||||
case T_DOUBLE:
|
||||
evminmaxpd(dst, mask, nds, src, merge, AVX10_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
|
||||
evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
|
||||
default:
|
||||
fatal("Unexpected type argument %s", type2name(type)); break;
|
||||
}
|
||||
@@ -8918,9 +8918,9 @@ void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XM
|
||||
case T_LONG:
|
||||
evpmaxsq(dst, mask, nds, src, merge, vector_len); break;
|
||||
case T_FLOAT:
|
||||
evminmaxps(dst, mask, nds, src, merge, AVX10_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
|
||||
evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
|
||||
case T_DOUBLE:
|
||||
evminmaxpd(dst, mask, nds, src, merge, AVX10_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
|
||||
evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
|
||||
default:
|
||||
fatal("Unexpected type argument %s", type2name(type)); break;
|
||||
}
|
||||
@@ -8937,9 +8937,9 @@ void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XM
|
||||
case T_LONG:
|
||||
evpminsq(dst, mask, nds, src, merge, vector_len); break;
|
||||
case T_FLOAT:
|
||||
evminmaxps(dst, mask, nds, src, merge, AVX10_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
|
||||
evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
|
||||
case T_DOUBLE:
|
||||
evminmaxpd(dst, mask, nds, src, merge, AVX10_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
|
||||
evminmaxpd(dst, mask, nds, src, merge, AVX10_2_MINMAX_MIN_COMPARE_SIGN, vector_len); break;
|
||||
default:
|
||||
fatal("Unexpected type argument %s", type2name(type)); break;
|
||||
}
|
||||
@@ -8956,9 +8956,9 @@ void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XM
|
||||
case T_LONG:
|
||||
evpmaxsq(dst, mask, nds, src, merge, vector_len); break;
|
||||
case T_FLOAT:
|
||||
evminmaxps(dst, mask, nds, src, merge, AVX10_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
|
||||
evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
|
||||
case T_DOUBLE:
|
||||
evminmaxps(dst, mask, nds, src, merge, AVX10_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
|
||||
evminmaxps(dst, mask, nds, src, merge, AVX10_2_MINMAX_MAX_COMPARE_SIGN, vector_len); break;
|
||||
default:
|
||||
fatal("Unexpected type argument %s", type2name(type)); break;
|
||||
}
|
||||
|
||||
@@ -7289,12 +7289,12 @@ instruct loadD(regD dst, memory mem)
|
||||
%}
|
||||
|
||||
// max = java.lang.Math.max(float a, float b)
|
||||
instruct maxF_avx10_reg(regF dst, regF a, regF b) %{
|
||||
instruct maxF_reg_avx10_2(regF dst, regF a, regF b) %{
|
||||
predicate(VM_Version::supports_avx10_2());
|
||||
match(Set dst (MaxF a b));
|
||||
format %{ "maxF $dst, $a, $b" %}
|
||||
ins_encode %{
|
||||
__ eminmaxss($dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, AVX10_MINMAX_MAX_COMPARE_SIGN);
|
||||
__ eminmaxss($dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, AVX10_2_MINMAX_MAX_COMPARE_SIGN);
|
||||
%}
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
@@ -7325,12 +7325,12 @@ instruct maxF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xtmp, rRe
|
||||
%}
|
||||
|
||||
// max = java.lang.Math.max(double a, double b)
|
||||
instruct maxD_avx10_reg(regD dst, regD a, regD b) %{
|
||||
instruct maxD_reg_avx10_2(regD dst, regD a, regD b) %{
|
||||
predicate(VM_Version::supports_avx10_2());
|
||||
match(Set dst (MaxD a b));
|
||||
format %{ "maxD $dst, $a, $b" %}
|
||||
ins_encode %{
|
||||
__ eminmaxsd($dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, AVX10_MINMAX_MAX_COMPARE_SIGN);
|
||||
__ eminmaxsd($dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, AVX10_2_MINMAX_MAX_COMPARE_SIGN);
|
||||
%}
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
@@ -7361,12 +7361,12 @@ instruct maxD_reduction_reg(legRegD dst, legRegD a, legRegD b, legRegD xtmp, rRe
|
||||
%}
|
||||
|
||||
// max = java.lang.Math.min(float a, float b)
|
||||
instruct minF_avx10_reg(regF dst, regF a, regF b) %{
|
||||
instruct minF_reg_avx10_2(regF dst, regF a, regF b) %{
|
||||
predicate(VM_Version::supports_avx10_2());
|
||||
match(Set dst (MinF a b));
|
||||
format %{ "minF $dst, $a, $b" %}
|
||||
ins_encode %{
|
||||
__ eminmaxss($dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, AVX10_MINMAX_MIN_COMPARE_SIGN);
|
||||
__ eminmaxss($dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, AVX10_2_MINMAX_MIN_COMPARE_SIGN);
|
||||
%}
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
@@ -7397,12 +7397,12 @@ instruct minF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xtmp, rRe
|
||||
%}
|
||||
|
||||
// max = java.lang.Math.min(double a, double b)
|
||||
instruct minD_avx10_reg(regD dst, regD a, regD b) %{
|
||||
instruct minD_reg_avx10_2(regD dst, regD a, regD b) %{
|
||||
predicate(VM_Version::supports_avx10_2());
|
||||
match(Set dst (MinD a b));
|
||||
format %{ "minD $dst, $a, $b" %}
|
||||
ins_encode %{
|
||||
__ eminmaxsd($dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, AVX10_MINMAX_MIN_COMPARE_SIGN);
|
||||
__ eminmaxsd($dst$$XMMRegister, $a$$XMMRegister, $b$$XMMRegister, AVX10_2_MINMAX_MIN_COMPARE_SIGN);
|
||||
%}
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
@@ -14586,7 +14586,7 @@ instruct convF2I_reg_reg(rRegI dst, regF src, rFlagsReg cr)
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct convF2I_reg_reg_avx10(rRegI dst, regF src)
|
||||
instruct convF2I_reg_reg_avx10_2(rRegI dst, regF src)
|
||||
%{
|
||||
predicate(VM_Version::supports_avx10_2());
|
||||
match(Set dst (ConvF2I src));
|
||||
@@ -14597,7 +14597,7 @@ instruct convF2I_reg_reg_avx10(rRegI dst, regF src)
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct convF2I_reg_mem_avx10(rRegI dst, memory src)
|
||||
instruct convF2I_reg_mem_avx10_2(rRegI dst, memory src)
|
||||
%{
|
||||
predicate(VM_Version::supports_avx10_2());
|
||||
match(Set dst (ConvF2I (LoadF src)));
|
||||
@@ -14620,7 +14620,7 @@ instruct convF2L_reg_reg(rRegL dst, regF src, rFlagsReg cr)
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct convF2L_reg_reg_avx10(rRegL dst, regF src)
|
||||
instruct convF2L_reg_reg_avx10_2(rRegL dst, regF src)
|
||||
%{
|
||||
predicate(VM_Version::supports_avx10_2());
|
||||
match(Set dst (ConvF2L src));
|
||||
@@ -14631,7 +14631,7 @@ instruct convF2L_reg_reg_avx10(rRegL dst, regF src)
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct convF2L_reg_mem_avx10(rRegL dst, memory src)
|
||||
instruct convF2L_reg_mem_avx10_2(rRegL dst, memory src)
|
||||
%{
|
||||
predicate(VM_Version::supports_avx10_2());
|
||||
match(Set dst (ConvF2L (LoadF src)));
|
||||
@@ -14654,7 +14654,7 @@ instruct convD2I_reg_reg(rRegI dst, regD src, rFlagsReg cr)
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct convD2I_reg_reg_avx10(rRegI dst, regD src)
|
||||
instruct convD2I_reg_reg_avx10_2(rRegI dst, regD src)
|
||||
%{
|
||||
predicate(VM_Version::supports_avx10_2());
|
||||
match(Set dst (ConvD2I src));
|
||||
@@ -14665,7 +14665,7 @@ instruct convD2I_reg_reg_avx10(rRegI dst, regD src)
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct convD2I_reg_mem_avx10(rRegI dst, memory src)
|
||||
instruct convD2I_reg_mem_avx10_2(rRegI dst, memory src)
|
||||
%{
|
||||
predicate(VM_Version::supports_avx10_2());
|
||||
match(Set dst (ConvD2I (LoadD src)));
|
||||
@@ -14688,7 +14688,7 @@ instruct convD2L_reg_reg(rRegL dst, regD src, rFlagsReg cr)
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct convD2L_reg_reg_avx10(rRegL dst, regD src)
|
||||
instruct convD2L_reg_reg_avx10_2(rRegL dst, regD src)
|
||||
%{
|
||||
predicate(VM_Version::supports_avx10_2());
|
||||
match(Set dst (ConvD2L src));
|
||||
@@ -14699,7 +14699,7 @@ instruct convD2L_reg_reg_avx10(rRegL dst, regD src)
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct convD2L_reg_mem_avx10(rRegL dst, memory src)
|
||||
instruct convD2L_reg_mem_avx10_2(rRegL dst, memory src)
|
||||
%{
|
||||
predicate(VM_Version::supports_avx10_2());
|
||||
match(Set dst (ConvD2L (LoadD src)));
|
||||
@@ -19660,7 +19660,7 @@ instruct minmax_reductionF_av(legRegF dst, legVec src, legVec tmp, legVec atmp,
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
|
||||
instruct minmax_reduction2F_avx10(regF dst, immF src1, vec src2, vec xtmp1) %{
|
||||
instruct minmax_reduction2F_avx10_2(regF dst, immF src1, vec src2, vec xtmp1) %{
|
||||
predicate(VM_Version::supports_avx10_2() && Matcher::vector_element_basic_type(n->in(2)) == T_FLOAT &&
|
||||
((n->Opcode() == Op_MinReductionV && n->in(1)->bottom_type() == TypeF::POS_INF) ||
|
||||
(n->Opcode() == Op_MaxReductionV && n->in(1)->bottom_type() == TypeF::NEG_INF)) &&
|
||||
@@ -19678,7 +19678,7 @@ instruct minmax_reduction2F_avx10(regF dst, immF src1, vec src2, vec xtmp1) %{
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
|
||||
instruct minmax_reductionF_avx10(regF dst, immF src1, vec src2, vec xtmp1, vec xtmp2) %{
|
||||
instruct minmax_reductionF_avx10_2(regF dst, immF src1, vec src2, vec xtmp1, vec xtmp2) %{
|
||||
predicate(VM_Version::supports_avx10_2() && Matcher::vector_element_basic_type(n->in(2)) == T_FLOAT &&
|
||||
((n->Opcode() == Op_MinReductionV && n->in(1)->bottom_type() == TypeF::POS_INF) ||
|
||||
(n->Opcode() == Op_MaxReductionV && n->in(1)->bottom_type() == TypeF::NEG_INF)) &&
|
||||
@@ -19696,7 +19696,7 @@ instruct minmax_reductionF_avx10(regF dst, immF src1, vec src2, vec xtmp1, vec x
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
|
||||
instruct minmax_reduction2F_avx10_av(regF dst, vec src, vec xtmp1) %{
|
||||
instruct minmax_reduction2F_av_avx10_2(regF dst, vec src, vec xtmp1) %{
|
||||
predicate(VM_Version::supports_avx10_2() && Matcher::vector_element_basic_type(n->in(2)) == T_FLOAT &&
|
||||
Matcher::vector_length(n->in(2)) == 2);
|
||||
match(Set dst (MinReductionV dst src));
|
||||
@@ -19712,7 +19712,7 @@ instruct minmax_reduction2F_avx10_av(regF dst, vec src, vec xtmp1) %{
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
|
||||
instruct minmax_reductionF_avx10_av(regF dst, vec src, vec xtmp1, vec xtmp2) %{
|
||||
instruct minmax_reductionF_av_avx10_2(regF dst, vec src, vec xtmp1, vec xtmp2) %{
|
||||
predicate(VM_Version::supports_avx10_2() && Matcher::vector_element_basic_type(n->in(2)) == T_FLOAT &&
|
||||
Matcher::vector_length(n->in(2)) >= 4);
|
||||
match(Set dst (MinReductionV dst src));
|
||||
@@ -19810,7 +19810,7 @@ instruct minmax_reductionD_av(legRegD dst, legVec src, legVec tmp1, legVec tmp2,
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
|
||||
instruct minmax_reduction2D_avx10(regD dst, immD src1, vec src2, vec xtmp1) %{
|
||||
instruct minmax_reduction2D_avx10_2(regD dst, immD src1, vec src2, vec xtmp1) %{
|
||||
predicate(VM_Version::supports_avx10_2() && Matcher::vector_element_basic_type(n->in(2)) == T_DOUBLE &&
|
||||
((n->Opcode() == Op_MinReductionV && n->in(1)->bottom_type() == TypeD::POS_INF) ||
|
||||
(n->Opcode() == Op_MaxReductionV && n->in(1)->bottom_type() == TypeD::NEG_INF)) &&
|
||||
@@ -19828,7 +19828,7 @@ instruct minmax_reduction2D_avx10(regD dst, immD src1, vec src2, vec xtmp1) %{
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
|
||||
instruct minmax_reductionD_avx10(regD dst, immD src1, vec src2, vec xtmp1, vec xtmp2) %{
|
||||
instruct minmax_reductionD_avx10_2(regD dst, immD src1, vec src2, vec xtmp1, vec xtmp2) %{
|
||||
predicate(VM_Version::supports_avx10_2() && Matcher::vector_element_basic_type(n->in(2)) == T_DOUBLE &&
|
||||
((n->Opcode() == Op_MinReductionV && n->in(1)->bottom_type() == TypeD::POS_INF) ||
|
||||
(n->Opcode() == Op_MaxReductionV && n->in(1)->bottom_type() == TypeD::NEG_INF)) &&
|
||||
@@ -19847,7 +19847,7 @@ instruct minmax_reductionD_avx10(regD dst, immD src1, vec src2, vec xtmp1, vec x
|
||||
%}
|
||||
|
||||
|
||||
instruct minmax_reduction2D_av_avx10(regD dst, vec src, vec xtmp1) %{
|
||||
instruct minmax_reduction2D_av_avx10_2(regD dst, vec src, vec xtmp1) %{
|
||||
predicate(VM_Version::supports_avx10_2() && Matcher::vector_element_basic_type(n->in(2)) == T_DOUBLE &&
|
||||
Matcher::vector_length(n->in(2)) == 2);
|
||||
match(Set dst (MinReductionV dst src));
|
||||
@@ -19863,7 +19863,7 @@ instruct minmax_reduction2D_av_avx10(regD dst, vec src, vec xtmp1) %{
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
|
||||
instruct minmax_reductionD_av_avx10(regD dst, vec src, vec xtmp1, vec xtmp2) %{
|
||||
instruct minmax_reductionD_av_avx10_2(regD dst, vec src, vec xtmp1, vec xtmp2) %{
|
||||
predicate(VM_Version::supports_avx10_2() && Matcher::vector_element_basic_type(n->in(2)) == T_DOUBLE &&
|
||||
Matcher::vector_length(n->in(2)) >= 4);
|
||||
match(Set dst (MinReductionV dst src));
|
||||
@@ -20766,7 +20766,7 @@ instruct vminmaxL_reg_evex(vec dst, vec src1, vec src2) %{
|
||||
%}
|
||||
|
||||
// Float/Double vector Min/Max
|
||||
instruct minmaxFP_avx10_reg(vec dst, vec a, vec b) %{
|
||||
instruct minmaxFP_reg_avx10_2(vec dst, vec a, vec b) %{
|
||||
predicate(VM_Version::supports_avx10_2() &&
|
||||
is_floating_point_type(Matcher::vector_element_basic_type(n))); // T_FLOAT, T_DOUBLE
|
||||
match(Set dst (MinV a b));
|
||||
@@ -22113,29 +22113,29 @@ instruct castFtoX_reg_evex(vec dst, vec src, vec xtmp1, vec xtmp2, kReg ktmp1, k
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
|
||||
instruct castFtoX_reg_avx10(vec dst, vec src) %{
|
||||
instruct castFtoX_reg_avx10_2(vec dst, vec src) %{
|
||||
predicate(VM_Version::supports_avx10_2() &&
|
||||
is_integral_type(Matcher::vector_element_basic_type(n)));
|
||||
match(Set dst (VectorCastF2X src));
|
||||
format %{ "vector_cast_f2x_avx10 $dst, $src\t!" %}
|
||||
format %{ "vector_cast_f2x_avx10_2 $dst, $src\t!" %}
|
||||
ins_encode %{
|
||||
BasicType to_elem_bt = Matcher::vector_element_basic_type(this);
|
||||
int vlen_enc = (to_elem_bt == T_LONG) ? vector_length_encoding(this) : vector_length_encoding(this, $src);
|
||||
__ vector_castF2X_avx10(to_elem_bt, $dst$$XMMRegister, $src$$XMMRegister, vlen_enc);
|
||||
__ vector_castF2X_avx10_2(to_elem_bt, $dst$$XMMRegister, $src$$XMMRegister, vlen_enc);
|
||||
%}
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
|
||||
instruct castFtoX_mem_avx10(vec dst, memory src) %{
|
||||
instruct castFtoX_mem_avx10_2(vec dst, memory src) %{
|
||||
predicate(VM_Version::supports_avx10_2() &&
|
||||
is_integral_type(Matcher::vector_element_basic_type(n)));
|
||||
match(Set dst (VectorCastF2X (LoadVector src)));
|
||||
format %{ "vector_cast_f2x_avx10 $dst, $src\t!" %}
|
||||
format %{ "vector_cast_f2x_avx10_2 $dst, $src\t!" %}
|
||||
ins_encode %{
|
||||
int vlen = Matcher::vector_length(this);
|
||||
BasicType to_elem_bt = Matcher::vector_element_basic_type(this);
|
||||
int vlen_enc = (to_elem_bt == T_LONG) ? vector_length_encoding(this) : vector_length_encoding(vlen * sizeof(jfloat));
|
||||
__ vector_castF2X_avx10(to_elem_bt, $dst$$XMMRegister, $src$$Address, vlen_enc);
|
||||
__ vector_castF2X_avx10_2(to_elem_bt, $dst$$XMMRegister, $src$$Address, vlen_enc);
|
||||
%}
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
@@ -22187,29 +22187,29 @@ instruct castDtoX_reg_evex(vec dst, vec src, vec xtmp1, vec xtmp2, kReg ktmp1, k
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
|
||||
instruct castDtoX_reg_avx10(vec dst, vec src) %{
|
||||
instruct castDtoX_reg_avx10_2(vec dst, vec src) %{
|
||||
predicate(VM_Version::supports_avx10_2() &&
|
||||
is_integral_type(Matcher::vector_element_basic_type(n)));
|
||||
match(Set dst (VectorCastD2X src));
|
||||
format %{ "vector_cast_d2x_avx10 $dst, $src\t!" %}
|
||||
format %{ "vector_cast_d2x_avx10_2 $dst, $src\t!" %}
|
||||
ins_encode %{
|
||||
int vlen_enc = vector_length_encoding(this, $src);
|
||||
BasicType to_elem_bt = Matcher::vector_element_basic_type(this);
|
||||
__ vector_castD2X_avx10(to_elem_bt, $dst$$XMMRegister, $src$$XMMRegister, vlen_enc);
|
||||
__ vector_castD2X_avx10_2(to_elem_bt, $dst$$XMMRegister, $src$$XMMRegister, vlen_enc);
|
||||
%}
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
|
||||
instruct castDtoX_mem_avx10(vec dst, memory src) %{
|
||||
instruct castDtoX_mem_avx10_2(vec dst, memory src) %{
|
||||
predicate(VM_Version::supports_avx10_2() &&
|
||||
is_integral_type(Matcher::vector_element_basic_type(n)));
|
||||
match(Set dst (VectorCastD2X (LoadVector src)));
|
||||
format %{ "vector_cast_d2x_avx10 $dst, $src\t!" %}
|
||||
format %{ "vector_cast_d2x_avx10_2 $dst, $src\t!" %}
|
||||
ins_encode %{
|
||||
int vlen = Matcher::vector_length(this);
|
||||
int vlen_enc = vector_length_encoding(vlen * sizeof(jdouble));
|
||||
BasicType to_elem_bt = Matcher::vector_element_basic_type(this);
|
||||
__ vector_castD2X_avx10(to_elem_bt, $dst$$XMMRegister, $src$$Address, vlen_enc);
|
||||
__ vector_castD2X_avx10_2(to_elem_bt, $dst$$XMMRegister, $src$$Address, vlen_enc);
|
||||
%}
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
@@ -25181,14 +25181,14 @@ instruct scalar_binOps_HF_reg(regF dst, regF src1, regF src2)
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct scalar_minmax_HF_avx10_reg(regF dst, regF src1, regF src2)
|
||||
instruct scalar_minmax_HF_reg_avx10_2(regF dst, regF src1, regF src2)
|
||||
%{
|
||||
predicate(VM_Version::supports_avx10_2());
|
||||
match(Set dst (MaxHF src1 src2));
|
||||
match(Set dst (MinHF src1 src2));
|
||||
format %{ "scalar_min_max_fp16 $dst, $src1, $src2" %}
|
||||
ins_encode %{
|
||||
int function = this->ideal_Opcode() == Op_MinHF ? AVX10_MINMAX_MIN_COMPARE_SIGN : AVX10_MINMAX_MAX_COMPARE_SIGN;
|
||||
int function = this->ideal_Opcode() == Op_MinHF ? AVX10_2_MINMAX_MIN_COMPARE_SIGN : AVX10_2_MINMAX_MAX_COMPARE_SIGN;
|
||||
__ eminmaxsh($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, function);
|
||||
%}
|
||||
ins_pipe( pipe_slow );
|
||||
@@ -25296,7 +25296,7 @@ instruct vector_fma_HF_mem(vec dst, memory src1, vec src2)
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
|
||||
instruct vector_minmax_HF_avx10_mem(vec dst, vec src1, memory src2)
|
||||
instruct vector_minmax_HF_mem_avx10_2(vec dst, vec src1, memory src2)
|
||||
%{
|
||||
predicate(VM_Version::supports_avx10_2());
|
||||
match(Set dst (MinVHF src1 (VectorReinterpret (LoadVector src2))));
|
||||
@@ -25304,13 +25304,13 @@ instruct vector_minmax_HF_avx10_mem(vec dst, vec src1, memory src2)
|
||||
format %{ "vector_min_max_fp16_mem $dst, $src1, $src2" %}
|
||||
ins_encode %{
|
||||
int vlen_enc = vector_length_encoding(this);
|
||||
int function = this->ideal_Opcode() == Op_MinVHF ? AVX10_MINMAX_MIN_COMPARE_SIGN : AVX10_MINMAX_MAX_COMPARE_SIGN;
|
||||
int function = this->ideal_Opcode() == Op_MinVHF ? AVX10_2_MINMAX_MIN_COMPARE_SIGN : AVX10_2_MINMAX_MAX_COMPARE_SIGN;
|
||||
__ evminmaxph($dst$$XMMRegister, k0, $src1$$XMMRegister, $src2$$Address, true, function, vlen_enc);
|
||||
%}
|
||||
ins_pipe( pipe_slow );
|
||||
%}
|
||||
|
||||
instruct vector_minmax_HF_avx10_reg(vec dst, vec src1, vec src2)
|
||||
instruct vector_minmax_HF_reg_avx10_2(vec dst, vec src1, vec src2)
|
||||
%{
|
||||
predicate(VM_Version::supports_avx10_2());
|
||||
match(Set dst (MinVHF src1 src2));
|
||||
@@ -25318,7 +25318,7 @@ instruct vector_minmax_HF_avx10_reg(vec dst, vec src1, vec src2)
|
||||
format %{ "vector_min_max_fp16 $dst, $src1, $src2" %}
|
||||
ins_encode %{
|
||||
int vlen_enc = vector_length_encoding(this);
|
||||
int function = this->ideal_Opcode() == Op_MinVHF ? AVX10_MINMAX_MIN_COMPARE_SIGN : AVX10_MINMAX_MAX_COMPARE_SIGN;
|
||||
int function = this->ideal_Opcode() == Op_MinVHF ? AVX10_2_MINMAX_MIN_COMPARE_SIGN : AVX10_2_MINMAX_MAX_COMPARE_SIGN;
|
||||
__ evminmaxph($dst$$XMMRegister, k0, $src1$$XMMRegister, $src2$$XMMRegister, true, function, vlen_enc);
|
||||
%}
|
||||
ins_pipe( pipe_slow );
|
||||
|
||||
@@ -2333,8 +2333,8 @@ int os::open(const char *path, int oflag, int mode) {
|
||||
|
||||
if (ret != -1) {
|
||||
if ((st_mode & S_IFMT) == S_IFDIR) {
|
||||
errno = EISDIR;
|
||||
::close(fd);
|
||||
errno = EISDIR;
|
||||
return -1;
|
||||
}
|
||||
} else {
|
||||
|
||||
@@ -861,6 +861,90 @@ pid_t os::Bsd::gettid() {
|
||||
}
|
||||
}
|
||||
|
||||
// Returns the uid of a process or -1 on error.
|
||||
uid_t os::Bsd::get_process_uid(pid_t pid) {
|
||||
struct kinfo_proc kp;
|
||||
size_t size = sizeof kp;
|
||||
int mib_kern[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, pid};
|
||||
if (sysctl(mib_kern, 4, &kp, &size, nullptr, 0) == 0) {
|
||||
if (size > 0 && kp.kp_proc.p_pid == pid) {
|
||||
return kp.kp_eproc.e_ucred.cr_uid;
|
||||
}
|
||||
}
|
||||
return (uid_t)-1;
|
||||
}
|
||||
|
||||
// Returns true if the process is running as root.
|
||||
bool os::Bsd::is_process_root(pid_t pid) {
|
||||
uid_t uid = get_process_uid(pid);
|
||||
return (uid != (uid_t)-1) ? os::Posix::is_root(uid) : false;
|
||||
}
|
||||
|
||||
#ifdef __APPLE__
|
||||
|
||||
// macOS has a secure per-user temporary directory.
|
||||
// Root can attach to a non-root process, hence it needs
|
||||
// to lookup /var/folders for the user specific temporary directory
|
||||
// of the form /var/folders/*/*/T, that contains PERFDATA_NAME_user
|
||||
// directory.
|
||||
static const char VAR_FOLDERS[] = "/var/folders/";
|
||||
int os::Bsd::get_user_tmp_dir_macos(const char* user, int vmid, char* output_path, int output_size) {
|
||||
|
||||
// read the var/folders directory
|
||||
DIR* varfolders_dir = os::opendir(VAR_FOLDERS);
|
||||
if (varfolders_dir != nullptr) {
|
||||
|
||||
// var/folders directory contains 2-characters subdirectories (buckets)
|
||||
struct dirent* bucket_de;
|
||||
|
||||
// loop until the PERFDATA_NAME_user directory has been found
|
||||
while ((bucket_de = os::readdir(varfolders_dir)) != nullptr) {
|
||||
// skip over files and special "." and ".."
|
||||
if (bucket_de->d_type != DT_DIR || bucket_de->d_name[0] == '.') {
|
||||
continue;
|
||||
}
|
||||
// absolute path to the bucket
|
||||
char bucket[PATH_MAX];
|
||||
int b = os::snprintf(bucket, PATH_MAX, "%s%s/", VAR_FOLDERS, bucket_de->d_name);
|
||||
|
||||
// the total length of the absolute path must not exceed the buffer size
|
||||
if (b >= PATH_MAX || b < 0) {
|
||||
continue;
|
||||
}
|
||||
// each bucket contains next level subdirectories
|
||||
DIR* bucket_dir = os::opendir(bucket);
|
||||
if (bucket_dir == nullptr) {
|
||||
continue;
|
||||
}
|
||||
// read each subdirectory, skipping over regular files
|
||||
struct dirent* subbucket_de;
|
||||
while ((subbucket_de = os::readdir(bucket_dir)) != nullptr) {
|
||||
if (subbucket_de->d_type != DT_DIR || subbucket_de->d_name[0] == '.') {
|
||||
continue;
|
||||
}
|
||||
// If the PERFDATA_NAME_user directory exists in the T subdirectory,
|
||||
// this means the subdirectory is the temporary directory of the user.
|
||||
char perfdata_path[PATH_MAX];
|
||||
int p = os::snprintf(perfdata_path, PATH_MAX, "%s%s/T/%s_%s/", bucket, subbucket_de->d_name, PERFDATA_NAME, user);
|
||||
|
||||
// the total length must not exceed the output buffer size
|
||||
if (p >= PATH_MAX || p < 0) {
|
||||
continue;
|
||||
}
|
||||
// check if the subdirectory exists
|
||||
if (os::file_exists(perfdata_path)) {
|
||||
// the return value of snprintf is not checked for the second time
|
||||
return os::snprintf(output_path, output_size, "%s%s/T", bucket, subbucket_de->d_name);
|
||||
}
|
||||
}
|
||||
os::closedir(bucket_dir);
|
||||
}
|
||||
os::closedir(varfolders_dir);
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
#endif
|
||||
|
||||
intx os::current_thread_id() {
|
||||
#ifdef __APPLE__
|
||||
return (intx)os::Bsd::gettid();
|
||||
@@ -2277,8 +2361,8 @@ int os::open(const char *path, int oflag, int mode) {
|
||||
|
||||
if (ret != -1) {
|
||||
if ((st_mode & S_IFMT) == S_IFDIR) {
|
||||
errno = EISDIR;
|
||||
::close(fd);
|
||||
errno = EISDIR;
|
||||
return -1;
|
||||
}
|
||||
} else {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -61,6 +61,12 @@ class os::Bsd {
|
||||
static pthread_t main_thread(void) { return _main_thread; }
|
||||
|
||||
static pid_t gettid();
|
||||
static uid_t get_process_uid(pid_t pid);
|
||||
static bool is_process_root(pid_t pid);
|
||||
|
||||
#ifdef __APPLE__
|
||||
static int get_user_tmp_dir_macos(const char* user, int vmid, char* output_buffer, int buffer_size);
|
||||
#endif
|
||||
|
||||
static intptr_t* ucontext_get_sp(const ucontext_t* uc);
|
||||
static intptr_t* ucontext_get_fp(const ucontext_t* uc);
|
||||
|
||||
@@ -467,9 +467,9 @@ void CgroupV1MemoryController::print_version_specific_info(outputStream* st, phy
|
||||
kmem_max_usage.set_value(temp);
|
||||
}
|
||||
|
||||
OSContainer::print_container_helper(st, kmem_limit, "kernel_memory_limit_in_bytes");
|
||||
OSContainer::print_container_helper(st, kmem_usage, "kernel_memory_usage_in_bytes");
|
||||
OSContainer::print_container_helper(st, kmem_max_usage, "kernel_memory_max_usage_in_bytes");
|
||||
OSContainer::print_container_helper(st, kmem_limit, "kernel_memory_limit");
|
||||
OSContainer::print_container_helper(st, kmem_usage, "kernel_memory_usage");
|
||||
OSContainer::print_container_helper(st, kmem_max_usage, "kernel_memory_max_usage");
|
||||
}
|
||||
|
||||
char* CgroupV1Subsystem::cpu_cpuset_cpus() {
|
||||
|
||||
@@ -378,8 +378,8 @@ void CgroupV2MemoryController::print_version_specific_info(outputStream* st, phy
|
||||
if (memory_swap_limit_value(reader(), swap_limit_val)) {
|
||||
swap_limit.set_value(swap_limit_val);
|
||||
}
|
||||
OSContainer::print_container_helper(st, swap_current, "memory_swap_current_in_bytes");
|
||||
OSContainer::print_container_helper(st, swap_limit, "memory_swap_max_limit_in_bytes");
|
||||
OSContainer::print_container_helper(st, swap_current, "memory_swap_current");
|
||||
OSContainer::print_container_helper(st, swap_limit, "memory_swap_max_limit");
|
||||
}
|
||||
|
||||
char* CgroupV2Controller::construct_path(char* mount_path, const char* cgroup_path) {
|
||||
|
||||
@@ -287,20 +287,44 @@ bool OSContainer::pids_current(uint64_t& value) {
|
||||
return cgroup_subsystem->pids_current(value);
|
||||
}
|
||||
|
||||
template<typename T> struct metric_fmt;
|
||||
template<> struct metric_fmt<unsigned long long int> { static constexpr const char* fmt = "%llu"; };
|
||||
template<> struct metric_fmt<unsigned long int> { static constexpr const char* fmt = "%lu"; };
|
||||
template<> struct metric_fmt<int> { static constexpr const char* fmt = "%d"; };
|
||||
template<> struct metric_fmt<const char*> { static constexpr const char* fmt = "%s"; };
|
||||
|
||||
template void OSContainer::print_container_metric<unsigned long long int>(outputStream*, const char*, unsigned long long int, const char*);
|
||||
template void OSContainer::print_container_metric<unsigned long int>(outputStream*, const char*, unsigned long int, const char*);
|
||||
template void OSContainer::print_container_metric<int>(outputStream*, const char*, int, const char*);
|
||||
template void OSContainer::print_container_metric<const char*>(outputStream*, const char*, const char*, const char*);
|
||||
|
||||
template <typename T>
|
||||
void OSContainer::print_container_metric(outputStream* st, const char* metrics, T value, const char* unit) {
|
||||
constexpr int max_length = 38; // Longest "metric: value" string ("maximum number of tasks: not supported")
|
||||
constexpr int longest_value = max_length - 11; // Max length - shortest "metric: " string ("cpu_quota: ")
|
||||
char value_str[longest_value + 1] = {};
|
||||
os::snprintf_checked(value_str, longest_value, metric_fmt<T>::fmt, value);
|
||||
st->print("%s: %*s", metrics, max_length - static_cast<int>(strlen(metrics)) - 2, value_str); // -2 for the ": "
|
||||
if (unit[0] != '\0') {
|
||||
st->print_cr(" %s", unit);
|
||||
} else {
|
||||
st->print_cr("");
|
||||
}
|
||||
}
|
||||
|
||||
void OSContainer::print_container_helper(outputStream* st, MetricResult& res, const char* metrics) {
|
||||
st->print("%s: ", metrics);
|
||||
if (res.success()) {
|
||||
if (res.value() != value_unlimited) {
|
||||
if (res.value() >= 1024) {
|
||||
st->print_cr(PHYS_MEM_TYPE_FORMAT " k", (physical_memory_size_type)(res.value() / K));
|
||||
print_container_metric(st, metrics, res.value() / K, "kB");
|
||||
} else {
|
||||
st->print_cr(PHYS_MEM_TYPE_FORMAT, res.value());
|
||||
print_container_metric(st, metrics, res.value(), "B");
|
||||
}
|
||||
} else {
|
||||
st->print_cr("%s", "unlimited");
|
||||
print_container_metric(st, metrics, "unlimited");
|
||||
}
|
||||
} else {
|
||||
// Not supported
|
||||
st->print_cr("%s", "unavailable");
|
||||
print_container_metric(st, metrics, "unavailable");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -65,6 +65,8 @@ class OSContainer: AllStatic {
|
||||
static void init();
|
||||
static void print_version_specific_info(outputStream* st);
|
||||
static void print_container_helper(outputStream* st, MetricResult& res, const char* metrics);
|
||||
template <typename T>
|
||||
static void print_container_metric(outputStream* st, const char* metrics, T value, const char* unit = "");
|
||||
|
||||
static inline bool is_containerized();
|
||||
static const char * container_type();
|
||||
|
||||
@@ -2432,62 +2432,57 @@ bool os::Linux::print_container_info(outputStream* st) {
|
||||
st->print_cr("container (cgroup) information:");
|
||||
|
||||
const char *p_ct = OSContainer::container_type();
|
||||
st->print_cr("container_type: %s", p_ct != nullptr ? p_ct : "not supported");
|
||||
OSContainer::print_container_metric(st, "container_type", p_ct != nullptr ? p_ct : "not supported");
|
||||
|
||||
char *p = OSContainer::cpu_cpuset_cpus();
|
||||
st->print_cr("cpu_cpuset_cpus: %s", p != nullptr ? p : "not supported");
|
||||
OSContainer::print_container_metric(st, "cpu_cpuset_cpus", p != nullptr ? p : "not supported");
|
||||
free(p);
|
||||
|
||||
p = OSContainer::cpu_cpuset_memory_nodes();
|
||||
st->print_cr("cpu_memory_nodes: %s", p != nullptr ? p : "not supported");
|
||||
OSContainer::print_container_metric(st, "cpu_memory_nodes", p != nullptr ? p : "not supported");
|
||||
free(p);
|
||||
|
||||
int i = -1;
|
||||
bool supported = OSContainer::active_processor_count(i);
|
||||
st->print("active_processor_count: ");
|
||||
if (supported) {
|
||||
assert(i > 0, "must be");
|
||||
if (ActiveProcessorCount > 0) {
|
||||
st->print_cr("%d, but overridden by -XX:ActiveProcessorCount %d", i, ActiveProcessorCount);
|
||||
OSContainer::print_container_metric(st, "active_processor_count", ActiveProcessorCount, "(from -XX:ActiveProcessorCount)");
|
||||
} else {
|
||||
st->print_cr("%d", i);
|
||||
OSContainer::print_container_metric(st, "active_processor_count", i);
|
||||
}
|
||||
} else {
|
||||
st->print_cr("not supported");
|
||||
OSContainer::print_container_metric(st, "active_processor_count", "not supported");
|
||||
}
|
||||
|
||||
|
||||
supported = OSContainer::cpu_quota(i);
|
||||
st->print("cpu_quota: ");
|
||||
if (supported && i > 0) {
|
||||
st->print_cr("%d", i);
|
||||
OSContainer::print_container_metric(st, "cpu_quota", i);
|
||||
} else {
|
||||
st->print_cr("%s", !supported ? "not supported" : "no quota");
|
||||
OSContainer::print_container_metric(st, "cpu_quota", !supported ? "not supported" : "no quota");
|
||||
}
|
||||
|
||||
supported = OSContainer::cpu_period(i);
|
||||
st->print("cpu_period: ");
|
||||
if (supported && i > 0) {
|
||||
st->print_cr("%d", i);
|
||||
OSContainer::print_container_metric(st, "cpu_period", i);
|
||||
} else {
|
||||
st->print_cr("%s", !supported ? "not supported" : "no period");
|
||||
OSContainer::print_container_metric(st, "cpu_period", !supported ? "not supported" : "no period");
|
||||
}
|
||||
|
||||
supported = OSContainer::cpu_shares(i);
|
||||
st->print("cpu_shares: ");
|
||||
if (supported && i > 0) {
|
||||
st->print_cr("%d", i);
|
||||
OSContainer::print_container_metric(st, "cpu_shares", i);
|
||||
} else {
|
||||
st->print_cr("%s", !supported ? "not supported" : "no shares");
|
||||
OSContainer::print_container_metric(st, "cpu_shares", !supported ? "not supported" : "no shares");
|
||||
}
|
||||
|
||||
uint64_t j = 0;
|
||||
supported = OSContainer::cpu_usage_in_micros(j);
|
||||
st->print("cpu_usage_in_micros: ");
|
||||
if (supported && j > 0) {
|
||||
st->print_cr(UINT64_FORMAT, j);
|
||||
OSContainer::print_container_metric(st, "cpu_usage", j, "us");
|
||||
} else {
|
||||
st->print_cr("%s", !supported ? "not supported" : "no usage");
|
||||
OSContainer::print_container_metric(st, "cpu_usage", !supported ? "not supported" : "no usage");
|
||||
}
|
||||
|
||||
MetricResult memory_limit;
|
||||
@@ -2530,31 +2525,29 @@ bool os::Linux::print_container_info(outputStream* st) {
|
||||
if (OSContainer::cache_usage_in_bytes(val)) {
|
||||
cache_usage.set_value(val);
|
||||
}
|
||||
OSContainer::print_container_helper(st, memory_limit, "memory_limit_in_bytes");
|
||||
OSContainer::print_container_helper(st, mem_swap_limit, "memory_and_swap_limit_in_bytes");
|
||||
OSContainer::print_container_helper(st, mem_soft_limit, "memory_soft_limit_in_bytes");
|
||||
OSContainer::print_container_helper(st, mem_throttle_limit, "memory_throttle_limit_in_bytes");
|
||||
OSContainer::print_container_helper(st, mem_usage, "memory_usage_in_bytes");
|
||||
OSContainer::print_container_helper(st, mem_max_usage, "memory_max_usage_in_bytes");
|
||||
OSContainer::print_container_helper(st, rss_usage, "rss_usage_in_bytes");
|
||||
OSContainer::print_container_helper(st, cache_usage, "cache_usage_in_bytes");
|
||||
OSContainer::print_container_helper(st, memory_limit, "memory_limit");
|
||||
OSContainer::print_container_helper(st, mem_swap_limit, "memory_and_swap_limit");
|
||||
OSContainer::print_container_helper(st, mem_soft_limit, "memory_soft_limit");
|
||||
OSContainer::print_container_helper(st, mem_throttle_limit, "memory_throttle_limit");
|
||||
OSContainer::print_container_helper(st, mem_usage, "memory_usage");
|
||||
OSContainer::print_container_helper(st, mem_max_usage, "memory_max_usage");
|
||||
OSContainer::print_container_helper(st, rss_usage, "rss_usage");
|
||||
OSContainer::print_container_helper(st, cache_usage, "cache_usage");
|
||||
|
||||
OSContainer::print_version_specific_info(st);
|
||||
|
||||
supported = OSContainer::pids_max(j);
|
||||
st->print("maximum number of tasks: ");
|
||||
if (supported && j != value_unlimited) {
|
||||
st->print_cr(UINT64_FORMAT, j);
|
||||
OSContainer::print_container_metric(st, "maximum number of tasks", j);
|
||||
} else {
|
||||
st->print_cr("%s", !supported ? "not supported" : "unlimited");
|
||||
OSContainer::print_container_metric(st, "maximum number of tasks", !supported ? "not supported" : "unlimited");
|
||||
}
|
||||
|
||||
supported = OSContainer::pids_current(j);
|
||||
st->print("current number of tasks: ");
|
||||
if (supported && j > 0) {
|
||||
st->print_cr(UINT64_FORMAT, j);
|
||||
OSContainer::print_container_metric(st, "current number of tasks", j);
|
||||
} else {
|
||||
st->print_cr("%s", !supported ? "not supported" : "no current tasks");
|
||||
OSContainer::print_container_metric(st, "current number of tasks", !supported ? "not supported" : "no tasks");
|
||||
}
|
||||
|
||||
return true;
|
||||
@@ -4298,13 +4291,6 @@ OSReturn os::get_native_priority(const Thread* const thread,
|
||||
return (*priority_ptr != -1 || errno == 0 ? OS_OK : OS_ERR);
|
||||
}
|
||||
|
||||
// This is the fastest way to get thread cpu time on Linux.
|
||||
// Returns cpu time (user+sys) for any thread, not only for current.
|
||||
// POSIX compliant clocks are implemented in the kernels 2.6.16+.
|
||||
// It might work on 2.6.10+ with a special kernel/glibc patch.
|
||||
// For reference, please, see IEEE Std 1003.1-2004:
|
||||
// http://www.unix.org/single_unix_specification
|
||||
|
||||
jlong os::Linux::thread_cpu_time(clockid_t clockid) {
|
||||
struct timespec tp;
|
||||
int status = clock_gettime(clockid, &tp);
|
||||
@@ -4932,8 +4918,8 @@ int os::open(const char *path, int oflag, int mode) {
|
||||
|
||||
if (ret != -1) {
|
||||
if ((st_mode & S_IFMT) == S_IFDIR) {
|
||||
errno = EISDIR;
|
||||
::close(fd);
|
||||
errno = EISDIR;
|
||||
return -1;
|
||||
}
|
||||
} else {
|
||||
|
||||
@@ -1028,6 +1028,7 @@ char* os::realpath(const char* filename, char* outbuf, size_t outbuflen) {
|
||||
} else {
|
||||
errno = ENAMETOOLONG;
|
||||
}
|
||||
ErrnoPreserver ep;
|
||||
permit_forbidden_function::free(p); // *not* os::free
|
||||
} else {
|
||||
// Fallback for platforms struggling with modern Posix standards (AIX 5.3, 6.1). If realpath
|
||||
@@ -1351,6 +1352,10 @@ bool os::Posix::is_root(uid_t uid){
|
||||
return ROOT_UID == uid;
|
||||
}
|
||||
|
||||
bool os::Posix::is_current_user_root(){
|
||||
return is_root(geteuid());
|
||||
}
|
||||
|
||||
bool os::Posix::matches_effective_uid_or_root(uid_t uid) {
|
||||
return is_root(uid) || geteuid() == uid;
|
||||
}
|
||||
|
||||
@@ -76,6 +76,9 @@ public:
|
||||
// Returns true if given uid is root.
|
||||
static bool is_root(uid_t uid);
|
||||
|
||||
// Returns true if the current user is root.
|
||||
static bool is_current_user_root();
|
||||
|
||||
// Returns true if given uid is effective or root uid.
|
||||
static bool matches_effective_uid_or_root(uid_t uid);
|
||||
|
||||
|
||||
@@ -40,6 +40,9 @@
|
||||
#if defined(LINUX)
|
||||
#include "os_linux.hpp"
|
||||
#endif
|
||||
#if defined(BSD)
|
||||
#include "os_bsd.hpp"
|
||||
#endif
|
||||
|
||||
# include <errno.h>
|
||||
# include <pwd.h>
|
||||
@@ -142,6 +145,18 @@ static char* get_user_tmp_dir(const char* user, int vmid, int nspid) {
|
||||
jio_snprintf(buffer, TMP_BUFFER_LEN, "/proc/%d/root%s", vmid, tmpdir);
|
||||
tmpdir = buffer;
|
||||
}
|
||||
#endif
|
||||
#ifdef __APPLE__
|
||||
char buffer[PATH_MAX] = {0};
|
||||
// Check if the current user is root and the target VM is running as non-root.
|
||||
// Otherwise the output of os::get_temp_directory() is used.
|
||||
//
|
||||
if (os::Posix::is_current_user_root() && !os::Bsd::is_process_root(vmid)) {
|
||||
int path_size = os::Bsd::get_user_tmp_dir_macos(user, vmid, buffer, sizeof buffer);
|
||||
if (path_size > 0 && (size_t)path_size < sizeof buffer) {
|
||||
tmpdir = buffer;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
const char* perfdir = PERFDATA_NAME;
|
||||
size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3;
|
||||
@@ -1138,7 +1153,8 @@ static void mmap_attach_shared(int vmid, char** addr, size_t* sizep, TRAPS) {
|
||||
|
||||
// for linux, determine if vmid is for a containerized process
|
||||
int nspid = LINUX_ONLY(os::Linux::get_namespace_pid(vmid)) NOT_LINUX(-1);
|
||||
const char* luser = get_user_name(vmid, &nspid, CHECK);
|
||||
const char* luser = NOT_MACOS(get_user_name(vmid, &nspid, CHECK))
|
||||
MACOS_ONLY(get_user_name(os::Bsd::get_process_uid(vmid)));
|
||||
|
||||
if (luser == nullptr) {
|
||||
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
|
||||
|
||||
@@ -1645,7 +1645,7 @@ static void SR_handler(int sig, siginfo_t* siginfo, void* context) {
|
||||
|
||||
// Save and restore errno to avoid confusing native code with EINTR
|
||||
// after sigsuspend.
|
||||
int old_errno = errno;
|
||||
ErrnoPreserver ep;
|
||||
|
||||
PosixSignals::unblock_error_signals();
|
||||
|
||||
@@ -1669,21 +1669,21 @@ static void SR_handler(int sig, siginfo_t* siginfo, void* context) {
|
||||
|
||||
// On some systems we have seen signal delivery get "stuck" until the signal
|
||||
// mask is changed as part of thread termination. Check that the current thread
|
||||
// has not already terminated - else the following assertion
|
||||
// will fail because the thread is no longer a JavaThread as the ~JavaThread
|
||||
// destructor has completed.
|
||||
|
||||
// has not already terminated, else the osthread may already have been freed.
|
||||
if (thread->has_terminated()) {
|
||||
return;
|
||||
}
|
||||
|
||||
assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
|
||||
|
||||
OSThread* osthread = thread->osthread();
|
||||
|
||||
SuspendResume::State current = osthread->sr.state();
|
||||
|
||||
if (current == SuspendResume::SR_SUSPEND_REQUEST) {
|
||||
// Only check this on an active suspend request. It is possible to get a late delivered
|
||||
// signal from a cancelled suspend request that hits after the JavaThread destructor
|
||||
// completes, but before the Thread destructor causes `is_terminated()` to be true.
|
||||
assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
|
||||
|
||||
suspend_save_context(osthread, siginfo, context);
|
||||
|
||||
// attempt to switch the state, we assume we had a SUSPEND_REQUEST
|
||||
@@ -1727,7 +1727,6 @@ static void SR_handler(int sig, siginfo_t* siginfo, void* context) {
|
||||
// ignore
|
||||
}
|
||||
|
||||
errno = old_errno;
|
||||
}
|
||||
|
||||
static int SR_initialize() {
|
||||
|
||||
@@ -4782,8 +4782,8 @@ int os::stat(const char *path, struct stat *sbuf) {
|
||||
path_to_target = get_path_to_target(wide_path);
|
||||
if (path_to_target == nullptr) {
|
||||
// it is a symbolic link, but we failed to resolve it
|
||||
errno = ENOENT;
|
||||
os::free(wide_path);
|
||||
errno = ENOENT;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
@@ -4794,14 +4794,14 @@ int os::stat(const char *path, struct stat *sbuf) {
|
||||
// if getting attributes failed, GetLastError should be called immediately after that
|
||||
if (!bret) {
|
||||
DWORD errcode = ::GetLastError();
|
||||
log_debug(os)("os::stat() failed to GetFileAttributesExW: GetLastError->%lu.", errcode);
|
||||
os::free(wide_path);
|
||||
os::free(path_to_target);
|
||||
if (errcode == ERROR_FILE_NOT_FOUND || errcode == ERROR_PATH_NOT_FOUND) {
|
||||
errno = ENOENT;
|
||||
} else {
|
||||
errno = 0;
|
||||
}
|
||||
log_debug(os)("os::stat() failed to GetFileAttributesExW: GetLastError->%lu.", errcode);
|
||||
os::free(wide_path);
|
||||
os::free(path_to_target);
|
||||
return -1;
|
||||
}
|
||||
|
||||
@@ -5000,8 +5000,8 @@ int os::open(const char *path, int oflag, int mode) {
|
||||
path_to_target = get_path_to_target(wide_path);
|
||||
if (path_to_target == nullptr) {
|
||||
// it is a symbolic link, but we failed to resolve it
|
||||
errno = ENOENT;
|
||||
os::free(wide_path);
|
||||
errno = ENOENT;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
@@ -5275,6 +5275,7 @@ char* os::realpath(const char* filename, char* outbuf, size_t outbuflen) {
|
||||
} else {
|
||||
errno = ENAMETOOLONG;
|
||||
}
|
||||
ErrnoPreserver ep;
|
||||
permit_forbidden_function::free(p); // *not* os::free
|
||||
}
|
||||
return result;
|
||||
|
||||
@@ -256,6 +256,9 @@ void RiscvHwprobe::add_features_from_query_result() {
|
||||
is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZVKT)) {
|
||||
VM_Version::ext_Zvkn.enable_feature();
|
||||
}
|
||||
if (is_set(RISCV_HWPROBE_KEY_IMA_EXT_0, RISCV_HWPROBE_EXT_ZVKG)) {
|
||||
VM_Version::ext_Zvkg.enable_feature();
|
||||
}
|
||||
#endif
|
||||
|
||||
// ====== non-extensions ======
|
||||
|
||||
@@ -145,7 +145,7 @@ void AOTArtifactFinder::find_artifacts() {
|
||||
|
||||
#if INCLUDE_CDS_JAVA_HEAP
|
||||
// Keep scanning until we discover no more class that need to be AOT-initialized.
|
||||
if (CDSConfig::is_initing_classes_at_dump_time()) {
|
||||
if (CDSConfig::is_dumping_aot_linked_classes()) {
|
||||
while (_pending_aot_inited_classes->length() > 0) {
|
||||
InstanceKlass* ik = _pending_aot_inited_classes->pop();
|
||||
HeapShared::copy_and_rescan_aot_inited_mirror(ik);
|
||||
@@ -188,7 +188,7 @@ void AOTArtifactFinder::end_scanning_for_oops() {
|
||||
}
|
||||
|
||||
void AOTArtifactFinder::add_aot_inited_class(InstanceKlass* ik) {
|
||||
if (CDSConfig::is_initing_classes_at_dump_time()) {
|
||||
if (CDSConfig::is_dumping_aot_linked_classes()) {
|
||||
if (RegeneratedClasses::is_regenerated_object(ik)) {
|
||||
precond(RegeneratedClasses::get_original_object(ik)->is_initialized());
|
||||
} else {
|
||||
@@ -258,7 +258,7 @@ void AOTArtifactFinder::add_cached_instance_class(InstanceKlass* ik) {
|
||||
return;
|
||||
}
|
||||
scan_oops_in_instance_class(ik);
|
||||
if (ik->is_hidden() && CDSConfig::is_initing_classes_at_dump_time()) {
|
||||
if (ik->is_hidden() && CDSConfig::is_dumping_aot_linked_classes()) {
|
||||
bool succeed = AOTClassLinker::try_add_candidate(ik);
|
||||
guarantee(succeed, "All cached hidden classes must be aot-linkable");
|
||||
add_aot_inited_class(ik);
|
||||
|
||||
@@ -40,7 +40,7 @@ DEBUG_ONLY(InstanceKlass* _aot_init_class = nullptr;)
|
||||
|
||||
bool AOTClassInitializer::can_archive_initialized_mirror(InstanceKlass* ik) {
|
||||
assert(!ArchiveBuilder::is_active() || !ArchiveBuilder::current()->is_in_buffer_space(ik), "must be source klass");
|
||||
if (!CDSConfig::is_initing_classes_at_dump_time()) {
|
||||
if (!CDSConfig::is_dumping_aot_linked_classes()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -64,7 +64,7 @@ bool AOTClassInitializer::can_archive_initialized_mirror(InstanceKlass* ik) {
|
||||
// Automatic selection for aot-inited classes
|
||||
// ==========================================
|
||||
//
|
||||
// When CDSConfig::is_initing_classes_at_dump_time is enabled,
|
||||
// When CDSConfig::is_dumping_aot_linked_classes is enabled,
|
||||
// AOTArtifactFinder::find_artifacts() finds the classes of all
|
||||
// heap objects that are reachable from HeapShared::_run_time_special_subgraph,
|
||||
// and mark these classes as aot-inited. This preserves the initialized
|
||||
@@ -310,7 +310,7 @@ void AOTClassInitializer::init_test_class(TRAPS) {
|
||||
//
|
||||
// -XX:AOTInitTestClass is NOT a general mechanism for including user-defined objects into
|
||||
// the AOT cache. Therefore, this option is NOT available in product JVM.
|
||||
if (AOTInitTestClass != nullptr && CDSConfig::is_initing_classes_at_dump_time()) {
|
||||
if (AOTInitTestClass != nullptr && CDSConfig::is_dumping_aot_linked_classes()) {
|
||||
log_info(aot)("Debug build only: force initialization of AOTInitTestClass %s", AOTInitTestClass);
|
||||
TempNewSymbol class_name = SymbolTable::new_symbol(AOTInitTestClass);
|
||||
Handle app_loader(THREAD, SystemDictionary::java_system_loader());
|
||||
|
||||
@@ -96,6 +96,7 @@
|
||||
#include "runtime/vmOperations.hpp"
|
||||
#include "runtime/vmThread.hpp"
|
||||
#include "sanitizers/leak.hpp"
|
||||
#include "services/management.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/bitMap.inline.hpp"
|
||||
#include "utilities/defaultStream.hpp"
|
||||
@@ -1140,7 +1141,7 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
|
||||
AOTReferenceObjSupport::initialize(CHECK);
|
||||
AOTReferenceObjSupport::stabilize_cached_reference_objects(CHECK);
|
||||
|
||||
if (CDSConfig::is_initing_classes_at_dump_time()) {
|
||||
if (CDSConfig::is_dumping_aot_linked_classes()) {
|
||||
// java.lang.Class::reflectionFactory cannot be archived yet. We set this field
|
||||
// to null, and it will be initialized again at runtime.
|
||||
log_debug(aot)("Resetting Class::reflectionFactory");
|
||||
@@ -2204,7 +2205,7 @@ void AOTMetaspace::initialize_shared_spaces() {
|
||||
CountSharedSymbols cl;
|
||||
SymbolTable::shared_symbols_do(&cl);
|
||||
tty->print_cr("Number of shared symbols: %zu", cl.total());
|
||||
if (HeapShared::is_loading_mapping_mode()) {
|
||||
if (HeapShared::is_loading() && HeapShared::is_loading_mapping_mode()) {
|
||||
tty->print_cr("Number of shared strings: %zu", StringTable::shared_entry_count());
|
||||
}
|
||||
tty->print_cr("VM version: %s\r\n", static_mapinfo->vm_version());
|
||||
|
||||
@@ -1026,23 +1026,19 @@ void CDSConfig::set_has_aot_linked_classes(bool has_aot_linked_classes) {
|
||||
_has_aot_linked_classes |= has_aot_linked_classes;
|
||||
}
|
||||
|
||||
bool CDSConfig::is_initing_classes_at_dump_time() {
|
||||
return is_dumping_heap() && is_dumping_aot_linked_classes();
|
||||
}
|
||||
|
||||
bool CDSConfig::is_dumping_invokedynamic() {
|
||||
// Requires is_dumping_aot_linked_classes(). Otherwise the classes of some archived heap
|
||||
// objects used by the archive indy callsites may be replaced at runtime.
|
||||
return AOTInvokeDynamicLinking && is_dumping_aot_linked_classes() && is_dumping_heap();
|
||||
}
|
||||
|
||||
// When we are dumping aot-linked classes and we are able to write archived heap objects, we automatically
|
||||
// enable the archiving of MethodHandles. This will in turn enable the archiving of MethodTypes and hidden
|
||||
// When we are dumping aot-linked classes, we automatically enable the archiving of MethodHandles.
|
||||
// This will in turn enable the archiving of MethodTypes and hidden
|
||||
// classes that are used in the implementation of MethodHandles.
|
||||
// Archived MethodHandles are required for higher-level optimizations such as AOT resolution of invokedynamic
|
||||
// and dynamic proxies.
|
||||
bool CDSConfig::is_dumping_method_handles() {
|
||||
return is_initing_classes_at_dump_time();
|
||||
return is_dumping_aot_linked_classes();
|
||||
}
|
||||
|
||||
#endif // INCLUDE_CDS_JAVA_HEAP
|
||||
|
||||
@@ -187,7 +187,6 @@ public:
|
||||
static void disable_heap_dumping() { CDS_ONLY(_disable_heap_dumping = true); }
|
||||
static bool is_dumping_heap() NOT_CDS_JAVA_HEAP_RETURN_(false);
|
||||
static bool is_loading_heap() NOT_CDS_JAVA_HEAP_RETURN_(false);
|
||||
static bool is_initing_classes_at_dump_time() NOT_CDS_JAVA_HEAP_RETURN_(false);
|
||||
|
||||
static bool is_dumping_invokedynamic() NOT_CDS_JAVA_HEAP_RETURN_(false);
|
||||
static bool is_dumping_method_handles() NOT_CDS_JAVA_HEAP_RETURN_(false);
|
||||
|
||||
@@ -40,7 +40,7 @@ bool CDSEnumKlass::is_enum_obj(oop orig_obj) {
|
||||
}
|
||||
|
||||
// !!! This is legacy support for enum classes before JEP 483. This file is not used when
|
||||
// !!! CDSConfig::is_initing_classes_at_dump_time()==true.
|
||||
// !!! CDSConfig::is_dumping_aot_linked_classes()==true.
|
||||
//
|
||||
// Java Enum classes have synthetic <clinit> methods that look like this
|
||||
// enum MyEnum {FOO, BAR}
|
||||
@@ -63,7 +63,7 @@ bool CDSEnumKlass::is_enum_obj(oop orig_obj) {
|
||||
void CDSEnumKlass::handle_enum_obj(int level,
|
||||
KlassSubGraphInfo* subgraph_info,
|
||||
oop orig_obj) {
|
||||
assert(!CDSConfig::is_initing_classes_at_dump_time(), "only for legacy support of enums");
|
||||
assert(!CDSConfig::is_dumping_aot_linked_classes(), "only for legacy support of enums");
|
||||
assert(level > 1, "must never be called at the first (outermost) level");
|
||||
assert(is_enum_obj(orig_obj), "must be");
|
||||
|
||||
|
||||
@@ -35,7 +35,7 @@ class JavaFieldStream;
|
||||
class KlassSubGraphInfo;
|
||||
|
||||
// This is legacy support for enum classes before JEP 483. This code is not needed when
|
||||
// CDSConfig::is_initing_classes_at_dump_time()==true.
|
||||
// CDSConfig::is_dumping_aot_linked_classes()==true.
|
||||
class CDSEnumKlass: AllStatic {
|
||||
public:
|
||||
static bool is_enum_obj(oop orig_obj);
|
||||
|
||||
@@ -156,7 +156,7 @@ CDSHeapVerifier::CDSHeapVerifier() : _archived_objs(0), _problems(0)
|
||||
|
||||
# undef ADD_EXCL
|
||||
|
||||
if (CDSConfig::is_initing_classes_at_dump_time()) {
|
||||
if (CDSConfig::is_dumping_aot_linked_classes()) {
|
||||
add_shared_secret_accessors();
|
||||
}
|
||||
ClassLoaderDataGraph::classes_do(this);
|
||||
|
||||
@@ -206,6 +206,8 @@ void FinalImageRecipes::load_all_classes(TRAPS) {
|
||||
|
||||
if (ik->has_aot_safe_initializer() && (flags & WAS_INITED) != 0) {
|
||||
assert(ik->class_loader() == nullptr, "supported only for boot classes for now");
|
||||
ResourceMark rm(THREAD);
|
||||
log_info(aot, init)("Initializing %s", ik->external_name());
|
||||
ik->initialize(CHECK);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -209,8 +209,14 @@ static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], Instan
|
||||
}
|
||||
|
||||
bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
|
||||
assert(CDSConfig::is_dumping_heap(), "dump-time only");
|
||||
if (!CDSConfig::is_dumping_aot_linked_classes()) {
|
||||
// Legacy CDS archive support (to be deprecated)
|
||||
return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
|
||||
is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
oop HeapShared::CachedOopInfo::orig_referrer() const {
|
||||
@@ -934,12 +940,16 @@ void HeapShared::scan_java_class(Klass* orig_k) {
|
||||
void HeapShared::archive_subgraphs() {
|
||||
assert(CDSConfig::is_dumping_heap(), "must be");
|
||||
|
||||
if (!CDSConfig::is_dumping_aot_linked_classes()) {
|
||||
archive_object_subgraphs(archive_subgraph_entry_fields,
|
||||
false /* is_full_module_graph */);
|
||||
|
||||
if (CDSConfig::is_dumping_full_module_graph()) {
|
||||
archive_object_subgraphs(fmg_archive_subgraph_entry_fields,
|
||||
true /* is_full_module_graph */);
|
||||
}
|
||||
}
|
||||
|
||||
if (CDSConfig::is_dumping_full_module_graph()) {
|
||||
Modules::verify_archived_modules();
|
||||
}
|
||||
}
|
||||
@@ -1295,8 +1305,10 @@ void HeapShared::resolve_classes(JavaThread* current) {
|
||||
if (!is_archived_heap_in_use()) {
|
||||
return; // nothing to do
|
||||
}
|
||||
if (!CDSConfig::is_using_aot_linked_classes()) {
|
||||
resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
|
||||
resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
|
||||
}
|
||||
}
|
||||
|
||||
void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
|
||||
@@ -1734,13 +1746,13 @@ bool HeapShared::walk_one_object(PendingOopStack* stack, int level, KlassSubGrap
|
||||
}
|
||||
}
|
||||
|
||||
if (CDSConfig::is_initing_classes_at_dump_time()) {
|
||||
if (CDSConfig::is_dumping_aot_linked_classes()) {
|
||||
if (java_lang_Class::is_instance(orig_obj)) {
|
||||
orig_obj = scratch_java_mirror(orig_obj);
|
||||
assert(orig_obj != nullptr, "must be archived");
|
||||
}
|
||||
} else if (java_lang_Class::is_instance(orig_obj) && subgraph_info != _dump_time_special_subgraph) {
|
||||
// Without CDSConfig::is_initing_classes_at_dump_time(), we only allow archived objects to
|
||||
// Without CDSConfig::is_dumping_aot_linked_classes(), we only allow archived objects to
|
||||
// point to the mirrors of (1) j.l.Object, (2) primitive classes, and (3) box classes. These are initialized
|
||||
// very early by HeapShared::init_box_classes().
|
||||
if (orig_obj == vmClasses::Object_klass()->java_mirror()
|
||||
@@ -1808,9 +1820,9 @@ bool HeapShared::walk_one_object(PendingOopStack* stack, int level, KlassSubGrap
|
||||
orig_obj->oop_iterate(&pusher);
|
||||
}
|
||||
|
||||
if (CDSConfig::is_initing_classes_at_dump_time()) {
|
||||
// The classes of all archived enum instances have been marked as aot-init,
|
||||
// so there's nothing else to be done in the production run.
|
||||
if (CDSConfig::is_dumping_aot_linked_classes()) {
|
||||
// The enum klasses are archived with aot-initialized mirror.
|
||||
// See AOTClassInitializer::can_archive_initialized_mirror().
|
||||
} else {
|
||||
// This is legacy support for enum classes before JEP 483 -- we cannot rerun
|
||||
// the enum's <clinit> in the production run, so special handling is needed.
|
||||
@@ -1949,7 +1961,7 @@ void HeapShared::verify_reachable_objects_from(oop obj) {
|
||||
#endif
|
||||
|
||||
void HeapShared::check_special_subgraph_classes() {
|
||||
if (CDSConfig::is_initing_classes_at_dump_time()) {
|
||||
if (CDSConfig::is_dumping_aot_linked_classes()) {
|
||||
// We can have aot-initialized classes (such as Enums) that can reference objects
|
||||
// of arbitrary types. Currently, we trust the JEP 483 implementation to only
|
||||
// aot-initialize classes that are "safe".
|
||||
@@ -2136,10 +2148,12 @@ void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
|
||||
void HeapShared::init_subgraph_entry_fields(TRAPS) {
|
||||
assert(CDSConfig::is_dumping_heap(), "must be");
|
||||
_dump_time_subgraph_info_table = new (mtClass)DumpTimeKlassSubGraphInfoTable();
|
||||
if (!CDSConfig::is_dumping_aot_linked_classes()) {
|
||||
init_subgraph_entry_fields(archive_subgraph_entry_fields, CHECK);
|
||||
if (CDSConfig::is_dumping_full_module_graph()) {
|
||||
init_subgraph_entry_fields(fmg_archive_subgraph_entry_fields, CHECK);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
|
||||
@@ -149,6 +149,10 @@ public:
|
||||
assert(is_loaded(), "must be loaded");
|
||||
return _flags;
|
||||
}
|
||||
|
||||
// Fetch Klass::access_flags.
|
||||
jint access_flags() { return flags().as_int(); }
|
||||
|
||||
bool has_finalizer() {
|
||||
assert(is_loaded(), "must be loaded");
|
||||
return _has_finalizer; }
|
||||
|
||||
@@ -216,15 +216,6 @@ jint ciKlass::modifier_flags() {
|
||||
)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// ciKlass::access_flags
|
||||
jint ciKlass::access_flags() {
|
||||
assert(is_loaded(), "not loaded");
|
||||
GUARDED_VM_ENTRY(
|
||||
return get_Klass()->access_flags().as_unsigned_short();
|
||||
)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// ciKlass::misc_flags
|
||||
klass_flags_t ciKlass::misc_flags() {
|
||||
|
||||
@@ -122,9 +122,6 @@ public:
|
||||
// Fetch modifier flags.
|
||||
jint modifier_flags();
|
||||
|
||||
// Fetch Klass::access_flags.
|
||||
jint access_flags();
|
||||
|
||||
// Fetch Klass::misc_flags.
|
||||
klass_flags_t misc_flags();
|
||||
|
||||
|
||||
@@ -28,9 +28,10 @@
|
||||
#include "memory/iterator.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "oops/instanceKlass.hpp"
|
||||
#include "oops/klass.hpp"
|
||||
#include "oops/klass.inline.hpp"
|
||||
#include "oops/method.hpp"
|
||||
#include "oops/symbol.hpp"
|
||||
#include "utilities/growableArray.hpp"
|
||||
#include "utilities/ostream.hpp"
|
||||
|
||||
class ClassPrinter::KlassPrintClosure : public LockedClassesDo {
|
||||
@@ -42,16 +43,15 @@ class ClassPrinter::KlassPrintClosure : public LockedClassesDo {
|
||||
outputStream* _st;
|
||||
int _num;
|
||||
bool _has_printed_methods;
|
||||
GrowableArray<InstanceKlass*> _klasses;
|
||||
|
||||
public:
|
||||
KlassPrintClosure(const char* class_name_pattern,
|
||||
const char* method_name_pattern,
|
||||
const char* method_signature_pattern,
|
||||
bool always_print_class_name,
|
||||
int flags, outputStream* st)
|
||||
: _class_name_pattern(class_name_pattern),
|
||||
_method_name_pattern(method_name_pattern),
|
||||
_method_signature_pattern(method_signature_pattern),
|
||||
_always_print_class_name(always_print_class_name),
|
||||
: _always_print_class_name(always_print_class_name),
|
||||
_flags(flags), _st(st), _num(0), _has_printed_methods(false)
|
||||
{
|
||||
if (has_mode(_flags, PRINT_METHOD_HANDLE)) {
|
||||
@@ -66,27 +66,87 @@ public:
|
||||
if (has_mode(_flags, PRINT_BYTECODE)) {
|
||||
_flags |= (PRINT_METHOD_NAME);
|
||||
}
|
||||
|
||||
if (has_mode(_flags, PRINT_CLASS_DETAILS)) {
|
||||
_always_print_class_name = true;
|
||||
}
|
||||
|
||||
_class_name_pattern = copy_pattern(class_name_pattern);
|
||||
_method_name_pattern = copy_pattern(method_name_pattern);
|
||||
_method_signature_pattern = copy_pattern(method_signature_pattern);
|
||||
}
|
||||
|
||||
static const char* copy_pattern(const char* pattern) {
|
||||
if (pattern == nullptr) {
|
||||
return nullptr;
|
||||
}
|
||||
char* copy = ResourceArea::strdup(pattern);
|
||||
for (char* p = copy; *p; p++) {
|
||||
if (*p == '.') {
|
||||
*p = '/';
|
||||
}
|
||||
}
|
||||
return copy;
|
||||
}
|
||||
|
||||
virtual void do_klass(Klass* k) {
|
||||
if (!k->is_instance_klass()) {
|
||||
return;
|
||||
}
|
||||
print_instance_klass(InstanceKlass::cast(k));
|
||||
InstanceKlass* ik = InstanceKlass::cast(k);
|
||||
if (ik->is_loaded() && ik->name()->is_star_match(_class_name_pattern)) {
|
||||
_klasses.append(ik);
|
||||
}
|
||||
}
|
||||
|
||||
void print() {
|
||||
_klasses.sort(compare_klasses_alphabetically);
|
||||
for (int i = 0; i < _klasses.length(); i++) {
|
||||
print_instance_klass(_klasses.at(i));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static bool match(const char* pattern, Symbol* sym) {
|
||||
return (pattern == nullptr || sym->is_star_match(pattern));
|
||||
}
|
||||
|
||||
static int compare_klasses_alphabetically(InstanceKlass** a, InstanceKlass** b) {
|
||||
return compare_symbols_alphabetically((*a)->name(), (*b)->name());
|
||||
}
|
||||
|
||||
static int compare_methods_alphabetically(const void* a, const void* b) {
|
||||
Method* ma = *(Method**)a;
|
||||
Method* mb = *(Method**)b;
|
||||
int n = compare_symbols_alphabetically(ma->name(), mb->name());
|
||||
if (n == 0) {
|
||||
n = compare_symbols_alphabetically(ma->signature(), mb->signature());
|
||||
}
|
||||
return n;
|
||||
}
|
||||
|
||||
static int compare_symbols_alphabetically(Symbol* a, Symbol *b) {
|
||||
if (a == b) {
|
||||
return 0;
|
||||
}
|
||||
if (a != nullptr && b == nullptr) {
|
||||
return 1;
|
||||
}
|
||||
if (a == nullptr && b != nullptr) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
return strcmp(a->as_C_string(), b->as_C_string());
|
||||
}
|
||||
|
||||
void print_klass_name(InstanceKlass* ik) {
|
||||
_st->print("[%3d] " INTPTR_FORMAT " class %s ", _num++, p2i(ik), ik->name()->as_C_string());
|
||||
_st->print("[%3d] " INTPTR_FORMAT " class: %s mirror: " INTPTR_FORMAT " ", _num++,
|
||||
p2i(ik), ik->name()->as_C_string(), p2i(ik->java_mirror()));
|
||||
ik->class_loader_data()->print_value_on(_st);
|
||||
_st->cr();
|
||||
}
|
||||
|
||||
void print_instance_klass(InstanceKlass* ik) {
|
||||
if (ik->is_loaded() && ik->name()->is_star_match(_class_name_pattern)) {
|
||||
ResourceMark rm;
|
||||
if (_has_printed_methods) {
|
||||
// We have printed some methods in the previous class.
|
||||
@@ -98,13 +158,30 @@ public:
|
||||
print_klass_name(ik);
|
||||
}
|
||||
|
||||
if (has_mode(_flags, ClassPrinter::PRINT_CLASS_DETAILS)) {
|
||||
_st->print("InstanceKlass: ");
|
||||
ik->print_on(_st);
|
||||
oop mirror = ik->java_mirror();
|
||||
if (mirror != nullptr) {
|
||||
_st->print("\nJava mirror oop for %s: ", ik->name()->as_C_string());
|
||||
mirror->print_on(_st);
|
||||
}
|
||||
}
|
||||
|
||||
if (has_mode(_flags, ClassPrinter::PRINT_METHOD_NAME)) {
|
||||
bool print_codes = has_mode(_flags, ClassPrinter::PRINT_BYTECODE);
|
||||
int len = ik->methods()->length();
|
||||
int num_methods_printed = 0;
|
||||
|
||||
Method** sorted_methods = NEW_RESOURCE_ARRAY(Method*, len);
|
||||
for (int index = 0; index < len; index++) {
|
||||
Method* m = ik->methods()->at(index);
|
||||
sorted_methods[index] = ik->methods()->at(index);
|
||||
}
|
||||
|
||||
qsort(sorted_methods, len, sizeof(Method*), compare_methods_alphabetically);
|
||||
|
||||
for (int index = 0; index < len; index++) {
|
||||
Method* m = sorted_methods[index];
|
||||
if (match(_method_name_pattern, m->name()) &&
|
||||
match(_method_signature_pattern, m->signature())) {
|
||||
if (print_codes && num_methods_printed++ > 0) {
|
||||
@@ -122,14 +199,17 @@ public:
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void print_method(Method* m) {
|
||||
bool print_codes = has_mode(_flags, ClassPrinter::PRINT_BYTECODE);
|
||||
_st->print_cr(INTPTR_FORMAT " %smethod %s : %s", p2i(m),
|
||||
m->is_static() ? "static " : "",
|
||||
m->name()->as_C_string(), m->signature()->as_C_string());
|
||||
if (print_codes) {
|
||||
|
||||
if (has_mode(_flags, ClassPrinter::PRINT_METHOD_DETAILS)) {
|
||||
m->print_on(_st);
|
||||
}
|
||||
|
||||
if (has_mode(_flags, ClassPrinter::PRINT_BYTECODE)) {
|
||||
m->print_codes_on(_st, _flags);
|
||||
}
|
||||
}
|
||||
@@ -142,12 +222,16 @@ void ClassPrinter::print_flags_help(outputStream* os) {
|
||||
os->print_cr(" 0x%02x - print the address of bytecodes", PRINT_BYTECODE_ADDR);
|
||||
os->print_cr(" 0x%02x - print info for invokedynamic", PRINT_DYNAMIC);
|
||||
os->print_cr(" 0x%02x - print info for invokehandle", PRINT_METHOD_HANDLE);
|
||||
os->print_cr(" 0x%02x - print details of the C++ and Java objects that represent classes", PRINT_CLASS_DETAILS);
|
||||
os->print_cr(" 0x%02x - print details of the C++ objects that represent methods", PRINT_METHOD_DETAILS);
|
||||
os->cr();
|
||||
}
|
||||
|
||||
void ClassPrinter::print_classes(const char* class_name_pattern, int flags, outputStream* os) {
|
||||
ResourceMark rm;
|
||||
KlassPrintClosure closure(class_name_pattern, nullptr, nullptr, true, flags, os);
|
||||
ClassLoaderDataGraph::classes_do(&closure);
|
||||
closure.print();
|
||||
}
|
||||
|
||||
void ClassPrinter::print_methods(const char* class_name_pattern,
|
||||
@@ -174,4 +258,5 @@ void ClassPrinter::print_methods(const char* class_name_pattern,
|
||||
KlassPrintClosure closure(class_name_pattern, method_name_pattern, method_signature_pattern,
|
||||
false, flags | PRINT_METHOD_NAME, os);
|
||||
ClassLoaderDataGraph::classes_do(&closure);
|
||||
closure.print();
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -52,6 +52,8 @@ public:
|
||||
PRINT_BYTECODE_ADDR = 1 << 2,
|
||||
PRINT_DYNAMIC = 1 << 3, // extra information for invokedynamic (and dynamic constant ...)
|
||||
PRINT_METHOD_HANDLE = 1 << 4, // extra information for invokehandle
|
||||
PRINT_CLASS_DETAILS = 1 << 5, // print details of the C++ and Java objects that represent classes
|
||||
PRINT_METHOD_DETAILS = 1 << 6, // print details of the C++ objects that represent methods
|
||||
};
|
||||
static bool has_mode(int flags, Mode mode) {
|
||||
return (flags & static_cast<int>(mode)) != 0;
|
||||
|
||||
@@ -439,7 +439,7 @@ class MethodFamily : public ResourceObj {
|
||||
StreamIndentor si(str, indent * 2);
|
||||
str->print("Selected method: ");
|
||||
print_method(str, _selected_target);
|
||||
Klass* method_holder = _selected_target->method_holder();
|
||||
InstanceKlass* method_holder = _selected_target->method_holder();
|
||||
if (!method_holder->is_interface()) {
|
||||
str->print(" : in superclass");
|
||||
}
|
||||
|
||||
@@ -1091,10 +1091,6 @@ void java_lang_Class::allocate_mirror(Klass* k, bool is_scratch, Handle protecti
|
||||
// Set the modifiers flag.
|
||||
u2 computed_modifiers = k->compute_modifier_flags();
|
||||
set_modifiers(mirror(), computed_modifiers);
|
||||
// Set the raw access_flags, this is used by reflection instead of modifier flags.
|
||||
// The Java code for array classes gets the access flags from the element type.
|
||||
assert(!k->is_array_klass() || k->access_flags().as_unsigned_short() == 0, "access flags are not set for arrays");
|
||||
set_raw_access_flags(mirror(), k->access_flags().as_unsigned_short());
|
||||
|
||||
InstanceMirrorKlass* mk = InstanceMirrorKlass::cast(mirror->klass());
|
||||
assert(oop_size(mirror()) == mk->instance_size(k), "should have been set");
|
||||
@@ -1103,6 +1099,8 @@ void java_lang_Class::allocate_mirror(Klass* k, bool is_scratch, Handle protecti
|
||||
|
||||
// It might also have a component mirror. This mirror must already exist.
|
||||
if (k->is_array_klass()) {
|
||||
// The Java code for array classes gets the access flags from the element type.
|
||||
set_raw_access_flags(mirror(), 0);
|
||||
if (k->is_typeArray_klass()) {
|
||||
BasicType type = TypeArrayKlass::cast(k)->element_type();
|
||||
if (is_scratch) {
|
||||
@@ -1129,6 +1127,8 @@ void java_lang_Class::allocate_mirror(Klass* k, bool is_scratch, Handle protecti
|
||||
// and java_mirror in this klass.
|
||||
} else {
|
||||
assert(k->is_instance_klass(), "Must be");
|
||||
// Set the raw access_flags, this is used by reflection instead of modifier flags.
|
||||
set_raw_access_flags(mirror(), InstanceKlass::cast(k)->access_flags().as_unsigned_short());
|
||||
initialize_mirror_fields(InstanceKlass::cast(k), mirror, protection_domain, classData, THREAD);
|
||||
if (HAS_PENDING_EXCEPTION) {
|
||||
// If any of the fields throws an exception like OOM remove the klass field
|
||||
|
||||
@@ -127,10 +127,8 @@ ResolutionErrorEntry::~ResolutionErrorEntry() {
|
||||
}
|
||||
|
||||
void ResolutionErrorEntry::set_nest_host_error(const char* message) {
|
||||
// If a message is already set, free it.
|
||||
if (nest_host_error() != nullptr) {
|
||||
FREE_C_HEAP_ARRAY(char, _nest_host_error);
|
||||
}
|
||||
assert(_nest_host_error == nullptr, "caller should have checked");
|
||||
assert_lock_strong(SystemDictionary_lock);
|
||||
_nest_host_error = message;
|
||||
}
|
||||
|
||||
|
||||
@@ -1859,7 +1859,7 @@ Symbol* SystemDictionary::find_resolution_error(const constantPoolHandle& pool,
|
||||
|
||||
void SystemDictionary::add_nest_host_error(const constantPoolHandle& pool,
|
||||
int which,
|
||||
const char* message) {
|
||||
const stringStream& message) {
|
||||
{
|
||||
MutexLocker ml(Thread::current(), SystemDictionary_lock);
|
||||
ResolutionErrorEntry* entry = ResolutionErrorTable::find_entry(pool, which);
|
||||
@@ -1868,14 +1868,19 @@ void SystemDictionary::add_nest_host_error(const constantPoolHandle& pool,
|
||||
// constant pool index. In this case resolution succeeded but there's an error in this nest host
|
||||
// that we use the table to record.
|
||||
assert(pool->resolved_klass_at(which) != nullptr, "klass should be resolved if there is no entry");
|
||||
ResolutionErrorTable::add_entry(pool, which, message);
|
||||
ResolutionErrorTable::add_entry(pool, which, message.as_string(true /* on C-heap */));
|
||||
} else {
|
||||
// An existing entry means we had a true resolution failure (LinkageError) with our nest host, but we
|
||||
// still want to add the error message for the higher-level access checks to report. We should
|
||||
// only reach here under the same error condition, so we can ignore the potential race with setting
|
||||
// the message, and set it again.
|
||||
assert(entry->nest_host_error() == nullptr || strcmp(entry->nest_host_error(), message) == 0, "should be the same message");
|
||||
entry->set_nest_host_error(message);
|
||||
// the message.
|
||||
const char* nhe = entry->nest_host_error();
|
||||
if (nhe == nullptr) {
|
||||
entry->set_nest_host_error(message.as_string(true /* on C-heap */));
|
||||
} else {
|
||||
DEBUG_ONLY(const char* msg = message.base();)
|
||||
assert(strcmp(nhe, msg) == 0, "New message %s, differs from original %s", msg, nhe);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2172,9 +2177,10 @@ static bool is_always_visible_class(oop mirror) {
|
||||
return true; // primitive array
|
||||
}
|
||||
assert(klass->is_instance_klass(), "%s", klass->external_name());
|
||||
return klass->is_public() &&
|
||||
(InstanceKlass::cast(klass)->is_same_class_package(vmClasses::Object_klass()) || // java.lang
|
||||
InstanceKlass::cast(klass)->is_same_class_package(vmClasses::MethodHandle_klass())); // java.lang.invoke
|
||||
InstanceKlass* ik = InstanceKlass::cast(klass);
|
||||
return ik->is_public() &&
|
||||
(ik->is_same_class_package(vmClasses::Object_klass()) || // java.lang
|
||||
ik->is_same_class_package(vmClasses::MethodHandle_klass())); // java.lang.invoke
|
||||
}
|
||||
|
||||
// Find or construct the Java mirror (java.lang.Class instance) for
|
||||
|
||||
@@ -280,7 +280,7 @@ public:
|
||||
|
||||
// Record a nest host resolution/validation error
|
||||
static void add_nest_host_error(const constantPoolHandle& pool, int which,
|
||||
const char* message);
|
||||
const stringStream& message);
|
||||
static const char* find_nest_host_error(const constantPoolHandle& pool, int which);
|
||||
|
||||
static void add_to_initiating_loader(JavaThread* current, InstanceKlass* k,
|
||||
|
||||
@@ -190,6 +190,9 @@
|
||||
/* GC support */ \
|
||||
do_klass(FillerObject_klass, jdk_internal_vm_FillerObject ) \
|
||||
\
|
||||
/* Scoped Values */ \
|
||||
do_klass(ScopedValue_Carrier_klass, java_lang_ScopedValue_Carrier ) \
|
||||
\
|
||||
/*end*/
|
||||
|
||||
#endif // SHARE_CLASSFILE_VMCLASSMACROS_HPP
|
||||
|
||||
@@ -227,11 +227,6 @@ void CodeCache::initialize_heaps() {
|
||||
|
||||
if (!non_nmethod.set) {
|
||||
non_nmethod.size += compiler_buffer_size;
|
||||
// Further down, just before FLAG_SET_ERGO(), all segment sizes are
|
||||
// aligned down to the next lower multiple of min_size. For large page
|
||||
// sizes, this may result in (non_nmethod.size == 0) which is not acceptable.
|
||||
// Therefore, force non_nmethod.size to at least min_size.
|
||||
non_nmethod.size = MAX2(non_nmethod.size, min_size);
|
||||
}
|
||||
|
||||
if (!profiled.set && !non_profiled.set) {
|
||||
@@ -307,11 +302,10 @@ void CodeCache::initialize_heaps() {
|
||||
|
||||
// Note: if large page support is enabled, min_size is at least the large
|
||||
// page size. This ensures that the code cache is covered by large pages.
|
||||
non_profiled.size += non_nmethod.size & alignment_mask(min_size);
|
||||
non_profiled.size += profiled.size & alignment_mask(min_size);
|
||||
non_nmethod.size = align_down(non_nmethod.size, min_size);
|
||||
profiled.size = align_down(profiled.size, min_size);
|
||||
non_profiled.size = align_down(non_profiled.size, min_size);
|
||||
non_nmethod.size = align_up(non_nmethod.size, min_size);
|
||||
profiled.size = align_up(profiled.size, min_size);
|
||||
non_profiled.size = align_up(non_profiled.size, min_size);
|
||||
cache_size = non_nmethod.size + profiled.size + non_profiled.size;
|
||||
|
||||
FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
|
||||
FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
|
||||
|
||||
@@ -29,6 +29,7 @@
|
||||
|
||||
#include "compiler/compiler_globals.hpp"
|
||||
#include "runtime/arguments.hpp"
|
||||
#include "runtime/globals.hpp"
|
||||
|
||||
inline bool CompilerConfig::is_interpreter_only() {
|
||||
return Arguments::is_interpreter_only() || TieredStopAtLevel == CompLevel_none;
|
||||
|
||||
@@ -561,6 +561,20 @@ bool DirectiveSet::should_not_inline(ciMethod* inlinee) {
|
||||
return false;
|
||||
}
|
||||
|
||||
bool DirectiveSet::should_delay_inline(ciMethod* inlinee) {
|
||||
inlinee->check_is_loaded();
|
||||
VM_ENTRY_MARK;
|
||||
methodHandle mh(THREAD, inlinee->get_Method());
|
||||
|
||||
if (_inlinematchers != nullptr) {
|
||||
return matches_inline(mh, InlineMatcher::delay_inline);
|
||||
}
|
||||
if (!CompilerDirectivesIgnoreCompileCommandsOption) {
|
||||
return CompilerOracle::should_delay_inline(mh);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool DirectiveSet::parse_and_add_inline(char* str, const char*& error_msg) {
|
||||
InlineMatcher* m = InlineMatcher::parse_inline_pattern(str, error_msg);
|
||||
if (m != nullptr) {
|
||||
|
||||
@@ -142,6 +142,7 @@ public:
|
||||
void append_inline(InlineMatcher* m);
|
||||
bool should_inline(ciMethod* inlinee);
|
||||
bool should_not_inline(ciMethod* inlinee);
|
||||
bool should_delay_inline(ciMethod* inlinee);
|
||||
void print_inline(outputStream* st);
|
||||
DirectiveSet* compilecommand_compatibility_init(const methodHandle& method);
|
||||
bool is_exclusive_copy() { return _directive == nullptr; }
|
||||
|
||||
@@ -480,6 +480,10 @@ bool CompilerOracle::should_not_inline(const methodHandle& method) {
|
||||
return check_predicate(CompileCommandEnum::DontInline, method) || check_predicate(CompileCommandEnum::Exclude, method);
|
||||
}
|
||||
|
||||
bool CompilerOracle::should_delay_inline(const methodHandle& method) {
|
||||
return (check_predicate(CompileCommandEnum::DelayInline, method));
|
||||
}
|
||||
|
||||
bool CompilerOracle::should_print(const methodHandle& method) {
|
||||
return check_predicate(CompileCommandEnum::Print, method);
|
||||
}
|
||||
|
||||
@@ -51,6 +51,7 @@ class methodHandle;
|
||||
option(Log, "log", Bool) \
|
||||
option(Print, "print", Bool) \
|
||||
option(Inline, "inline", Bool) \
|
||||
option(DelayInline, "delayinline", Bool) \
|
||||
option(DontInline, "dontinline", Bool) \
|
||||
option(Blackhole, "blackhole", Bool) \
|
||||
option(CompileOnly, "compileonly", Bool)\
|
||||
@@ -150,6 +151,9 @@ class CompilerOracle : AllStatic {
|
||||
// Tells whether we want to disallow inlining of this method
|
||||
static bool should_not_inline(const methodHandle& method);
|
||||
|
||||
// Tells whether we want to delay inlining of this method
|
||||
static bool should_delay_inline(const methodHandle& method);
|
||||
|
||||
// Tells whether this method changes Thread.currentThread()
|
||||
static bool changes_current_thread(const methodHandle& method);
|
||||
|
||||
|
||||
@@ -100,6 +100,7 @@ public:
|
||||
enum InlineType {
|
||||
unknown_inline,
|
||||
dont_inline,
|
||||
delay_inline,
|
||||
force_inline
|
||||
};
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@
|
||||
#include "gc/g1/g1HeapSizingPolicy.hpp"
|
||||
#include "gc/g1/jvmFlagConstraintsG1.hpp"
|
||||
#include "gc/shared/bufferNode.hpp"
|
||||
#include "gc/shared/ptrQueue.hpp"
|
||||
#include "gc/shared/satbMarkQueue.hpp"
|
||||
#include "runtime/globals_extension.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
|
||||
@@ -70,8 +70,8 @@
|
||||
nonstatic_field(G1HeapRegionSetBase, _length, uint) \
|
||||
\
|
||||
nonstatic_field(SATBMarkQueue, _active, bool) \
|
||||
nonstatic_field(PtrQueue, _buf, void**) \
|
||||
nonstatic_field(PtrQueue, _index, size_t)
|
||||
nonstatic_field(SATBMarkQueue, _buf, void**) \
|
||||
nonstatic_field(SATBMarkQueue, _index, size_t)
|
||||
|
||||
#define VM_INT_CONSTANTS_G1GC(declare_constant, declare_constant_with_value) \
|
||||
declare_constant(G1HeapRegionType::FreeTag) \
|
||||
@@ -96,7 +96,6 @@
|
||||
declare_toplevel_type(G1HeapRegionManager) \
|
||||
declare_toplevel_type(G1HeapRegionSetBase) \
|
||||
declare_toplevel_type(G1MonitoringSupport) \
|
||||
declare_toplevel_type(PtrQueue) \
|
||||
declare_toplevel_type(G1HeapRegionType) \
|
||||
declare_toplevel_type(SATBMarkQueue) \
|
||||
\
|
||||
|
||||
@@ -58,8 +58,6 @@
|
||||
#include "utilities/macros.hpp"
|
||||
#include "utilities/vmError.hpp"
|
||||
|
||||
PSYoungGen* ParallelScavengeHeap::_young_gen = nullptr;
|
||||
PSOldGen* ParallelScavengeHeap::_old_gen = nullptr;
|
||||
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = nullptr;
|
||||
GCPolicyCounters* ParallelScavengeHeap::_gc_policy_counters = nullptr;
|
||||
size_t ParallelScavengeHeap::_desired_page_size = 0;
|
||||
@@ -134,16 +132,16 @@ jint ParallelScavengeHeap::initialize() {
|
||||
|
||||
void ParallelScavengeHeap::initialize_serviceability() {
|
||||
|
||||
_eden_pool = new EdenMutableSpacePool(_young_gen,
|
||||
_eden_pool = new PSEdenSpacePool(_young_gen,
|
||||
_young_gen->eden_space(),
|
||||
"PS Eden Space",
|
||||
false /* support_usage_threshold */);
|
||||
|
||||
_survivor_pool = new SurvivorMutableSpacePool(_young_gen,
|
||||
_survivor_pool = new PSSurvivorSpacePool(_young_gen,
|
||||
"PS Survivor Space",
|
||||
false /* support_usage_threshold */);
|
||||
|
||||
_old_pool = new PSGenerationPool(_old_gen,
|
||||
_old_pool = new PSOldGenerationPool(_old_gen,
|
||||
"PS Old Gen",
|
||||
true /* support_usage_threshold */);
|
||||
|
||||
|
||||
@@ -69,8 +69,8 @@ class ReservedSpace;
|
||||
class ParallelScavengeHeap : public CollectedHeap {
|
||||
friend class VMStructs;
|
||||
private:
|
||||
static PSYoungGen* _young_gen;
|
||||
static PSOldGen* _old_gen;
|
||||
PSYoungGen* _young_gen;
|
||||
PSOldGen* _old_gen;
|
||||
|
||||
// Sizing policy for entire heap
|
||||
static PSAdaptiveSizePolicy* _size_policy;
|
||||
@@ -160,8 +160,8 @@ public:
|
||||
GrowableArray<GCMemoryManager*> memory_managers() override;
|
||||
GrowableArray<MemoryPool*> memory_pools() override;
|
||||
|
||||
static PSYoungGen* young_gen() { return _young_gen; }
|
||||
static PSOldGen* old_gen() { return _old_gen; }
|
||||
PSYoungGen* young_gen() const { return _young_gen; }
|
||||
PSOldGen* old_gen() const { return _old_gen; }
|
||||
|
||||
PSAdaptiveSizePolicy* size_policy() { return _size_policy; }
|
||||
|
||||
|
||||
@@ -33,10 +33,7 @@
|
||||
constraint) \
|
||||
product(bool, UseMaximumCompactionOnSystemGC, true, \
|
||||
"Use maximum compaction in the Parallel Old garbage collector " \
|
||||
"for a system GC") \
|
||||
\
|
||||
product(bool, PSChunkLargeArrays, true, \
|
||||
"(Deprecated) Process large arrays in chunks")
|
||||
"for a system GC")
|
||||
|
||||
// end of GC_PARALLEL_FLAGS
|
||||
|
||||
|
||||
@@ -24,14 +24,14 @@
|
||||
|
||||
#include "gc/parallel/psMemoryPool.hpp"
|
||||
|
||||
PSGenerationPool::PSGenerationPool(PSOldGen* old_gen,
|
||||
PSOldGenerationPool::PSOldGenerationPool(PSOldGen* old_gen,
|
||||
const char* name,
|
||||
bool support_usage_threshold) :
|
||||
CollectedMemoryPool(name, old_gen->capacity_in_bytes(),
|
||||
old_gen->reserved().byte_size(), support_usage_threshold), _old_gen(old_gen) {
|
||||
}
|
||||
|
||||
MemoryUsage PSGenerationPool::get_memory_usage() {
|
||||
MemoryUsage PSOldGenerationPool::get_memory_usage() {
|
||||
size_t maxSize = (available_for_allocation() ? max_size() : 0);
|
||||
size_t used = used_in_bytes();
|
||||
size_t committed = _old_gen->capacity_in_bytes();
|
||||
@@ -39,13 +39,13 @@ MemoryUsage PSGenerationPool::get_memory_usage() {
|
||||
return MemoryUsage(initial_size(), used, committed, maxSize);
|
||||
}
|
||||
|
||||
// The max size of EdenMutableSpacePool =
|
||||
// The max size of PSEdenSpacePool =
|
||||
// max size of the PSYoungGen - capacity of two survivor spaces
|
||||
//
|
||||
// Max size of PS eden space is changing due to ergonomic.
|
||||
// PSYoungGen, PSOldGen, Eden, Survivor spaces are all resizable.
|
||||
//
|
||||
EdenMutableSpacePool::EdenMutableSpacePool(PSYoungGen* young_gen,
|
||||
PSEdenSpacePool::PSEdenSpacePool(PSYoungGen* young_gen,
|
||||
MutableSpace* space,
|
||||
const char* name,
|
||||
bool support_usage_threshold) :
|
||||
@@ -58,7 +58,7 @@ EdenMutableSpacePool::EdenMutableSpacePool(PSYoungGen* young_gen,
|
||||
_space(space) {
|
||||
}
|
||||
|
||||
MemoryUsage EdenMutableSpacePool::get_memory_usage() {
|
||||
MemoryUsage PSEdenSpacePool::get_memory_usage() {
|
||||
size_t maxSize = (available_for_allocation() ? max_size() : 0);
|
||||
size_t used = used_in_bytes();
|
||||
size_t committed = _space->capacity_in_bytes();
|
||||
@@ -66,12 +66,12 @@ MemoryUsage EdenMutableSpacePool::get_memory_usage() {
|
||||
return MemoryUsage(initial_size(), used, committed, maxSize);
|
||||
}
|
||||
|
||||
// The max size of SurvivorMutableSpacePool =
|
||||
// The max size of PSSurvivorSpacePool =
|
||||
// current capacity of the from-space
|
||||
//
|
||||
// PS from and to survivor spaces could have different sizes.
|
||||
//
|
||||
SurvivorMutableSpacePool::SurvivorMutableSpacePool(PSYoungGen* young_gen,
|
||||
PSSurvivorSpacePool::PSSurvivorSpacePool(PSYoungGen* young_gen,
|
||||
const char* name,
|
||||
bool support_usage_threshold) :
|
||||
CollectedMemoryPool(name, young_gen->from_space()->capacity_in_bytes(),
|
||||
@@ -79,7 +79,7 @@ SurvivorMutableSpacePool::SurvivorMutableSpacePool(PSYoungGen* young_gen,
|
||||
support_usage_threshold), _young_gen(young_gen) {
|
||||
}
|
||||
|
||||
MemoryUsage SurvivorMutableSpacePool::get_memory_usage() {
|
||||
MemoryUsage PSSurvivorSpacePool::get_memory_usage() {
|
||||
size_t maxSize = (available_for_allocation() ? max_size() : 0);
|
||||
size_t used = used_in_bytes();
|
||||
size_t committed = committed_in_bytes();
|
||||
|
||||
@@ -31,25 +31,25 @@
|
||||
#include "services/memoryPool.hpp"
|
||||
#include "services/memoryUsage.hpp"
|
||||
|
||||
class PSGenerationPool : public CollectedMemoryPool {
|
||||
class PSOldGenerationPool : public CollectedMemoryPool {
|
||||
private:
|
||||
PSOldGen* _old_gen;
|
||||
|
||||
public:
|
||||
PSGenerationPool(PSOldGen* pool, const char* name, bool support_usage_threshold);
|
||||
PSOldGenerationPool(PSOldGen* pool, const char* name, bool support_usage_threshold);
|
||||
|
||||
MemoryUsage get_memory_usage();
|
||||
size_t used_in_bytes() { return _old_gen->used_in_bytes(); }
|
||||
size_t max_size() const { return _old_gen->reserved().byte_size(); }
|
||||
};
|
||||
|
||||
class EdenMutableSpacePool : public CollectedMemoryPool {
|
||||
class PSEdenSpacePool : public CollectedMemoryPool {
|
||||
private:
|
||||
PSYoungGen* _young_gen;
|
||||
MutableSpace* _space;
|
||||
|
||||
public:
|
||||
EdenMutableSpacePool(PSYoungGen* young_gen,
|
||||
PSEdenSpacePool(PSYoungGen* young_gen,
|
||||
MutableSpace* space,
|
||||
const char* name,
|
||||
bool support_usage_threshold);
|
||||
@@ -65,12 +65,12 @@ public:
|
||||
}
|
||||
};
|
||||
|
||||
class SurvivorMutableSpacePool : public CollectedMemoryPool {
|
||||
class PSSurvivorSpacePool : public CollectedMemoryPool {
|
||||
private:
|
||||
PSYoungGen* _young_gen;
|
||||
|
||||
public:
|
||||
SurvivorMutableSpacePool(PSYoungGen* young_gen,
|
||||
PSSurvivorSpacePool(PSYoungGen* young_gen,
|
||||
const char* name,
|
||||
bool support_usage_threshold);
|
||||
|
||||
|
||||
@@ -299,8 +299,7 @@ inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o,
|
||||
// _min_array_size_for_chunking, and most of them will be arrays.
|
||||
// So, the objArray test would be very infrequent.
|
||||
if (new_obj_size > _min_array_size_for_chunking &&
|
||||
klass->is_objArray_klass() &&
|
||||
PSChunkLargeArrays) {
|
||||
klass->is_objArray_klass()) {
|
||||
push_objArray(o, new_obj);
|
||||
} else {
|
||||
// we'll just push its contents
|
||||
@@ -344,7 +343,6 @@ inline void PSPromotionManager::copy_and_push_safe_barrier(T* p) {
|
||||
inline void PSPromotionManager::process_popped_location_depth(ScannerTask task,
|
||||
bool stolen) {
|
||||
if (task.is_partial_array_state()) {
|
||||
assert(PSChunkLargeArrays, "invariant");
|
||||
process_array_chunk(task.to_partial_array_state(), stolen);
|
||||
} else {
|
||||
if (task.is_narrow_oop_ptr()) {
|
||||
|
||||
@@ -115,7 +115,7 @@ class PSScavenge: AllStatic {
|
||||
}
|
||||
|
||||
static bool is_obj_in_to_space(oop o) {
|
||||
return ParallelScavengeHeap::young_gen()->to_space()->contains(o);
|
||||
return ParallelScavengeHeap::heap()->young_gen()->to_space()->contains(o);
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@@ -64,8 +64,8 @@
|
||||
nonstatic_field(PSOldGen, _max_gen_size, const size_t) \
|
||||
\
|
||||
\
|
||||
static_field(ParallelScavengeHeap, _young_gen, PSYoungGen*) \
|
||||
static_field(ParallelScavengeHeap, _old_gen, PSOldGen*) \
|
||||
nonstatic_field(ParallelScavengeHeap, _young_gen, PSYoungGen*) \
|
||||
nonstatic_field(ParallelScavengeHeap, _old_gen, PSOldGen*) \
|
||||
\
|
||||
|
||||
#define VM_TYPES_PARALLELGC(declare_type, \
|
||||
|
||||
@@ -91,14 +91,16 @@ SerialHeap::SerialHeap() :
|
||||
CollectedHeap(),
|
||||
_young_gen(nullptr),
|
||||
_old_gen(nullptr),
|
||||
_young_gen_saved_top(nullptr),
|
||||
_old_gen_saved_top(nullptr),
|
||||
_rem_set(nullptr),
|
||||
_gc_policy_counters(new GCPolicyCounters("Copy:MSC", 2, 2)),
|
||||
_young_manager(nullptr),
|
||||
_old_manager(nullptr),
|
||||
_is_heap_almost_full(false),
|
||||
_eden_pool(nullptr),
|
||||
_survivor_pool(nullptr),
|
||||
_old_pool(nullptr) {
|
||||
_old_pool(nullptr),
|
||||
_is_heap_almost_full(false) {
|
||||
_young_manager = new GCMemoryManager("Copy");
|
||||
_old_manager = new GCMemoryManager("MarkSweepCompact");
|
||||
GCLocker::initialize();
|
||||
@@ -630,6 +632,14 @@ bool SerialHeap::requires_barriers(stackChunkOop obj) const {
|
||||
|
||||
// Returns "TRUE" iff "p" points into the committed areas of the heap.
|
||||
bool SerialHeap::is_in(const void* p) const {
|
||||
// precondition
|
||||
verify_not_in_native_if_java_thread();
|
||||
|
||||
if (!is_in_reserved(p)) {
|
||||
// If it's not even in reserved.
|
||||
return false;
|
||||
}
|
||||
|
||||
return _young_gen->is_in(p) || _old_gen->is_in(p);
|
||||
}
|
||||
|
||||
@@ -797,3 +807,12 @@ void SerialHeap::gc_epilogue(bool full) {
|
||||
|
||||
MetaspaceCounters::update_performance_counters();
|
||||
};
|
||||
|
||||
#ifdef ASSERT
|
||||
void SerialHeap::verify_not_in_native_if_java_thread() {
|
||||
if (Thread::current()->is_Java_thread()) {
|
||||
JavaThread* thread = JavaThread::current();
|
||||
assert(thread->thread_state() != _thread_in_native, "precondition");
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -76,6 +76,8 @@ class SerialHeap : public CollectedHeap {
|
||||
private:
|
||||
DefNewGeneration* _young_gen;
|
||||
TenuredGeneration* _old_gen;
|
||||
|
||||
// Used during young-gc
|
||||
HeapWord* _young_gen_saved_top;
|
||||
HeapWord* _old_gen_saved_top;
|
||||
|
||||
@@ -94,6 +96,10 @@ private:
|
||||
GCMemoryManager* _young_manager;
|
||||
GCMemoryManager* _old_manager;
|
||||
|
||||
MemoryPool* _eden_pool;
|
||||
MemoryPool* _survivor_pool;
|
||||
MemoryPool* _old_pool;
|
||||
|
||||
// Indicate whether heap is almost or approaching full.
|
||||
// Usually, there is some memory headroom for application/gc to run properly.
|
||||
// However, in extreme cases, e.g. young-gen is non-empty after a full gc, we
|
||||
@@ -111,6 +117,21 @@ private:
|
||||
void print_tracing_info() const override;
|
||||
void stop() override {};
|
||||
|
||||
static void verify_not_in_native_if_java_thread() NOT_DEBUG_RETURN;
|
||||
|
||||
// Try to allocate space by expanding the heap.
|
||||
HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
|
||||
|
||||
HeapWord* mem_allocate_cas_noexpand(size_t size, bool is_tlab);
|
||||
HeapWord* mem_allocate_work(size_t size, bool is_tlab);
|
||||
|
||||
void initialize_serviceability() override;
|
||||
|
||||
// Set the saved marks of generations, if that makes sense.
|
||||
// In particular, if any generation might iterate over the oops
|
||||
// in other generations, it should call this method.
|
||||
void save_marks();
|
||||
|
||||
public:
|
||||
// Returns JNI_OK on success
|
||||
jint initialize() override;
|
||||
@@ -209,26 +230,6 @@ public:
|
||||
// generations in a fully generational heap.
|
||||
CardTableRS* rem_set() { return _rem_set; }
|
||||
|
||||
public:
|
||||
// Set the saved marks of generations, if that makes sense.
|
||||
// In particular, if any generation might iterate over the oops
|
||||
// in other generations, it should call this method.
|
||||
void save_marks();
|
||||
|
||||
private:
|
||||
// Try to allocate space by expanding the heap.
|
||||
HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
|
||||
|
||||
HeapWord* mem_allocate_cas_noexpand(size_t size, bool is_tlab);
|
||||
HeapWord* mem_allocate_work(size_t size, bool is_tlab);
|
||||
|
||||
MemoryPool* _eden_pool;
|
||||
MemoryPool* _survivor_pool;
|
||||
MemoryPool* _old_pool;
|
||||
|
||||
void initialize_serviceability() override;
|
||||
|
||||
public:
|
||||
static SerialHeap* heap();
|
||||
|
||||
SerialHeap();
|
||||
|
||||
@@ -281,7 +281,7 @@ public:
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
|
||||
static OopCopyResult oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
|
||||
arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
|
||||
size_t length);
|
||||
|
||||
|
||||
@@ -34,7 +34,7 @@
|
||||
|
||||
template <DecoratorSet decorators, typename BarrierSetT>
|
||||
template <typename T>
|
||||
inline bool BarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
|
||||
inline OopCopyResult BarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
|
||||
arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
|
||||
size_t length) {
|
||||
T* src = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
|
||||
@@ -42,7 +42,8 @@ inline bool BarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_arraycopy_in
|
||||
|
||||
if (!HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value) {
|
||||
// Covariant, copy without checks
|
||||
return Raw::oop_arraycopy(nullptr, 0, src, nullptr, 0, dst, length);
|
||||
Raw::oop_arraycopy(nullptr, 0, src, nullptr, 0, dst, length);
|
||||
return OopCopyResult::ok;
|
||||
}
|
||||
|
||||
// Copy each element with checking casts
|
||||
@@ -50,12 +51,12 @@ inline bool BarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_arraycopy_in
|
||||
for (const T* const end = src + length; src < end; src++, dst++) {
|
||||
const T elem = *src;
|
||||
if (!oopDesc::is_instanceof_or_null(CompressedOops::decode(elem), dst_klass)) {
|
||||
return false;
|
||||
return OopCopyResult::failed_check_class_cast;
|
||||
}
|
||||
*dst = elem;
|
||||
}
|
||||
|
||||
return true;
|
||||
return OopCopyResult::ok;
|
||||
}
|
||||
|
||||
#endif // SHARE_GC_SHARED_BARRIERSET_INLINE_HPP
|
||||
|
||||
@@ -104,7 +104,7 @@ public:
|
||||
static oop oop_atomic_xchg_in_heap(T* addr, oop new_value);
|
||||
|
||||
template <typename T>
|
||||
static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
|
||||
static OopCopyResult oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
|
||||
arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
|
||||
size_t length);
|
||||
|
||||
|
||||
@@ -99,7 +99,7 @@ oop_atomic_xchg_in_heap(T* addr, oop new_value) {
|
||||
|
||||
template <DecoratorSet decorators, typename BarrierSetT>
|
||||
template <typename T>
|
||||
inline bool CardTableBarrierSet::AccessBarrier<decorators, BarrierSetT>::
|
||||
inline OopCopyResult CardTableBarrierSet::AccessBarrier<decorators, BarrierSetT>::
|
||||
oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
|
||||
arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
|
||||
size_t length) {
|
||||
@@ -131,12 +131,13 @@ oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
|
||||
// objArrayOop) which we assume is 32 bit.
|
||||
assert(pd == (size_t)(int)pd, "length field overflow");
|
||||
bs->write_ref_array((HeapWord*)dst_raw, pd);
|
||||
return false;
|
||||
return OopCopyResult::failed_check_class_cast;
|
||||
}
|
||||
}
|
||||
bs->write_ref_array((HeapWord*)dst_raw, length);
|
||||
}
|
||||
return true;
|
||||
|
||||
return OopCopyResult::ok;
|
||||
}
|
||||
|
||||
template <DecoratorSet decorators, typename BarrierSetT>
|
||||
|
||||
@@ -1,111 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "gc/shared/bufferNode.hpp"
|
||||
#include "gc/shared/ptrQueue.hpp"
|
||||
|
||||
PtrQueue::PtrQueue(PtrQueueSet* qset) :
|
||||
_index(0),
|
||||
_buf(nullptr)
|
||||
{}
|
||||
|
||||
PtrQueue::~PtrQueue() {
|
||||
assert(_buf == nullptr, "queue must be flushed before delete");
|
||||
}
|
||||
|
||||
size_t PtrQueue::current_capacity() const {
|
||||
if (_buf == nullptr) {
|
||||
return 0;
|
||||
} else {
|
||||
return BufferNode::make_node_from_buffer(_buf)->capacity();
|
||||
}
|
||||
}
|
||||
|
||||
PtrQueueSet::PtrQueueSet(BufferNode::Allocator* allocator) :
|
||||
_allocator(allocator)
|
||||
{}
|
||||
|
||||
PtrQueueSet::~PtrQueueSet() {}
|
||||
|
||||
void PtrQueueSet::reset_queue(PtrQueue& queue) {
|
||||
queue.set_index(queue.current_capacity());
|
||||
}
|
||||
|
||||
void PtrQueueSet::flush_queue(PtrQueue& queue) {
|
||||
void** buffer = queue.buffer();
|
||||
if (buffer != nullptr) {
|
||||
size_t index = queue.index();
|
||||
queue.set_buffer(nullptr);
|
||||
queue.set_index(0);
|
||||
BufferNode* node = BufferNode::make_node_from_buffer(buffer, index);
|
||||
if (index == node->capacity()) {
|
||||
deallocate_buffer(node);
|
||||
} else {
|
||||
enqueue_completed_buffer(node);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool PtrQueueSet::try_enqueue(PtrQueue& queue, void* value) {
|
||||
size_t index = queue.index();
|
||||
if (index == 0) return false;
|
||||
void** buffer = queue.buffer();
|
||||
assert(buffer != nullptr, "no buffer but non-zero index");
|
||||
buffer[--index] = value;
|
||||
queue.set_index(index);
|
||||
return true;
|
||||
}
|
||||
|
||||
void PtrQueueSet::retry_enqueue(PtrQueue& queue, void* value) {
|
||||
assert(queue.index() != 0, "precondition");
|
||||
assert(queue.buffer() != nullptr, "precondition");
|
||||
size_t index = queue.index();
|
||||
queue.buffer()[--index] = value;
|
||||
queue.set_index(index);
|
||||
}
|
||||
|
||||
BufferNode* PtrQueueSet::exchange_buffer_with_new(PtrQueue& queue) {
|
||||
BufferNode* node = nullptr;
|
||||
void** buffer = queue.buffer();
|
||||
if (buffer != nullptr) {
|
||||
node = BufferNode::make_node_from_buffer(buffer, queue.index());
|
||||
}
|
||||
install_new_buffer(queue);
|
||||
return node;
|
||||
}
|
||||
|
||||
void PtrQueueSet::install_new_buffer(PtrQueue& queue) {
|
||||
BufferNode* node = _allocator->allocate();
|
||||
queue.set_buffer(BufferNode::make_buffer_from_node(node));
|
||||
queue.set_index(node->capacity());
|
||||
}
|
||||
|
||||
void** PtrQueueSet::allocate_buffer() {
|
||||
BufferNode* node = _allocator->allocate();
|
||||
return BufferNode::make_buffer_from_node(node);
|
||||
}
|
||||
|
||||
void PtrQueueSet::deallocate_buffer(BufferNode* node) {
|
||||
_allocator->release(node);
|
||||
}
|
||||
@@ -1,168 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_SHARED_PTRQUEUE_HPP
|
||||
#define SHARE_GC_SHARED_PTRQUEUE_HPP
|
||||
|
||||
#include "gc/shared/bufferNode.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/sizes.hpp"
|
||||
|
||||
// There are various techniques that require threads to be able to log
|
||||
// addresses. For example, a generational write barrier might log
|
||||
// the addresses of modified old-generation objects. This type supports
|
||||
// this operation.
|
||||
|
||||
class PtrQueueSet;
|
||||
class PtrQueue {
|
||||
friend class VMStructs;
|
||||
|
||||
NONCOPYABLE(PtrQueue);
|
||||
|
||||
// The (byte) index at which an object was last enqueued. Starts at
|
||||
// capacity (in bytes) (indicating an empty buffer) and goes towards zero.
|
||||
// Value is always pointer-size aligned.
|
||||
size_t _index;
|
||||
|
||||
static const size_t _element_size = sizeof(void*);
|
||||
|
||||
static size_t byte_index_to_index(size_t ind) {
|
||||
assert(is_aligned(ind, _element_size), "precondition");
|
||||
return ind / _element_size;
|
||||
}
|
||||
|
||||
static size_t index_to_byte_index(size_t ind) {
|
||||
return ind * _element_size;
|
||||
}
|
||||
|
||||
protected:
|
||||
// The buffer.
|
||||
void** _buf;
|
||||
|
||||
// Initialize this queue to contain a null buffer, and be part of the
|
||||
// given PtrQueueSet.
|
||||
PtrQueue(PtrQueueSet* qset);
|
||||
|
||||
// Requires queue flushed.
|
||||
~PtrQueue();
|
||||
|
||||
public:
|
||||
|
||||
void** buffer() const { return _buf; }
|
||||
void set_buffer(void** buffer) { _buf = buffer; }
|
||||
|
||||
size_t index() const {
|
||||
return byte_index_to_index(_index);
|
||||
}
|
||||
|
||||
void set_index(size_t new_index) {
|
||||
assert(new_index <= current_capacity(), "precondition");
|
||||
_index = index_to_byte_index(new_index);
|
||||
}
|
||||
|
||||
// Returns the capacity of the buffer, or 0 if the queue doesn't currently
|
||||
// have a buffer.
|
||||
size_t current_capacity() const;
|
||||
|
||||
bool is_empty() const { return index() == current_capacity(); }
|
||||
size_t size() const { return current_capacity() - index(); }
|
||||
|
||||
protected:
|
||||
// To support compiler.
|
||||
template<typename Derived>
|
||||
static ByteSize byte_offset_of_index() {
|
||||
return byte_offset_of(Derived, _index);
|
||||
}
|
||||
|
||||
static constexpr ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }
|
||||
|
||||
template<typename Derived>
|
||||
static ByteSize byte_offset_of_buf() {
|
||||
return byte_offset_of(Derived, _buf);
|
||||
}
|
||||
|
||||
static ByteSize byte_width_of_buf() { return in_ByteSize(_element_size); }
|
||||
};
|
||||
|
||||
// A PtrQueueSet represents resources common to a set of pointer queues.
|
||||
// In particular, the individual queues allocate buffers from this shared
|
||||
// set, and return completed buffers to the set.
|
||||
class PtrQueueSet {
|
||||
BufferNode::Allocator* _allocator;
|
||||
|
||||
NONCOPYABLE(PtrQueueSet);
|
||||
|
||||
protected:
|
||||
// Create an empty ptr queue set.
|
||||
PtrQueueSet(BufferNode::Allocator* allocator);
|
||||
~PtrQueueSet();
|
||||
|
||||
// Discard any buffered enqueued data.
|
||||
void reset_queue(PtrQueue& queue);
|
||||
|
||||
// If queue has any buffered enqueued data, transfer it to this qset.
|
||||
// Otherwise, deallocate queue's buffer.
|
||||
void flush_queue(PtrQueue& queue);
|
||||
|
||||
// Add value to queue's buffer, returning true. If buffer is full
|
||||
// or if queue doesn't have a buffer, does nothing and returns false.
|
||||
bool try_enqueue(PtrQueue& queue, void* value);
|
||||
|
||||
// Add value to queue's buffer. The queue must have a non-full buffer.
|
||||
// Used after an initial try_enqueue has failed and the situation resolved.
|
||||
void retry_enqueue(PtrQueue& queue, void* value);
|
||||
|
||||
// Installs a new buffer into queue.
|
||||
// Returns the old buffer, or null if queue didn't have a buffer.
|
||||
BufferNode* exchange_buffer_with_new(PtrQueue& queue);
|
||||
|
||||
// Installs a new buffer into queue.
|
||||
void install_new_buffer(PtrQueue& queue);
|
||||
|
||||
public:
|
||||
|
||||
// Return the associated BufferNode allocator.
|
||||
BufferNode::Allocator* allocator() const { return _allocator; }
|
||||
|
||||
// Return the buffer for a BufferNode of size buffer_capacity().
|
||||
void** allocate_buffer();
|
||||
|
||||
// Return an empty buffer to the free list. The node is required
|
||||
// to have been allocated with a size of buffer_capacity().
|
||||
void deallocate_buffer(BufferNode* node);
|
||||
|
||||
// A completed buffer is a buffer the mutator is finished with, and
|
||||
// is ready to be processed by the collector. It need not be full.
|
||||
|
||||
// Adds node to the completed buffer list.
|
||||
virtual void enqueue_completed_buffer(BufferNode* node) = 0;
|
||||
|
||||
size_t buffer_capacity() const {
|
||||
return _allocator->buffer_capacity();
|
||||
}
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_SHARED_PTRQUEUE_HPP
|
||||
@@ -36,14 +36,19 @@
|
||||
#include "utilities/globalCounter.inline.hpp"
|
||||
|
||||
SATBMarkQueue::SATBMarkQueue(SATBMarkQueueSet* qset) :
|
||||
PtrQueue(qset),
|
||||
_buf(nullptr),
|
||||
_index(0),
|
||||
// SATB queues are only active during marking cycles. We create them
|
||||
// with their active field set to false. If a thread is created
|
||||
// during a cycle, it's SATB queue needs to be activated before the
|
||||
// thread starts running. This is handled by the collector-specific
|
||||
// BarrierSet thread attachment protocol.
|
||||
_active(false)
|
||||
{ }
|
||||
{}
|
||||
|
||||
SATBMarkQueue::~SATBMarkQueue() {
|
||||
assert(_buf == nullptr, "queue must be flushed before delete");
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
// Helpful for debugging
|
||||
@@ -64,7 +69,7 @@ void SATBMarkQueue::print(const char* name) {
|
||||
#endif // PRODUCT
|
||||
|
||||
SATBMarkQueueSet::SATBMarkQueueSet(BufferNode::Allocator* allocator) :
|
||||
PtrQueueSet(allocator),
|
||||
_allocator(allocator),
|
||||
_list(),
|
||||
_count_and_process_flag(0),
|
||||
_process_completed_buffers_threshold(SIZE_MAX),
|
||||
@@ -214,13 +219,6 @@ bool SATBMarkQueueSet::apply_closure_to_completed_buffer(SATBBufferClosure* cl)
|
||||
}
|
||||
}
|
||||
|
||||
void SATBMarkQueueSet::flush_queue(SATBMarkQueue& queue) {
|
||||
// Filter now to possibly save work later. If filtering empties the
|
||||
// buffer then flush_queue can deallocate the buffer.
|
||||
filter(queue);
|
||||
PtrQueueSet::flush_queue(queue);
|
||||
}
|
||||
|
||||
void SATBMarkQueueSet::enqueue_known_active(SATBMarkQueue& queue, oop obj) {
|
||||
assert(queue.is_active(), "precondition");
|
||||
void* value = cast_from_oop<void*>(obj);
|
||||
@@ -355,3 +353,76 @@ void SATBMarkQueueSet::abandon_partial_marking() {
|
||||
} closure(*this);
|
||||
Threads::threads_do(&closure);
|
||||
}
|
||||
|
||||
size_t SATBMarkQueue::current_capacity() const {
|
||||
if (_buf == nullptr) {
|
||||
return 0;
|
||||
} else {
|
||||
return BufferNode::make_node_from_buffer(_buf)->capacity();
|
||||
}
|
||||
}
|
||||
|
||||
void SATBMarkQueueSet::reset_queue(SATBMarkQueue& queue) {
|
||||
queue.set_index(queue.current_capacity());
|
||||
}
|
||||
|
||||
void SATBMarkQueueSet::flush_queue(SATBMarkQueue& queue) {
|
||||
// Filter now to possibly save work later. If filtering empties the
|
||||
// buffer then flush_queue can deallocate the buffer.
|
||||
filter(queue);
|
||||
void** buffer = queue.buffer();
|
||||
if (buffer != nullptr) {
|
||||
size_t index = queue.index();
|
||||
queue.set_buffer(nullptr);
|
||||
queue.set_index(0);
|
||||
BufferNode* node = BufferNode::make_node_from_buffer(buffer, index);
|
||||
if (index == node->capacity()) {
|
||||
deallocate_buffer(node);
|
||||
} else {
|
||||
enqueue_completed_buffer(node);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool SATBMarkQueueSet::try_enqueue(SATBMarkQueue& queue, void* value) {
|
||||
size_t index = queue.index();
|
||||
if (index == 0) return false;
|
||||
void** buffer = queue.buffer();
|
||||
assert(buffer != nullptr, "no buffer but non-zero index");
|
||||
buffer[--index] = value;
|
||||
queue.set_index(index);
|
||||
return true;
|
||||
}
|
||||
|
||||
void SATBMarkQueueSet::retry_enqueue(SATBMarkQueue& queue, void* value) {
|
||||
assert(queue.index() != 0, "precondition");
|
||||
assert(queue.buffer() != nullptr, "precondition");
|
||||
size_t index = queue.index();
|
||||
queue.buffer()[--index] = value;
|
||||
queue.set_index(index);
|
||||
}
|
||||
|
||||
BufferNode* SATBMarkQueueSet::exchange_buffer_with_new(SATBMarkQueue& queue) {
|
||||
BufferNode* node = nullptr;
|
||||
void** buffer = queue.buffer();
|
||||
if (buffer != nullptr) {
|
||||
node = BufferNode::make_node_from_buffer(buffer, queue.index());
|
||||
}
|
||||
install_new_buffer(queue);
|
||||
return node;
|
||||
}
|
||||
|
||||
void SATBMarkQueueSet::install_new_buffer(SATBMarkQueue& queue) {
|
||||
BufferNode* node = _allocator->allocate();
|
||||
queue.set_buffer(BufferNode::make_buffer_from_node(node));
|
||||
queue.set_index(node->capacity());
|
||||
}
|
||||
|
||||
void** SATBMarkQueueSet::allocate_buffer() {
|
||||
BufferNode* node = _allocator->allocate();
|
||||
return BufferNode::make_buffer_from_node(node);
|
||||
}
|
||||
|
||||
void SATBMarkQueueSet::deallocate_buffer(BufferNode* node) {
|
||||
_allocator->release(node);
|
||||
}
|
||||
|
||||
@@ -25,11 +25,15 @@
|
||||
#ifndef SHARE_GC_SHARED_SATBMARKQUEUE_HPP
|
||||
#define SHARE_GC_SHARED_SATBMARKQUEUE_HPP
|
||||
|
||||
#include "gc/shared/ptrQueue.hpp"
|
||||
#include "gc/shared/bufferNode.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "memory/padded.hpp"
|
||||
#include "oops/oopsHierarchy.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/sizes.hpp"
|
||||
|
||||
class Thread;
|
||||
class Monitor;
|
||||
@@ -45,12 +49,33 @@ public:
|
||||
virtual void do_buffer(void** buffer, size_t size) = 0;
|
||||
};
|
||||
|
||||
// A PtrQueue whose elements are (possibly stale) pointers to object heads.
|
||||
class SATBMarkQueue: public PtrQueue {
|
||||
// A queue whose elements are (possibly stale) pointers to object heads.
|
||||
class SATBMarkQueue {
|
||||
friend class VMStructs;
|
||||
friend class SATBMarkQueueSet;
|
||||
|
||||
private:
|
||||
NONCOPYABLE(SATBMarkQueue);
|
||||
|
||||
// The buffer.
|
||||
void** _buf;
|
||||
|
||||
// The (byte) index at which an object was last enqueued. Starts at
|
||||
// capacity (in bytes) (indicating an empty buffer) and goes towards zero.
|
||||
// Value is always pointer-size aligned.
|
||||
size_t _index;
|
||||
|
||||
static const size_t _element_size = sizeof(void*);
|
||||
|
||||
static size_t byte_index_to_index(size_t ind) {
|
||||
assert(is_aligned(ind, _element_size), "precondition");
|
||||
return ind / _element_size;
|
||||
}
|
||||
|
||||
static size_t index_to_byte_index(size_t ind) {
|
||||
return ind * _element_size;
|
||||
}
|
||||
|
||||
// Per-queue (so thread-local) cache of the SATBMarkQueueSet's
|
||||
// active state, to support inline barriers in compiled code.
|
||||
bool _active;
|
||||
@@ -58,6 +83,29 @@ private:
|
||||
public:
|
||||
SATBMarkQueue(SATBMarkQueueSet* qset);
|
||||
|
||||
// Queue must be flushed
|
||||
~SATBMarkQueue();
|
||||
|
||||
void** buffer() const { return _buf; }
|
||||
|
||||
void set_buffer(void** buffer) { _buf = buffer; }
|
||||
|
||||
size_t index() const {
|
||||
return byte_index_to_index(_index);
|
||||
}
|
||||
|
||||
void set_index(size_t new_index) {
|
||||
assert(new_index <= current_capacity(), "precondition");
|
||||
_index = index_to_byte_index(new_index);
|
||||
}
|
||||
|
||||
// Returns the capacity of the buffer, or 0 if the queue doesn't currently
|
||||
// have a buffer.
|
||||
size_t current_capacity() const;
|
||||
|
||||
bool is_empty() const { return index() == current_capacity(); }
|
||||
size_t size() const { return current_capacity() - index(); }
|
||||
|
||||
bool is_active() const { return _active; }
|
||||
void set_active(bool value) { _active = value; }
|
||||
|
||||
@@ -68,14 +116,16 @@ public:
|
||||
|
||||
// Compiler support.
|
||||
static ByteSize byte_offset_of_index() {
|
||||
return PtrQueue::byte_offset_of_index<SATBMarkQueue>();
|
||||
return byte_offset_of(SATBMarkQueue, _index);
|
||||
}
|
||||
using PtrQueue::byte_width_of_index;
|
||||
|
||||
static constexpr ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }
|
||||
|
||||
static ByteSize byte_offset_of_buf() {
|
||||
return PtrQueue::byte_offset_of_buf<SATBMarkQueue>();
|
||||
return byte_offset_of(SATBMarkQueue, _buf);
|
||||
}
|
||||
using PtrQueue::byte_width_of_buf;
|
||||
|
||||
static ByteSize byte_width_of_buf() { return in_ByteSize(_element_size); }
|
||||
|
||||
static ByteSize byte_offset_of_active() {
|
||||
return byte_offset_of(SATBMarkQueue, _active);
|
||||
@@ -84,7 +134,18 @@ public:
|
||||
static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); }
|
||||
};
|
||||
|
||||
class SATBMarkQueueSet: public PtrQueueSet {
|
||||
|
||||
// A SATBMarkQueueSet represents resources common to a set of SATBMarkQueues.
|
||||
// In particular, the individual queues allocate buffers from this shared
|
||||
// set, and return completed buffers to the set.
|
||||
// A completed buffer is a buffer the mutator is finished with, and
|
||||
// is ready to be processed by the collector. It need not be full.
|
||||
|
||||
class SATBMarkQueueSet {
|
||||
|
||||
BufferNode::Allocator* _allocator;
|
||||
|
||||
NONCOPYABLE(SATBMarkQueueSet);
|
||||
|
||||
DEFINE_PAD_MINUS_SIZE(1, DEFAULT_PADDING_SIZE, 0);
|
||||
PaddedEnd<BufferNode::Stack> _list;
|
||||
@@ -99,6 +160,24 @@ class SATBMarkQueueSet: public PtrQueueSet {
|
||||
BufferNode* get_completed_buffer();
|
||||
void abandon_completed_buffers();
|
||||
|
||||
// Discard any buffered enqueued data.
|
||||
void reset_queue(SATBMarkQueue& queue);
|
||||
|
||||
// Add value to queue's buffer, returning true. If buffer is full
|
||||
// or if queue doesn't have a buffer, does nothing and returns false.
|
||||
bool try_enqueue(SATBMarkQueue& queue, void* value);
|
||||
|
||||
// Add value to queue's buffer. The queue must have a non-full buffer.
|
||||
// Used after an initial try_enqueue has failed and the situation resolved.
|
||||
void retry_enqueue(SATBMarkQueue& queue, void* value);
|
||||
|
||||
// Installs a new buffer into queue.
|
||||
// Returns the old buffer, or null if queue didn't have a buffer.
|
||||
BufferNode* exchange_buffer_with_new(SATBMarkQueue& queue);
|
||||
|
||||
// Installs a new buffer into queue.
|
||||
void install_new_buffer(SATBMarkQueue& queue);
|
||||
|
||||
#ifdef ASSERT
|
||||
void dump_active_states(bool expected_active);
|
||||
void verify_active_states(bool expected_active);
|
||||
@@ -106,6 +185,7 @@ class SATBMarkQueueSet: public PtrQueueSet {
|
||||
|
||||
protected:
|
||||
SATBMarkQueueSet(BufferNode::Allocator* allocator);
|
||||
|
||||
~SATBMarkQueueSet();
|
||||
|
||||
void handle_zero_index(SATBMarkQueue& queue);
|
||||
@@ -131,6 +211,7 @@ public:
|
||||
void set_process_completed_buffers_threshold(size_t value);
|
||||
|
||||
size_t buffer_enqueue_threshold() const { return _buffer_enqueue_threshold; }
|
||||
|
||||
void set_buffer_enqueue_threshold_percentage(uint value);
|
||||
|
||||
// If there exists some completed buffer, pop and process it, and
|
||||
@@ -144,7 +225,7 @@ public:
|
||||
// Add obj to queue. This qset and the queue must be active.
|
||||
void enqueue_known_active(SATBMarkQueue& queue, oop obj);
|
||||
virtual void filter(SATBMarkQueue& queue) = 0;
|
||||
virtual void enqueue_completed_buffer(BufferNode* node);
|
||||
void enqueue_completed_buffer(BufferNode* node);
|
||||
|
||||
// The number of buffers in the list. Racy and not updated atomically
|
||||
// with the set of completed buffers.
|
||||
@@ -157,6 +238,20 @@ public:
|
||||
return (_count_and_process_flag.load_relaxed() & 1) != 0;
|
||||
}
|
||||
|
||||
// Return the associated BufferNode allocator.
|
||||
BufferNode::Allocator* allocator() const { return _allocator; }
|
||||
|
||||
// Return the buffer for a BufferNode of size buffer_capacity().
|
||||
void** allocate_buffer();
|
||||
|
||||
// Return an empty buffer to the free list. The node is required
|
||||
// to have been allocated with a size of buffer_capacity().
|
||||
void deallocate_buffer(BufferNode* node);
|
||||
|
||||
size_t buffer_capacity() const {
|
||||
return _allocator->buffer_capacity();
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
// Helpful for debugging
|
||||
void print_all(const char* msg);
|
||||
|
||||
@@ -36,7 +36,6 @@
|
||||
#include "memory/iterator.hpp"
|
||||
#include "nmt/memTag.hpp"
|
||||
#include "oops/access.inline.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "runtime/cpuTimeCounters.hpp"
|
||||
#include "runtime/interfaceSupport.inline.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
@@ -46,17 +45,17 @@
|
||||
|
||||
OopStorage* StringDedup::Processor::_storages[2] = {};
|
||||
|
||||
StringDedup::StorageUse* volatile StringDedup::Processor::_storage_for_requests = nullptr;
|
||||
Atomic<StringDedup::StorageUse*> StringDedup::Processor::_storage_for_requests{};
|
||||
StringDedup::StorageUse* StringDedup::Processor::_storage_for_processing = nullptr;
|
||||
|
||||
void StringDedup::Processor::initialize_storage() {
|
||||
assert(_storages[0] == nullptr, "storage already created");
|
||||
assert(_storages[1] == nullptr, "storage already created");
|
||||
assert(_storage_for_requests == nullptr, "storage already created");
|
||||
assert(_storage_for_requests.load_relaxed() == nullptr, "storage already created");
|
||||
assert(_storage_for_processing == nullptr, "storage already created");
|
||||
_storages[0] = OopStorageSet::create_weak("StringDedup Requests0 Weak", mtStringDedup);
|
||||
_storages[1] = OopStorageSet::create_weak("StringDedup Requests1 Weak", mtStringDedup);
|
||||
_storage_for_requests = new StorageUse(_storages[0]);
|
||||
_storage_for_requests.store_relaxed(new StorageUse(_storages[0]));
|
||||
_storage_for_processing = new StorageUse(_storages[1]);
|
||||
}
|
||||
|
||||
@@ -75,7 +74,7 @@ void StringDedup::Processor::wait_for_requests() const {
|
||||
{
|
||||
ThreadBlockInVM tbivm(_thread);
|
||||
MonitorLocker ml(StringDedup_lock, Mutex::_no_safepoint_check_flag);
|
||||
OopStorage* storage = AtomicAccess::load(&_storage_for_requests)->storage();
|
||||
OopStorage* storage = _storage_for_requests.load_relaxed()->storage();
|
||||
while ((storage->allocation_count() == 0) &&
|
||||
!Table::is_dead_entry_removal_needed()) {
|
||||
ml.wait();
|
||||
@@ -83,7 +82,7 @@ void StringDedup::Processor::wait_for_requests() const {
|
||||
}
|
||||
// Swap the request and processing storage objects.
|
||||
log_trace(stringdedup)("swapping request storages");
|
||||
_storage_for_processing = AtomicAccess::xchg(&_storage_for_requests, _storage_for_processing);
|
||||
_storage_for_processing = _storage_for_requests.exchange(_storage_for_processing);
|
||||
GlobalCounter::write_synchronize();
|
||||
// Wait for the now current processing storage object to no longer be used
|
||||
// by an in-progress GC. Again here, the num-dead notification from the
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -27,6 +27,7 @@
|
||||
|
||||
#include "gc/shared/stringdedup/stringDedup.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "services/cpuTimeUsage.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
|
||||
@@ -51,7 +52,7 @@ class StringDedup::Processor : public CHeapObj<mtGC> {
|
||||
NONCOPYABLE(Processor);
|
||||
|
||||
static OopStorage* _storages[2];
|
||||
static StorageUse* volatile _storage_for_requests;
|
||||
static Atomic<StorageUse*> _storage_for_requests;
|
||||
static StorageUse* _storage_for_processing;
|
||||
|
||||
JavaThread* _thread;
|
||||
|
||||
@@ -94,13 +94,17 @@ static double strdedup_elapsed_param_ms(Tickspan t) {
|
||||
void StringDedup::Stat::log_summary(const Stat* last_stat, const Stat* total_stat) {
|
||||
log_info(stringdedup)(
|
||||
"Concurrent String Deduplication "
|
||||
"%zu/" STRDEDUP_BYTES_FORMAT_NS " (new), "
|
||||
"%zu (inspected), "
|
||||
"%zu/" STRDEDUP_BYTES_FORMAT_NS " (new unknown), "
|
||||
"%zu/" STRDEDUP_BYTES_FORMAT_NS " (deduped), "
|
||||
"avg " STRDEDUP_PERCENT_FORMAT_NS ", "
|
||||
"total avg deduped/new unknown bytes " STRDEDUP_PERCENT_FORMAT_NS ", "
|
||||
STRDEDUP_BYTES_FORMAT_NS " (total deduped)," STRDEDUP_BYTES_FORMAT_NS " (total new unknown), "
|
||||
STRDEDUP_ELAPSED_FORMAT_MS " of " STRDEDUP_ELAPSED_FORMAT_MS,
|
||||
last_stat->_inspected,
|
||||
last_stat->_new, STRDEDUP_BYTES_PARAM(last_stat->_new_bytes),
|
||||
last_stat->_deduped, STRDEDUP_BYTES_PARAM(last_stat->_deduped_bytes),
|
||||
percent_of(total_stat->_deduped_bytes, total_stat->_new_bytes),
|
||||
STRDEDUP_BYTES_PARAM(total_stat->_deduped_bytes), STRDEDUP_BYTES_PARAM(total_stat->_new_bytes),
|
||||
strdedup_elapsed_param_ms(last_stat->_process_elapsed),
|
||||
strdedup_elapsed_param_ms(last_stat->_active_elapsed));
|
||||
}
|
||||
@@ -213,7 +217,7 @@ void StringDedup::Stat::log_statistics() const {
|
||||
log_debug(stringdedup)(" Inspected: %12zu", _inspected);
|
||||
log_debug(stringdedup)(" Known: %12zu(%5.1f%%)", _known, known_percent);
|
||||
log_debug(stringdedup)(" Shared: %12zu(%5.1f%%)", _known_shared, known_shared_percent);
|
||||
log_debug(stringdedup)(" New: %12zu(%5.1f%%)" STRDEDUP_BYTES_FORMAT,
|
||||
log_debug(stringdedup)(" New unknown: %12zu(%5.1f%%)" STRDEDUP_BYTES_FORMAT,
|
||||
_new, new_percent, STRDEDUP_BYTES_PARAM(_new_bytes));
|
||||
log_debug(stringdedup)(" Replaced: %12zu(%5.1f%%)", _replaced, replaced_percent);
|
||||
log_debug(stringdedup)(" Deleted: %12zu(%5.1f%%)", _deleted, deleted_percent);
|
||||
|
||||
@@ -23,7 +23,6 @@
|
||||
*/
|
||||
|
||||
#include "gc/shared/stringdedup/stringDedupStorageUse.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "runtime/javaThread.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalCounter.inline.hpp"
|
||||
@@ -34,18 +33,18 @@ StringDedup::StorageUse::StorageUse(OopStorage* storage) :
|
||||
{}
|
||||
|
||||
bool StringDedup::StorageUse::is_used_acquire() const {
|
||||
return AtomicAccess::load_acquire(&_use_count) > 0;
|
||||
return _use_count.load_acquire() > 0;
|
||||
}
|
||||
|
||||
StringDedup::StorageUse*
|
||||
StringDedup::StorageUse::obtain(StorageUse* volatile* ptr) {
|
||||
StringDedup::StorageUse::obtain(Atomic<StorageUse*>* ptr) {
|
||||
GlobalCounter::CriticalSection cs(Thread::current());
|
||||
StorageUse* storage = AtomicAccess::load(ptr);
|
||||
AtomicAccess::inc(&storage->_use_count);
|
||||
StorageUse* storage = ptr->load_relaxed();
|
||||
storage->_use_count.add_then_fetch(1u);
|
||||
return storage;
|
||||
}
|
||||
|
||||
void StringDedup::StorageUse::relinquish() {
|
||||
size_t result = AtomicAccess::sub(&_use_count, size_t(1));
|
||||
assert(result != SIZE_MAX, "use count underflow");
|
||||
size_t result = _use_count.fetch_then_sub(1u);
|
||||
assert(result != 0, "use count underflow");
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -27,6 +27,7 @@
|
||||
|
||||
#include "gc/shared/stringdedup/stringDedup.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
|
||||
@@ -35,7 +36,7 @@ class OopStorage;
|
||||
// Manage access to one of the OopStorage objects used for requests.
|
||||
class StringDedup::StorageUse : public CHeapObj<mtStringDedup> {
|
||||
OopStorage* const _storage;
|
||||
volatile size_t _use_count;
|
||||
Atomic<size_t> _use_count;
|
||||
|
||||
NONCOPYABLE(StorageUse);
|
||||
|
||||
@@ -48,7 +49,7 @@ public:
|
||||
bool is_used_acquire() const;
|
||||
|
||||
// Get the current requests object, and increment its in-use count.
|
||||
static StorageUse* obtain(StorageUse* volatile* ptr);
|
||||
static StorageUse* obtain(Atomic<StorageUse*>* ptr);
|
||||
|
||||
// Discard a prior "obtain" request, decrementing the in-use count, and
|
||||
// permitting the deduplication thread to start processing if needed.
|
||||
|
||||
@@ -37,6 +37,7 @@
|
||||
#include "utilities/copy.hpp"
|
||||
|
||||
size_t ThreadLocalAllocBuffer::_max_size = 0;
|
||||
int ThreadLocalAllocBuffer::_reserve_for_allocation_prefetch = 0;
|
||||
unsigned int ThreadLocalAllocBuffer::_target_refills = 0;
|
||||
|
||||
ThreadLocalAllocBuffer::ThreadLocalAllocBuffer() :
|
||||
@@ -224,6 +225,30 @@ void ThreadLocalAllocBuffer::startup_initialization() {
|
||||
// abort during VM initialization.
|
||||
_target_refills = MAX2(_target_refills, 2U);
|
||||
|
||||
#ifdef COMPILER2
|
||||
// If the C2 compiler is present, extra space is needed at the end of
|
||||
// TLABs, otherwise prefetching instructions generated by the C2
|
||||
// compiler will fault (due to accessing memory outside of heap).
|
||||
// The amount of space is the max of the number of lines to
|
||||
// prefetch for array and for instance allocations. (Extra space must be
|
||||
// reserved to accommodate both types of allocations.)
|
||||
//
|
||||
// Only SPARC-specific BIS instructions are known to fault. (Those
|
||||
// instructions are generated if AllocatePrefetchStyle==3 and
|
||||
// AllocatePrefetchInstr==1). To be on the safe side, however,
|
||||
// extra space is reserved for all combinations of
|
||||
// AllocatePrefetchStyle and AllocatePrefetchInstr.
|
||||
//
|
||||
// If the C2 compiler is not present, no space is reserved.
|
||||
|
||||
// +1 for rounding up to next cache line, +1 to be safe
|
||||
if (CompilerConfig::is_c2_or_jvmci_compiler_enabled()) {
|
||||
int lines = MAX2(AllocatePrefetchLines, AllocateInstancePrefetchLines) + 2;
|
||||
_reserve_for_allocation_prefetch = (AllocatePrefetchDistance + AllocatePrefetchStepSize * lines) /
|
||||
(int)HeapWordSize;
|
||||
}
|
||||
#endif
|
||||
|
||||
// During jvm startup, the main thread is initialized
|
||||
// before the heap is initialized. So reinitialize it now.
|
||||
guarantee(Thread::current()->is_Java_thread(), "tlab initialization thread not Java thread");
|
||||
@@ -429,7 +454,8 @@ void ThreadLocalAllocStats::publish() {
|
||||
}
|
||||
|
||||
size_t ThreadLocalAllocBuffer::end_reserve() {
|
||||
return CollectedHeap::lab_alignment_reserve();
|
||||
size_t reserve_size = CollectedHeap::lab_alignment_reserve();
|
||||
return MAX2(reserve_size, (size_t)_reserve_for_allocation_prefetch);
|
||||
}
|
||||
|
||||
const HeapWord* ThreadLocalAllocBuffer::start_relaxed() const {
|
||||
|
||||
@@ -58,6 +58,7 @@ private:
|
||||
size_t _allocated_before_last_gc; // total bytes allocated up until the last gc
|
||||
|
||||
static size_t _max_size; // maximum size of any TLAB
|
||||
static int _reserve_for_allocation_prefetch; // Reserve at the end of the TLAB
|
||||
static unsigned _target_refills; // expected number of refills between GCs
|
||||
|
||||
unsigned _number_of_refills;
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user