Mirror of https://github.com/JetBrains/JetBrainsRuntime.git (synced 2026-01-20 15:31:59 +01:00)

Compare commits: avu/dsync_ ... jdk-22+23 (108 commits)
| SHA1 |
|---|
| 8555e0f6c4 |
| 73c5f60f41 |
| cc4b0d9217 |
| a290256bbf |
| b1625af600 |
| 806529aa77 |
| e9eb8b98f4 |
| 8eb6f617b3 |
| 82747132b0 |
| 0dcd730f5c |
| 45e68ae207 |
| 4a0ad462ab |
| 134c382d39 |
| ef8c8408a6 |
| bf9a93de1f |
| b2504a0f9c |
| bfafb27e27 |
| c760097943 |
| 85e4cde3f8 |
| d22e368cb5 |
| 541ff7149f |
| 42f43c520c |
| ac0ee20a38 |
| a7c0190230 |
| 9ac6ac86d3 |
| 1c0e7b71b8 |
| 74f1889b58 |
| e1cae72036 |
| 419ed90770 |
| 439ed046e4 |
| e4803e0cbf |
| cdf337357a |
| 1696603ccd |
| b3126b6e44 |
| 1c2ea1d27b |
| 96e6e670b5 |
| 2d4bbf4787 |
| 8fb94fd4fe |
| b5c863b772 |
| 377138c7b5 |
| c146685ca9 |
| 01c0d5dd0a |
| df599dbb9b |
| c099cf53f2 |
| 29cf2c471b |
| ea6a88a0aa |
| de6667cf11 |
| 008ca2a725 |
| 1a21c1a783 |
| 81db1721d4 |
| be01caf30d |
| ec79ab4b3c |
| c788160f8a |
| ffaecd4aa2 |
| 3b65b8797a |
| 9dc40ba48e |
| f875163c5d |
| 84f4f7477c |
| 6ad093ef12 |
| e6f46a4326 |
| e318cd25cb |
| 7a7b1e5a92 |
| cb20a3e7a6 |
| 99efcded6c |
| e9d19d0fff |
| faa8bde275 |
| 4f808c62b0 |
| 2d4a4d04b8 |
| 792d829328 |
| 23a96bf312 |
| 64f8253b7d |
| 53bb7cd415 |
| d6ce62ebc0 |
| 7f31a0591c |
| 4a85f6ae9f |
| 5207443b36 |
| ee57e731d0 |
| f262f06c97 |
| bfaf5704e7 |
| d354141aa1 |
| c86592d38d |
| 3660a90ad8 |
| 7f47c51ace |
| 36de19d462 |
| ab1934848b |
| b4f5379d50 |
| 0461d9a7d6 |
| 2a76ad975c |
| b3fec6b5f3 |
| 11394828b3 |
| 2182c93689 |
| 613a3cc689 |
| 613d32c282 |
| f1e8787393 |
| 47624f6fc6 |
| 2d5829afbc |
| 0064cf90ff |
| 3a7525d5c3 |
| f4c5db92ea |
| 7452d50be5 |
| 3e39d7b34c |
| ee6f25b507 |
| e05cafda78 |
| d3c3f0e7c8 |
| 576c9bccfb |
| 5411ad2a5c |
| 75ce02fe74 |
| 328b381075 |
@@ -548,7 +548,7 @@ to compile successfully without issues.</p>
 </tr>
 <tr class="odd">
 <td>Windows</td>
-<td>Microsoft Visual Studio 2022 update 17.1.0</td>
+<td>Microsoft Visual Studio 2022 version 17.6.5</td>
 </tr>
 </tbody>
 </table>
@@ -334,11 +334,11 @@ possible to compile the JDK with both older and newer versions, but the closer
 you stay to this list, the more likely you are to compile successfully without
 issues.
 
-| Operating system | Toolchain version                          |
-| ------------------ | ------------------------------------------ |
-| Linux              | gcc 11.2.0                                 |
-| macOS              | Apple Xcode 14.3.1 (using clang 14.0.3)    |
-| Windows            | Microsoft Visual Studio 2022 update 17.1.0 |
+| Operating system | Toolchain version                           |
+| ------------------ | ------------------------------------------- |
+| Linux              | gcc 11.2.0                                  |
+| macOS              | Apple Xcode 14.3.1 (using clang 14.0.3)     |
+| Windows            | Microsoft Visual Studio 2022 version 17.6.5 |
 
 All compilers are expected to be able to compile to the C99 language standard,
 as some C99 features are used in the source code. Microsoft Visual Studio
@@ -207,13 +207,22 @@ changed, and the first N tiers they can afford to run, but at least
 tier1.</p>
 <p>A brief description of the tiered test groups:</p>
 <ul>
-<li><p><code>tier1</code>: This is the lowest test tier. Multiple
-developers run these tests every day. Because of the widespread use, the
-tests in <code>tier1</code> are carefully selected and optimized to run
-fast, and to run in the most stable manner. The test failures in
-<code>tier1</code> are usually followed up on quickly, either with
-fixes, or adding relevant tests to problem list. GitHub Actions
-workflows, if enabled, run <code>tier1</code> tests.</p></li>
+<li><p><code>tier1</code>: This is the most fundamental test tier.
+Roughly speaking, a failure of a test in this tier has the potential to
+indicate a problem that would affect many Java programs. Tests in
+<code>tier1</code> include tests of HotSpot, core APIs in the
+<code>java.base</code> module, and the <code>javac</code> compiler.
+Multiple developers run these tests every day. Because of the widespread
+use, the tests in <code>tier1</code> are carefully selected and
+optimized to run fast, and to run in the most stable manner. As a
+guideline, nearly all individual tests in <code>tier1</code> are
+expected to run to completion in ten seconds or less when run on common
+configurations used for development. Long-running tests, even of core
+functionality, should occur in higher tiers or be covered in other kinds
+of testing. The test failures in <code>tier1</code> are usually followed
+up on quickly, either with fixes, or adding relevant tests to problem
+list. GitHub Actions workflows, if enabled, run <code>tier1</code>
+tests.</p></li>
 <li><p><code>tier2</code>: This test group covers even more ground.
 These contain, among other things, tests that either run for too long to
 be at <code>tier1</code>, or may require special configuration, or tests
@@ -135,12 +135,21 @@ the first N tiers they can afford to run, but at least tier1.
 
 A brief description of the tiered test groups:
 
-- `tier1`: This is the lowest test tier. Multiple developers run these tests
-  every day. Because of the widespread use, the tests in `tier1` are
-  carefully selected and optimized to run fast, and to run in the most stable
-  manner. The test failures in `tier1` are usually followed up on quickly,
-  either with fixes, or adding relevant tests to problem list. GitHub Actions
-  workflows, if enabled, run `tier1` tests.
+- `tier1`: This is the most fundamental test tier.
+  Roughly speaking, a failure of a test in this tier has the potential
+  to indicate a problem that would affect many Java programs. Tests in
+  `tier1` include tests of HotSpot, core APIs in the `java.base`
+  module, and the `javac` compiler. Multiple developers run these
+  tests every day. Because of the widespread use, the tests in `tier1`
+  are carefully selected and optimized to run fast, and to run in the
+  most stable manner. As a guideline, nearly all individual tests in
+  `tier1` are expected to run to completion in ten seconds or less
+  when run on common configurations used for development. Long-running
+  tests, even of core functionality, should occur in higher tiers or
+  be covered in other kinds of testing. The test failures in `tier1`
+  are usually followed up on quickly, either with fixes, or adding
+  relevant tests to problem list. GitHub Actions workflows, if
+  enabled, run `tier1` tests.
 
 - `tier2`: This test group covers even more ground. These contain, among other
   things, tests that either run for too long to be at `tier1`, or may require
@@ -182,9 +182,14 @@ else # not java.base
   endif
 endif
 
-# Set main class of jdk.httpserver module
-ifeq ($(MODULE), jdk.httpserver)
-  JMOD_FLAGS += --main-class sun.net.httpserver.simpleserver.Main
+################################################################################
+# Include module specific build settings
+
+-include Jmod.gmk
+
+# Set main class
+ifneq ($(JMOD_FLAGS_main_class), )
+  JMOD_FLAGS += $(JMOD_FLAGS_main_class)
 endif
 
 # Changes to the jmod tool itself should also trigger a rebuild of all jmods.
@@ -405,8 +405,9 @@ JMOD_TARGETS := $(addsuffix -jmod, $(JMOD_MODULES))
 
 define DeclareJmodRecipe
 $1-jmod:
-	+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f CreateJmods.gmk \
-	    MODULE=$1)
+	+($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) \
+	    $(patsubst %,-I%/modules/$1,$(PHASE_MAKEDIRS)) \
+	    -f CreateJmods.gmk MODULE=$1)
 endef
 
 $(foreach m, $(JMOD_MODULES), $(eval $(call DeclareJmodRecipe,$m)))
@@ -1053,6 +1054,9 @@ else
 # All modules include the main license files from java.base.
 $(JMOD_TARGETS): java.base-copy
 
+# jdk.javadoc uses an internal copy of the main license files from java.base.
+jdk.javadoc-copy: java.base-copy
+
 zip-security: $(filter jdk.crypto%, $(JAVA_TARGETS))
 
 ifeq ($(ENABLE_GENERATE_CLASSLIST), true)
@@ -562,7 +562,7 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_HELPER],
   elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
     # The -utf-8 option sets source and execution character sets to UTF-8 to enable correct
     # compilation of all source files regardless of the active code page on Windows.
-    TOOLCHAIN_CFLAGS_JVM="-nologo -MD -Zc:preprocessor -Zc:strictStrings -Zc:inline -utf-8 -MP"
+    TOOLCHAIN_CFLAGS_JVM="-nologo -MD -Zc:preprocessor -Zc:strictStrings -Zc:inline -permissive- -utf-8 -MP"
     TOOLCHAIN_CFLAGS_JDK="-nologo -MD -Zc:preprocessor -Zc:strictStrings -Zc:inline -utf-8 -Zc:wchar_t-"
   fi
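The newly added `-permissive-` flag puts MSVC into standards-conformance mode. A small hand-written illustration (assumed behavior of cl.exe's conformance mode, not JDK code) of one Microsoft extension it disables — binding a temporary to a non-const lvalue reference:

```cpp
// Hypothetical example, not JDK code: traditional MSVC accepts this,
// but cl with -permissive- (conformance mode) rejects it.
struct Point { int x, y; };
Point make_point() { return Point{1, 2}; }

void consume(Point& p);        // non-const lvalue reference (declaration only)

void demo() {
  consume(make_point());       // error under -permissive-: cannot bind a
                               // temporary to Point&
}
```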
@@ -1087,7 +1087,7 @@ var getJibProfilesDependencies = function (input, common) {
     var devkit_platform_revisions = {
         linux_x64: "gcc11.2.0-OL6.4+1.0",
         macosx: "Xcode14.3.1+1.0",
-        windows_x64: "VS2022-17.1.0+1.1",
+        windows_x64: "VS2022-17.6.5+1.0",
         linux_aarch64: input.build_cpu == "x64" ? "gcc11.2.0-OL7.6+1.1" : "gcc11.2.0-OL7.6+1.0",
         linux_arm: "gcc8.2.0-Fedora27+1.0",
         linux_ppc64le: "gcc8.2.0-Fedora27+1.0",
@@ -229,7 +229,7 @@ public final class SealedGraph implements Taglet {
         var forwardNavigator = nodePackage.getQualifiedName().toString()
                 .replace(".", "/");
 
-        return backNavigator + forwardNavigator + "/" + node.getSimpleName() + ".html";
+        return backNavigator + forwardNavigator + "/" + packagelessCanonicalName(node) + ".html";
     }
 
     public void addEdge(TypeElement node, TypeElement subNode) {
@@ -315,5 +315,14 @@ public final class SealedGraph implements Taglet {
                 case MEMBER -> packageName((TypeElement) element.getEnclosingElement());
             };
         }
+
+        private static String packagelessCanonicalName(TypeElement element) {
+            String result = element.getSimpleName().toString();
+            while (element.getNestingKind() == NestingKind.MEMBER) {
+                element = (TypeElement) element.getEnclosingElement();
+                result = element.getSimpleName().toString() + '.' + result;
+            }
+            return result;
+        }
     }
 }
@@ -493,13 +493,23 @@ else
   endif
 
-  # hb-ft.cc is not presently needed, and requires freetype 2.4.2 or later.
-  LIBFONTMANAGER_EXCLUDE_FILES += libharfbuzz/hb-ft.cc
+  # hb-subset and hb-style APIs are not needed, excluded to cut on compilation time.
+  LIBFONTMANAGER_EXCLUDE_FILES += hb-ft.cc hb-subset-cff-common.cc \
+      hb-subset-cff1.cc hb-subset-cff2.cc hb-subset-input.cc hb-subset-plan.cc \
+      hb-subset.cc hb-subset-instancer-solver.cc gsubgpos-context.cc hb-style.cc
 
+  # list of disabled warnings and the compilers for which it was specifically added.
+  # array-bounds -> GCC 12 on Alpine Linux
+  # parentheses -> GCC 6
+  # range-loop-analysis -> clang on Xcode12
+
   HARFBUZZ_DISABLED_WARNINGS_gcc := missing-field-initializers strict-aliasing \
       unused-result array-bounds parentheses
   # noexcept-type required for GCC 7 builds. Not required for GCC 8+.
   # expansion-to-defined required for GCC 9 builds. Not required for GCC 10+.
-  HARFBUZZ_DISABLED_WARNINGS_CXX_gcc := class-memaccess noexcept-type expansion-to-defined dangling-reference
+  # maybe-uninitialized required for GCC 8 builds. Not required for GCC 9+.
+  HARFBUZZ_DISABLED_WARNINGS_CXX_gcc := class-memaccess noexcept-type \
+      expansion-to-defined dangling-reference maybe-uninitialized
   HARFBUZZ_DISABLED_WARNINGS_clang := missing-field-initializers range-loop-analysis
   HARFBUZZ_DISABLED_WARNINGS_microsoft := 4267 4244
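On the newly disabled `maybe-uninitialized` warning: GCC 8's `-Wmaybe-uninitialized` is known to produce false positives on flow-dependent patterns. A hand-written sketch of the kind of shape that can trip it (illustrative only, not HarfBuzz code):

```cpp
// Hypothetical pattern that can trigger GCC 8's -Wmaybe-uninitialized even
// though 'v' is never read without being written first.
int f(bool flag) {
  int v;              // only assigned when flag is true
  if (flag) {
    v = 42;
  }
  int r = 0;
  if (flag) {
    r = v;            // guarded by the same condition, yet GCC 8 may warn
  }
  return r;
}
```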
make/modules/jdk.httpserver/Jmod.gmk (new file, 26 lines)
@@ -0,0 +1,26 @@
+#
+# Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation. Oracle designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+JMOD_FLAGS_main_class := --main-class sun.net.httpserver.simpleserver.Main
make/modules/jdk.jartool/Jmod.gmk (new file, 26 lines)
@@ -0,0 +1,26 @@
+# (same Oracle GPLv2-with-Classpath-exception header as above)
+
+JMOD_FLAGS_main_class := --main-class sun.tools.jar.Main
make/modules/jdk.javadoc/Copy.gmk (new file, 47 lines)
@@ -0,0 +1,47 @@
+# (same Oracle GPLv2-with-Classpath-exception header as above)
+
+include CopyCommon.gmk
+
+JDK_JAVADOC_DIR := $(JDK_OUTPUTDIR)/modules/jdk.javadoc
+JDK_JAVADOC_DOCLET_RESOURCE_DIR := $(JDK_JAVADOC_DIR)/jdk/javadoc/internal/doclets/formats/html/resources
+
+################################################################################
+
+$(eval $(call SetupCopyFiles, COPY_JAVADOC_MODULE_LEGAL_RESOURCES, \
+    DEST := $(JDK_JAVADOC_DOCLET_RESOURCE_DIR)/legal, \
+    FILES := $(wildcard $(MODULE_SRC)/share/legal/*.md), \
+))
+TARGETS += $(COPY_JAVADOC_MODULE_LEGAL_RESOURCES)
+
+################################################################################
+
+$(eval $(call SetupCopyFiles, COPY_JAVADOC_COMMON_LEGAL_RESOURCES, \
+    DEST := $(JDK_JAVADOC_DOCLET_RESOURCE_DIR)/legal, \
+    FILES := $(wildcard $(COMMON_LEGAL_DST_DIR)/*), \
+))
+TARGETS += $(COPY_JAVADOC_COMMON_LEGAL_RESOURCES)
+
+################################################################################
make/modules/jdk.jfr/Jmod.gmk (new file, 26 lines)
@@ -0,0 +1,26 @@
+# (same Oracle GPLv2-with-Classpath-exception header as above)
+
+JMOD_FLAGS_main_class := --main-class jdk.jfr.internal.tool.Main
make/modules/jdk.jpackage/Jmod.gmk (new file, 26 lines)
@@ -0,0 +1,26 @@
+# (same Oracle GPLv2-with-Classpath-exception header as above)
+
+JMOD_FLAGS_main_class := --main-class jdk.jpackage.main.Main
make/modules/jdk.jshell/Jmod.gmk (new file, 26 lines)
@@ -0,0 +1,26 @@
+# (same Oracle GPLv2-with-Classpath-exception header as above)
+
+JMOD_FLAGS_main_class := --main-class jdk.internal.jshell.tool.JShellToolProvider
make/modules/jdk.jstatd/Jmod.gmk (new file, 26 lines)
@@ -0,0 +1,26 @@
+# (same Oracle GPLv2-with-Classpath-exception header as above)
+
+JMOD_FLAGS_main_class := --main-class sun.tools.jstatd.Jstatd
@@ -851,6 +851,7 @@ ifeq ($(call isTargetOs, linux), true)
   BUILD_TEST_exeinvoke_exeinvoke.c_OPTIMIZATION := NONE
   BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exeFPRegs := -ldl
   BUILD_HOTSPOT_JTREG_LIBRARIES_LIBS_libAsyncGetCallTraceTest := -ldl
+  BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libfast-math := -ffast-math
 else
   BUILD_HOTSPOT_JTREG_EXCLUDE += libtest-rw.c libtest-rwx.c \
       exeinvoke.c exestack-gap.c exestack-tls.c libAsyncGetCallTraceTest.cpp
@@ -47,11 +47,7 @@ bool ABIDescriptor::is_volatile_reg(FloatRegister reg) const {
 }
 
 bool ForeignGlobals::is_foreign_linker_supported() {
-#ifdef LINUX
   return true;
-#else
-  return false;
-#endif
 }
 
 // Stubbed out, implement later
@@ -175,68 +175,68 @@
[indentation-only hunk: the declarations below are unchanged apart from whitespace, so each is shown once]
                            Register tmp1, Register tmp2,
                            int encForm);

  void clear_array_v(Register base, Register cnt);

  void byte_array_inflate_v(Register src, Register dst,
                            Register len, Register tmp);

  void char_array_compress_v(Register src, Register dst,
                             Register len, Register result,
                             Register tmp);

  void encode_iso_array_v(Register src, Register dst,
                          Register len, Register result,
                          Register tmp, bool ascii);

  void count_positives_v(Register ary, Register len,
                         Register result, Register tmp);

  void string_indexof_char_v(Register str1, Register cnt1,
                             Register ch, Register result,
                             Register tmp1, Register tmp2,
                             bool isL);

  void minmax_fp_v(VectorRegister dst,
                   VectorRegister src1, VectorRegister src2,
                   BasicType bt, bool is_min, int vector_length);

  void minmax_fp_masked_v(VectorRegister dst, VectorRegister src1, VectorRegister src2,
                          VectorRegister vmask, VectorRegister tmp1, VectorRegister tmp2,
                          BasicType bt, bool is_min, int vector_length);

  void reduce_minmax_fp_v(FloatRegister dst,
                          FloatRegister src1, VectorRegister src2,
                          VectorRegister tmp1, VectorRegister tmp2,
                          bool is_double, bool is_min, int vector_length,
                          VectorMask vm = Assembler::unmasked);

  void reduce_integral_v(Register dst, Register src1,
                         VectorRegister src2, VectorRegister tmp,
                         int opc, BasicType bt, int vector_length,
                         VectorMask vm = Assembler::unmasked);

  void vsetvli_helper(BasicType bt, int vector_length, LMUL vlmul = Assembler::m1, Register tmp = t0);

  void compare_integral_v(VectorRegister dst, VectorRegister src1, VectorRegister src2, int cond,
                          BasicType bt, int vector_length, VectorMask vm = Assembler::unmasked);

  void compare_fp_v(VectorRegister dst, VectorRegister src1, VectorRegister src2, int cond,
                    BasicType bt, int vector_length, VectorMask vm = Assembler::unmasked);

  // In Matcher::scalable_predicate_reg_slots,
  // we assume each predicate register is one-eighth of the size of
  // scalable vector register, one mask bit per vector byte.
  void spill_vmask(VectorRegister v, int offset){
    vsetvli_helper(T_BYTE, MaxVectorSize >> 3);
    add(t0, sp, offset);
    vse8_v(v, t0);
  }

  void unspill_vmask(VectorRegister v, int offset){
    vsetvli_helper(T_BYTE, MaxVectorSize >> 3);
    add(t0, sp, offset);
    vle8_v(v, t0);
  }

  void spill_copy_vmask_stack_to_stack(int src_offset, int dst_offset, int vector_length_in_bytes) {
    assert(vector_length_in_bytes % 4 == 0, "unexpected vector mask reg size");
@@ -302,17 +302,17 @@ void VM_Version::c2_initialize() {
   if (UseRVV) {
     if (FLAG_IS_DEFAULT(MaxVectorSize)) {
       MaxVectorSize = _initial_vector_length;
-    } else if (MaxVectorSize < 16) {
+    } else if (!is_power_of_2(MaxVectorSize)) {
+      vm_exit_during_initialization(err_msg("Unsupported MaxVectorSize: %d, must be a power of 2", (int)MaxVectorSize));
+    } else if (MaxVectorSize > _initial_vector_length) {
+      warning("Current system only supports max RVV vector length %d. Set MaxVectorSize to %d",
+              _initial_vector_length, _initial_vector_length);
+      MaxVectorSize = _initial_vector_length;
+    }
+    if (MaxVectorSize < 16) {
       warning("RVV does not support vector length less than 16 bytes. Disabling RVV.");
       UseRVV = false;
-    } else if (is_power_of_2(MaxVectorSize)) {
-      if (MaxVectorSize > _initial_vector_length) {
-        warning("Current system only supports max RVV vector length %d. Set MaxVectorSize to %d",
-                _initial_vector_length, _initial_vector_length);
-        MaxVectorSize = _initial_vector_length;
-      }
-    } else {
-      vm_exit_during_initialization(err_msg("Unsupported MaxVectorSize: %d", (int)MaxVectorSize));
-      FLAG_SET_DEFAULT(MaxVectorSize, 0);
     }
   }
 
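For reference, the power-of-two test the new `is_power_of_2(MaxVectorSize)` guard relies on is the standard bit trick; a standalone sketch (not the HotSpot implementation itself):

```cpp
// A power of two has exactly one bit set, so clearing the lowest set bit
// (x & (x - 1)) must leave zero. Zero and negatives are rejected explicitly.
static bool is_pow2(int x) {
  return x > 0 && (x & (x - 1)) == 0;
}
```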
@@ -81,7 +81,7 @@
 #include "utilities/growableArray.hpp"
 #include "utilities/vmError.hpp"
 #if INCLUDE_JFR
-#include "jfr/jfrEvents.hpp"
+#include "jfr/support/jfrNativeLibraryLoadEvent.hpp"
 #endif
 
 // put OS-includes here (sorted alphabetically)
@@ -1118,11 +1118,6 @@ void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
     return nullptr;
   }
 
-#if INCLUDE_JFR
-  EventNativeLibraryLoad event;
-  event.set_name(filename);
-#endif
-
   // RTLD_LAZY has currently the same behavior as RTLD_NOW
   // The dl is loaded immediately with all its dependants.
   int dflags = RTLD_LAZY;
@@ -1133,19 +1128,14 @@ void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
     dflags |= RTLD_MEMBER;
   }
 
-  void * result= ::dlopen(filename, dflags);
+  void* result;
+  JFR_ONLY(NativeLibraryLoadEvent load_event(filename, &result);)
+  result = ::dlopen(filename, dflags);
   if (result != nullptr) {
     Events::log_dll_message(nullptr, "Loaded shared library %s", filename);
     // Reload dll cache. Don't do this in signal handling.
     LoadedLibraries::reload();
     log_info(os)("shared library load of %s was successful", filename);
-
-#if INCLUDE_JFR
-    event.set_success(true);
-    event.set_errorMessage(nullptr);
-    event.commit();
-#endif
-
     return result;
   } else {
     // error analysis when dlopen fails
@@ -1159,12 +1149,7 @@ void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
     }
     Events::log_dll_message(nullptr, "Loading shared library %s failed, %s", filename, error_report);
     log_info(os)("shared library load of %s failed, %s", filename, error_report);
-
-#if INCLUDE_JFR
-    event.set_success(false);
-    event.set_errorMessage(error_report);
-    event.commit();
-#endif
+    JFR_ONLY(load_event.set_error_msg(error_report);)
   }
   return nullptr;
 }
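The recurring pattern in these hunks replaces the manual `EventNativeLibraryLoad` set/commit sequences with a single `JFR_ONLY(NativeLibraryLoadEvent ...)` helper. A minimal sketch of the RAII idea, with names taken from the diff but a body that is assumed rather than quoted:

```cpp
// Sketch only, not the actual HotSpot class: the helper commits the JFR
// event in its destructor, so every early return in dll_load reports
// success or failure exactly once, instead of repeating set/commit blocks.
class NativeLibraryLoadEventSketch {
  const char* _name;
  void**      _result;       // where the caller stores the dlopen() result
  const char* _error_msg;
 public:
  NativeLibraryLoadEventSketch(const char* name, void** result)
    : _name(name), _result(result), _error_msg(nullptr) {}
  void set_error_msg(const char* msg) { _error_msg = msg; }
  ~NativeLibraryLoadEventSketch() {
    bool success = (*_result != nullptr);
    // In the real code this fills and commits an EventNativeLibraryLoad:
    // event.set_name(_name); event.set_success(success);
    // event.set_errorMessage(_error_msg); event.commit();
    (void)success;
  }
};
```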
@@ -71,12 +71,14 @@
 #include "utilities/vmError.hpp"
 #if INCLUDE_JFR
 #include "jfr/jfrEvents.hpp"
+#include "jfr/support/jfrNativeLibraryLoadEvent.hpp"
 #endif
 
 // put OS-includes here
 # include <dlfcn.h>
 # include <errno.h>
 # include <fcntl.h>
+# include <fenv.h>
 # include <inttypes.h>
 # include <poll.h>
 # include <pthread.h>
@@ -974,6 +976,41 @@ bool os::dll_address_to_library_name(address addr, char* buf,
 // in case of error it checks if .dll/.so was built for the
 // same architecture as Hotspot is running on
 
+void *os::Bsd::dlopen_helper(const char *filename, int mode) {
+#ifndef IA32
+  // Save and restore the floating-point environment around dlopen().
+  // There are known cases where global library initialization sets
+  // FPU flags that affect computation accuracy, for example, enabling
+  // Flush-To-Zero and Denormals-Are-Zero. Do not let those libraries
+  // break Java arithmetic. Unfortunately, this might affect libraries
+  // that might depend on these FPU features for performance and/or
+  // numerical "accuracy", but we need to protect Java semantics first
+  // and foremost. See JDK-8295159.
+
+  // This workaround is ineffective on IA32 systems because the MXCSR
+  // register (which controls flush-to-zero mode) is not stored in the
+  // legacy fenv.
+
+  fenv_t default_fenv;
+  int rtn = fegetenv(&default_fenv);
+  assert(rtn == 0, "fegetenv must succeed");
+#endif // IA32
+
+  void * result= ::dlopen(filename, RTLD_LAZY);
+
+#ifndef IA32
+  if (result != nullptr && ! IEEE_subnormal_handling_OK()) {
+    // We just dlopen()ed a library that mangled the floating-point
+    // flags. Silently fix things now.
+    int rtn = fesetenv(&default_fenv);
+    assert(rtn == 0, "fesetenv must succeed");
+    assert(IEEE_subnormal_handling_OK(), "fsetenv didn't work");
+  }
+#endif // IA32
+
+  return result;
+}
+
 #ifdef __APPLE__
 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
 #ifdef STATIC_BUILD
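A minimal sketch, under assumptions, of the property a check like `IEEE_subnormal_handling_OK()` has to verify (the helper name comes from the diff; this body is illustrative, not HotSpot's):

```cpp
#include <cfloat>

// With Flush-To-Zero / Denormals-Are-Zero enabled, halving the smallest
// positive normal double flushes to 0.0 instead of yielding a subnormal.
static bool subnormals_work() {
  volatile double smallest_normal = DBL_MIN;
  volatile double half = smallest_normal / 2.0;
  return half > 0.0;   // 0.0 means FTZ/DAZ mangled the FP environment
}
```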
@@ -981,21 +1018,13 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
 #else
   log_info(os)("attempting shared library load of %s", filename);
 
-#if INCLUDE_JFR
-  EventNativeLibraryLoad event;
-  event.set_name(filename);
-#endif
-
-  void * result= ::dlopen(filename, RTLD_LAZY);
+  void* result;
+  JFR_ONLY(NativeLibraryLoadEvent load_event(filename, &result);)
+  result = os::Bsd::dlopen_helper(filename, RTLD_LAZY);
   if (result != nullptr) {
     Events::log_dll_message(nullptr, "Loaded shared library %s", filename);
     // Successful loading
     log_info(os)("shared library load of %s was successful", filename);
-#if INCLUDE_JFR
-    event.set_success(true);
-    event.set_errorMessage(nullptr);
-    event.commit();
-#endif
     return result;
   }
 
@@ -1010,11 +1039,7 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
   }
   Events::log_dll_message(nullptr, "Loading shared library %s failed, %s", filename, error_report);
   log_info(os)("shared library load of %s failed, %s", filename, error_report);
-#if INCLUDE_JFR
-  event.set_success(false);
-  event.set_errorMessage(error_report);
-  event.commit();
-#endif
+  JFR_ONLY(load_event.set_error_msg(error_report);)
 
   return nullptr;
 #endif // STATIC_BUILD
@@ -1026,21 +1051,13 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
 #else
   log_info(os)("attempting shared library load of %s", filename);
 
-#if INCLUDE_JFR
-  EventNativeLibraryLoad event;
-  event.set_name(filename);
-#endif
-
-  void * result= ::dlopen(filename, RTLD_LAZY);
+  void* result;
+  JFR_ONLY(NativeLibraryLoadEvent load_event(filename, &result);)
+  result = os::Bsd::dlopen_helper(filename, RTLD_LAZY);
   if (result != nullptr) {
     Events::log_dll_message(nullptr, "Loaded shared library %s", filename);
     // Successful loading
     log_info(os)("shared library load of %s was successful", filename);
-#if INCLUDE_JFR
-    event.set_success(true);
-    event.set_errorMessage(nullptr);
-    event.commit();
-#endif
     return result;
   }
 
@@ -1057,11 +1074,7 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
   }
   Events::log_dll_message(nullptr, "Loading shared library %s failed, %s", filename, error_report);
   log_info(os)("shared library load of %s failed, %s", filename, error_report);
-#if INCLUDE_JFR
-  event.set_success(false);
-  event.set_errorMessage(error_report);
-  event.commit();
-#endif
+  JFR_ONLY(load_event.set_error_msg(error_report);)
   int diag_msg_max_length=ebuflen-strlen(ebuf);
   char* diag_msg_buf=ebuf+strlen(ebuf);
 
@@ -70,6 +70,8 @@ class os::Bsd {
   // Real-time clock functions
   static void clock_init(void);
 
+  static void *dlopen_helper(const char *path, int mode);
+
   // Stack repair handling
 
   // none present
 
@@ -82,6 +82,7 @@
 #include "utilities/vmError.hpp"
 #if INCLUDE_JFR
 #include "jfr/jfrEvents.hpp"
+#include "jfr/support/jfrNativeLibraryLoadEvent.hpp"
 #endif
 
 // put OS-includes here

@@ -94,6 +95,7 @@
 # include <signal.h>
 # include <endian.h>
 # include <errno.h>
+# include <fenv.h>
 # include <dlfcn.h>
 # include <stdio.h>
 # include <unistd.h>
@@ -1800,15 +1802,29 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
   return nullptr;
 }
 
-void * os::Linux::dlopen_helper(const char *filename, char *ebuf,
-                                int ebuflen) {
-  void * result = ::dlopen(filename, RTLD_LAZY);
+void * os::Linux::dlopen_helper(const char *filename, char *ebuf, int ebuflen) {
+#ifndef IA32
+  // Save and restore the floating-point environment around dlopen().
+  // There are known cases where global library initialization sets
+  // FPU flags that affect computation accuracy, for example, enabling
+  // Flush-To-Zero and Denormals-Are-Zero. Do not let those libraries
+  // break Java arithmetic. Unfortunately, this might affect libraries
+  // that might depend on these FPU features for performance and/or
+  // numerical "accuracy", but we need to protect Java semantics first
+  // and foremost. See JDK-8295159.
 
-#if INCLUDE_JFR
-  EventNativeLibraryLoad event;
-  event.set_name(filename);
-#endif
+  // This workaround is ineffective on IA32 systems because the MXCSR
+  // register (which controls flush-to-zero mode) is not stored in the
+  // legacy fenv.
+
+  fenv_t default_fenv;
+  int rtn = fegetenv(&default_fenv);
+  assert(rtn == 0, "fegetenv must succeed");
+#endif // IA32
+
+  void* result;
+  JFR_ONLY(NativeLibraryLoadEvent load_event(filename, &result);)
+  result = ::dlopen(filename, RTLD_LAZY);
   if (result == nullptr) {
     const char* error_report = ::dlerror();
     if (error_report == nullptr) {

@@ -1820,19 +1836,20 @@ void * os::Linux::dlopen_helper(const char *filename, char *ebuf,
     }
     Events::log_dll_message(nullptr, "Loading shared library %s failed, %s", filename, error_report);
     log_info(os)("shared library load of %s failed, %s", filename, error_report);
-#if INCLUDE_JFR
-    event.set_success(false);
-    event.set_errorMessage(error_report);
-    event.commit();
-#endif
+    JFR_ONLY(load_event.set_error_msg(error_report);)
   } else {
     Events::log_dll_message(nullptr, "Loaded shared library %s", filename);
     log_info(os)("shared library load of %s was successful", filename);
-#if INCLUDE_JFR
-    event.set_success(true);
-    event.set_errorMessage(nullptr);
-    event.commit();
-#endif
+#ifndef IA32
+    // Quickly test to make sure subnormals are correctly handled.
+    if (! IEEE_subnormal_handling_OK()) {
+      // We just dlopen()ed a library that mangled the floating-point
+      // flags. Silently fix things now.
+      int rtn = fesetenv(&default_fenv);
+      assert(rtn == 0, "fesetenv must succeed");
+      assert(IEEE_subnormal_handling_OK(), "fsetenv didn't work");
+    }
+#endif // IA32
   }
   return result;
 }
@@ -3381,6 +3398,11 @@ bool os::committed_in_range(address start, size_t size, address& committed_start
       return false;
     }
 
+    // If mincore is not supported.
+    if (mincore_return_value == -1 && errno == ENOSYS) {
+      return false;
+    }
+
     assert(vec[stripe] == 'X', "overflow guard");
     assert(mincore_return_value == 0, "Range must be valid");
     // Process this stripe
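For context on the new ENOSYS guard: `mincore()` reports page residency for an address range, and a `-1`/`ENOSYS` result means the kernel (or an emulation layer) does not implement the call at all, so the code now gives up instead of tripping the assert. A hedged usage sketch (Linux signature; not the HotSpot code):

```cpp
#include <errno.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

// Returns true if the page containing 'addr' is resident in memory.
// A failing mincore() (including errno == ENOSYS) is treated as "unknown".
static bool page_resident(void* addr) {
  unsigned char vec[1];
  long page = sysconf(_SC_PAGESIZE);
  void* aligned = (void*)((uintptr_t)addr & ~(uintptr_t)(page - 1));
  if (mincore(aligned, (size_t)page, vec) == -1) {
    return false;              // covers the errno == ENOSYS case
  }
  return (vec[0] & 1) != 0;    // low bit: page is resident
}
```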
@@ -51,7 +51,7 @@
 #include "utilities/macros.hpp"
 #include "utilities/vmError.hpp"
 #if INCLUDE_JFR
-#include "jfr/jfrEvents.hpp"
+#include "jfr/support/jfrNativeLibraryLoadEvent.hpp"
 #endif
 
 #ifdef AIX
@@ -725,10 +725,7 @@ void os::dll_unload(void *lib) {
   }
 #endif // LINUX
 
-#if INCLUDE_JFR
-  EventNativeLibraryUnload event;
-  event.set_name(l_path);
-#endif
+  JFR_ONLY(NativeLibraryUnloadEvent unload_event(l_path);)
 
   if (l_path == nullptr) {
     l_path = "<not available>";

@@ -739,11 +736,7 @@ void os::dll_unload(void *lib) {
     Events::log_dll_message(nullptr, "Unloaded shared library \"%s\" [" INTPTR_FORMAT "]",
                             l_path, p2i(lib));
     log_info(os)("Unloaded shared library \"%s\" [" INTPTR_FORMAT "]", l_path, p2i(lib));
-#if INCLUDE_JFR
-    event.set_success(true);
-    event.set_errorMessage(nullptr);
-    event.commit();
-#endif
+    JFR_ONLY(unload_event.set_result(true);)
   } else {
     const char* error_report = ::dlerror();
     if (error_report == nullptr) {

@@ -754,11 +747,7 @@ void os::dll_unload(void *lib) {
                             l_path, p2i(lib), error_report);
     log_info(os)("Attempt to unload shared library \"%s\" [" INTPTR_FORMAT "] failed, %s",
                  l_path, p2i(lib), error_report);
-#if INCLUDE_JFR
-    event.set_success(false);
-    event.set_errorMessage(error_report);
-    event.commit();
-#endif
+    JFR_ONLY(unload_event.set_error_msg(error_report);)
   }
   // Update the dll cache
   AIX_ONLY(LoadedLibraries::reload());
@@ -80,6 +80,7 @@
 #include "windbghelp.hpp"
 #if INCLUDE_JFR
 #include "jfr/jfrEvents.hpp"
+#include "jfr/support/jfrNativeLibraryLoadEvent.hpp"
 #endif
 
 #ifdef _DEBUG
@@ -505,12 +506,17 @@ struct tm* os::gmtime_pd(const time_t* clock, struct tm* res) {
   return nullptr;
 }
 
+enum Ept { EPT_THREAD, EPT_PROCESS, EPT_PROCESS_DIE };
+// Wrapper around _endthreadex(), exit() and _exit()
+[[noreturn]]
+static void exit_process_or_thread(Ept what, int code);
+
 JNIEXPORT
 LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
 
 // Thread start routine for all newly created threads.
 // Called with the associated Thread* as the argument.
-unsigned __stdcall os::win32::thread_native_entry(void* t) {
+static unsigned __stdcall thread_native_entry(void* t) {
   Thread* thread = static_cast<Thread*>(t);
 
   thread->record_stack_base_and_size();

@@ -558,7 +564,8 @@ unsigned __stdcall os::win32::thread_native_entry(void* t) {
 
   // Thread must not return from exit_process_or_thread(), but if it does,
   // let it proceed to exit normally
-  return (unsigned)os::win32::exit_process_or_thread(os::win32::EPT_THREAD, res);
+  exit_process_or_thread(EPT_THREAD, res);
+  return res;
 }
 
 static OSThread* create_os_thread(Thread* thread, HANDLE thread_handle,
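A short sketch of the `[[noreturn]]` contract introduced above; it also shows why the new code can follow the call with a plain `return res;` (hypothetical helper, not the Windows implementation):

```cpp
// [[noreturn]] promises the compiler that control never comes back, so the
// old "return the call's result" dance is unnecessary. The trailing return
// only exists to satisfy the thread-entry signature.
[[noreturn]] static void die_now(int code) {
  // _endthreadex(code) / exit(code) in the real implementation
  for (;;) {}   // placeholder so this sketch is self-contained
}

static unsigned worker(void*) {
  unsigned res = 0;
  die_now(res);   // control never returns from here...
  return res;     // ...but the signature still demands a return statement
}
```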
@@ -745,7 +752,7 @@ bool os::create_thread(Thread* thread, ThreadType thr_type,
     thread_handle =
       (HANDLE)_beginthreadex(nullptr,
                              (unsigned)stack_size,
-                             &os::win32::thread_native_entry,
+                             &thread_native_entry,
                              thread,
                              initflag,
                              &thread_id);
@@ -1202,7 +1209,7 @@ void os::abort(bool dump_core, void* siginfo, const void* context) {
     if (dumpFile != nullptr) {
       CloseHandle(dumpFile);
     }
-    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
+    exit_process_or_thread(EPT_PROCESS, 1);
   }
 
   dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |

@@ -1226,12 +1233,12 @@ void os::abort(bool dump_core, void* siginfo, const void* context) {
     jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
   }
   CloseHandle(dumpFile);
-  win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
+  exit_process_or_thread(EPT_PROCESS, 1);
 }
 
 // Die immediately, no exit hook, no abort hook, no cleanup.
 void os::die() {
-  win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, -1);
+  exit_process_or_thread(EPT_PROCESS_DIE, -1);
 }
 
 void os::dll_unload(void *lib) {
@@ -1240,33 +1247,22 @@ void os::dll_unload(void *lib) {
     snprintf(name, MAX_PATH, "<not available>");
   }
 
-#if INCLUDE_JFR
-  EventNativeLibraryUnload event;
-  event.set_name(name);
-#endif
+  JFR_ONLY(NativeLibraryUnloadEvent unload_event(name);)
 
   if (::FreeLibrary((HMODULE)lib)) {
     Events::log_dll_message(nullptr, "Unloaded dll \"%s\" [" INTPTR_FORMAT "]", name, p2i(lib));
     log_info(os)("Unloaded dll \"%s\" [" INTPTR_FORMAT "]", name, p2i(lib));
-#if INCLUDE_JFR
-    event.set_success(true);
-    event.set_errorMessage(nullptr);
-    event.commit();
-#endif
+    JFR_ONLY(unload_event.set_result(true);)
   } else {
     const DWORD errcode = ::GetLastError();
     char buf[500];
     size_t tl = os::lasterror(buf, sizeof(buf));
     Events::log_dll_message(nullptr, "Attempt to unload dll \"%s\" [" INTPTR_FORMAT "] failed (error code %d)", name, p2i(lib), errcode);
     log_info(os)("Attempt to unload dll \"%s\" [" INTPTR_FORMAT "] failed (error code %d)", name, p2i(lib), errcode);
-#if INCLUDE_JFR
-    event.set_success(false);
     if (tl == 0) {
       os::snprintf(buf, sizeof(buf), "Attempt to unload dll failed (error code %d)", (int) errcode);
     }
-    event.set_errorMessage(buf);
-    event.commit();
-#endif
+    JFR_ONLY(unload_event.set_error_msg(buf);)
   }
 }
 
@@ -1535,21 +1531,14 @@ static int _print_module(const char* fname, address base_address,
 // same architecture as Hotspot is running on
 void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
   log_info(os)("attempting shared library load of %s", name);
-#if INCLUDE_JFR
-  EventNativeLibraryLoad event;
-  event.set_name(name);
-#endif
-  void * result = LoadLibrary(name);
+  void* result;
+  JFR_ONLY(NativeLibraryLoadEvent load_event(name, &result);)
+  result = LoadLibrary(name);
   if (result != nullptr) {
     Events::log_dll_message(nullptr, "Loaded shared library %s", name);
     // Recalculate pdb search path if a DLL was loaded successfully.
     SymbolEngine::recalc_search_path();
     log_info(os)("shared library load of %s was successful", name);
-#if INCLUDE_JFR
-    event.set_success(true);
-    event.set_errorMessage(nullptr);
-    event.commit();
-#endif
     return result;
   }
   DWORD errcode = GetLastError();

@@ -1563,11 +1552,7 @@ void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
   if (errcode == ERROR_MOD_NOT_FOUND) {
     strncpy(ebuf, "Can't find dependent libraries", ebuflen - 1);
     ebuf[ebuflen - 1] = '\0';
-#if INCLUDE_JFR
-    event.set_success(false);
-    event.set_errorMessage(ebuf);
-    event.commit();
-#endif
+    JFR_ONLY(load_event.set_error_msg(ebuf);)
     return nullptr;
   }
 
@@ -1578,11 +1563,7 @@ void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
   // else call os::lasterror to obtain system error message
   int fd = ::open(name, O_RDONLY | O_BINARY, 0);
   if (fd < 0) {
-#if INCLUDE_JFR
-    event.set_success(false);
-    event.set_errorMessage("open on dll file did not work");
-    event.commit();
-#endif
+    JFR_ONLY(load_event.set_error_msg("open on dll file did not work");)
     return nullptr;
   }
 

@@ -1609,11 +1590,7 @@ void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
   ::close(fd);
   if (failed_to_get_lib_arch) {
     // file i/o error - report os::lasterror(...) msg
-#if INCLUDE_JFR
-    event.set_success(false);
-    event.set_errorMessage("failed to get lib architecture");
-    event.commit();
-#endif
+    JFR_ONLY(load_event.set_error_msg("failed to get lib architecture");)
     return nullptr;
   }
 
@@ -1658,11 +1635,7 @@ void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
   // If the architecture is right
   // but some other error took place - report os::lasterror(...) msg
   if (lib_arch == running_arch) {
-#if INCLUDE_JFR
-    event.set_success(false);
-    event.set_errorMessage("lib architecture matches, but other error occured");
-    event.commit();
-#endif
+    JFR_ONLY(load_event.set_error_msg("lib architecture matches, but other error occured");)
     return nullptr;
   }
 

@@ -1676,12 +1649,7 @@ void * os::dll_load(const char *name, char *ebuf, int ebuflen) {
              "Can't load this .dll (machine code=0x%x) on a %s-bit platform",
              lib_arch, running_arch_str);
   }
-#if INCLUDE_JFR
-  event.set_success(false);
-  event.set_errorMessage(ebuf);
-  event.commit();
-#endif
-
+  JFR_ONLY(load_event.set_error_msg(ebuf);)
   return nullptr;
 }
 
@@ -2250,7 +2218,7 @@ void* os::win32::install_signal_handler(int sig, signal_handler_t handler) {
     sigbreakHandler = handler;
     return oldHandler;
   } else {
-    return ::signal(sig, handler);
+    return CAST_FROM_FN_PTR(void*, ::signal(sig, handler));
   }
 }
 
@@ -2903,22 +2871,23 @@ LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptio
 
 #if defined(USE_VECTORED_EXCEPTION_HANDLING)
 LONG WINAPI topLevelUnhandledExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
-  if (InterceptOSException) goto exit;
-  DWORD exception_code = exceptionInfo->ExceptionRecord->ExceptionCode;
+  if (!InterceptOSException) {
+    DWORD exceptionCode = exceptionInfo->ExceptionRecord->ExceptionCode;
 #if defined(_M_ARM64)
-  address pc = (address)exceptionInfo->ContextRecord->Pc;
+    address pc = (address) exceptionInfo->ContextRecord->Pc;
 #elif defined(_M_AMD64)
-  address pc = (address) exceptionInfo->ContextRecord->Rip;
+    address pc = (address) exceptionInfo->ContextRecord->Rip;
 #else
-  address pc = (address) exceptionInfo->ContextRecord->Eip;
+    address pc = (address) exceptionInfo->ContextRecord->Eip;
 #endif
-  Thread* t = Thread::current_or_null_safe();
+    Thread* thread = Thread::current_or_null_safe();
 
-  if (exception_code != EXCEPTION_BREAKPOINT) {
-    report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord,
-                 exceptionInfo->ContextRecord);
+    if (exceptionCode != EXCEPTION_BREAKPOINT) {
+      report_error(thread, exceptionCode, pc, exceptionInfo->ExceptionRecord,
+                   exceptionInfo->ContextRecord);
+    }
   }
-exit:
 
   return previousUnhandledExceptionFilter ? previousUnhandledExceptionFilter(exceptionInfo) : EXCEPTION_CONTINUE_SEARCH;
 }
 #endif
@@ -4097,7 +4066,7 @@ static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
   return TRUE;
 }
 
-int os::win32::exit_process_or_thread(Ept what, int exit_code) {
+static void exit_process_or_thread(Ept what, int exit_code) {
   // Basic approach:
   //  - Each exiting thread registers its intent to exit and then does so.
   //  - A thread trying to terminate the process must wait for all

@@ -4275,7 +4244,7 @@ int os::win32::exit_process_or_thread(Ept what, int exit_code) {
   }
 
   // Should not reach here
-  return exit_code;
+  os::infinite_sleep();
 }
 
 #undef EXIT_TIMEOUT
@@ -4853,11 +4822,11 @@ ssize_t os::pd_write(int fd, const void *buf, size_t nBytes) {
 }
 
 void os::exit(int num) {
-  win32::exit_process_or_thread(win32::EPT_PROCESS, num);
+  exit_process_or_thread(EPT_PROCESS, num);
 }
 
 void os::_exit(int num) {
-  win32::exit_process_or_thread(win32::EPT_PROCESS_DIE, num);
+  exit_process_or_thread(EPT_PROCESS_DIE, num);
 }
 
 // Is a (classpath) directory empty?
@@ -70,13 +70,6 @@ class os::win32 {
   static HINSTANCE load_Windows_dll(const char* name, char *ebuf, int ebuflen);
 
  private:
-  // The handler passed to _beginthreadex().
-  // Called with the associated Thread* as the argument.
-  static unsigned __stdcall thread_native_entry(void*);
-
-  enum Ept { EPT_THREAD, EPT_PROCESS, EPT_PROCESS_DIE };
-  // Wrapper around _endthreadex(), exit() and _exit()
-  static int exit_process_or_thread(Ept what, int exit_code);
 
   static void initialize_performance_counter();
 
@@ -111,7 +111,7 @@ public:
       _p = _fallback_buffer;
       _capacity = (int)(sizeof(_fallback_buffer) / sizeof(T));
     }
-    _p[0] = '\0';
+    _p[0] = 0;
     imprint_sentinel();
   }
 

@@ -123,7 +123,7 @@ public:
     }
     _p = _fallback_buffer;
     _capacity = (int)(sizeof(_fallback_buffer) / sizeof(T));
-    _p[0] = '\0';
+    _p[0] = 0;
    imprint_sentinel();
   }
 
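A guess at the motivation for the `_p[0] = 0;` change: the buffer is templated on its element type, and the plain integer literal reads as type-neutral where `'\0'` is specifically a `char`. An illustrative sketch (hypothetical type, not the HotSpot class):

```cpp
// Both '\0' and 0 convert implicitly to any character-like element type;
// writing 0 simply states the "zero-terminate whatever T is" intent
// without singling out char.
template <typename T, int N>
struct SmallBuffer {
  T _store[N];
  SmallBuffer() {
    _store[0] = 0;   // works uniformly for char, wchar_t, char16_t, ...
  }
};

SmallBuffer<wchar_t, 16> wide_buf;   // element type is not char here
```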
@@ -71,4 +71,5 @@ void VMError::raise_fail_fast(void* exrecord, void* context) {
   RaiseFailFastException(static_cast<PEXCEPTION_RECORD>(exrecord),
                          static_cast<PCONTEXT>(context),
                          flags);
+  os::infinite_sleep();
 }
@@ -331,9 +331,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
 
     // End life with a fatal error, message and detail message and the context.
     // Note: no need to do any post-processing here (e.g. signal chaining)
-    va_list va_dummy = nullptr;
-    VMError::report_and_die(thread, uc, nullptr, 0, msg, detail_msg, va_dummy);
-    va_end(va_dummy);
+    VMError::report_and_die(thread, uc, nullptr, 0, msg, "%s", detail_msg);
 
     ShouldNotReachHere();
   }
@@ -193,15 +193,6 @@ NOINLINE frame os::current_frame() {
|
||||
}
|
||||
}
|
||||
|
||||
ATTRIBUTE_PRINTF(6, 7)
|
||||
static void report_and_die(Thread* thread, void* context, const char* filename, int lineno, const char* message,
|
||||
const char* detail_fmt, ...) {
|
||||
va_list va;
|
||||
va_start(va, detail_fmt);
|
||||
VMError::report_and_die(thread, context, filename, lineno, message, detail_fmt, va);
|
||||
va_end(va);
|
||||
}
|
||||
|
||||
bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
|
||||
ucontext_t* uc, JavaThread* thread) {
|
||||
// Enable WXWrite: this function is called by the signal handler at arbitrary
|
||||
@@ -288,7 +279,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
|
||||
|
||||
// End life with a fatal error, message and detail message and the context.
|
||||
// Note: no need to do any post-processing here (e.g. signal chaining)
|
||||
report_and_die(thread, uc, nullptr, 0, msg, "%s", detail_msg);
|
||||
VMError::report_and_die(thread, uc, nullptr, 0, msg, "%s", detail_msg);
|
||||
ShouldNotReachHere();
|
||||
|
||||
} else if (sig == SIGFPE &&
|
||||
|
||||
@@ -262,9 +262,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
|
||||
|
||||
// End life with a fatal error, message and detail message and the context.
|
||||
// Note: no need to do any post-processing here (e.g. signal chaining)
|
||||
va_list va_dummy;
|
||||
VMError::report_and_die(thread, uc, nullptr, 0, msg, detail_msg, va_dummy);
|
||||
va_end(va_dummy);
|
||||
VMError::report_and_die(thread, uc, nullptr, 0, msg, "%s", detail_msg);
|
||||
|
||||
ShouldNotReachHere();
|
||||
|
||||
|
||||
@@ -345,9 +345,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
|
||||
|
||||
// End life with a fatal error, message and detail message and the context.
|
||||
// Note: no need to do any post-processing here (e.g. signal chaining)
|
||||
va_list va_dummy;
|
||||
VMError::report_and_die(thread, uc, nullptr, 0, msg, detail_msg, va_dummy);
|
||||
va_end(va_dummy);
|
||||
VMError::report_and_die(thread, uc, nullptr, 0, msg, "%s", detail_msg);
|
||||
|
||||
ShouldNotReachHere();
|
||||
|
||||
|
||||
@@ -251,9 +251,7 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
|
||||
|
||||
// End life with a fatal error, message and detail message and the context.
|
||||
// Note: no need to do any post-processing here (e.g. signal chaining)
|
||||
va_list va_dummy;
|
||||
VMError::report_and_die(thread, uc, nullptr, 0, msg, detail_msg, va_dummy);
|
||||
va_end(va_dummy);
|
||||
VMError::report_and_die(thread, uc, nullptr, 0, msg, "%s", detail_msg);
|
||||
|
||||
ShouldNotReachHere();
|
||||
} else if (sig == SIGFPE &&
|
||||
|
||||
@@ -257,7 +257,7 @@ public:
|
||||
void set_cisc_reg_mask_name(const char *rm_name) { _cisc_reg_mask_name = rm_name; }
|
||||
// Output cisc-method prototypes and method bodies
|
||||
void declare_cisc_version(ArchDesc &AD, FILE *fp_cpp);
|
||||
bool define_cisc_version (ArchDesc &AD, FILE *fp_cpp);
|
||||
void define_cisc_version(ArchDesc& AD, FILE* fp_cpp);
|
||||
|
||||
bool check_branch_variant(ArchDesc &AD, InstructForm *short_branch);
|
||||
|
||||
@@ -273,7 +273,7 @@ public:
|
||||
bool has_short_branch_form() { return _short_branch_form != nullptr; }
|
||||
// Output short branch prototypes and method bodies
|
||||
void declare_short_branch_methods(FILE *fp_cpp);
|
||||
bool define_short_branch_methods(ArchDesc &AD, FILE *fp_cpp);
|
||||
void define_short_branch_methods(ArchDesc& AD, FILE* fp_cpp);
|
||||
|
||||
uint alignment() { return _alignment; }
|
||||
void set_alignment(uint val) { _alignment = val; }
|
||||
|
||||
@@ -3099,42 +3099,6 @@ void ArchDesc::define_oper_interface(FILE *fp, OperandForm &oper, FormDict &glob
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Construct the method to copy _idx, inputs and operands to new node.
|
||||
static void define_fill_new_machnode(bool used, FILE *fp_cpp) {
|
||||
fprintf(fp_cpp, "\n");
|
||||
fprintf(fp_cpp, "// Copy _idx, inputs and operands to new node\n");
|
||||
fprintf(fp_cpp, "void MachNode::fill_new_machnode(MachNode* node) const {\n");
|
||||
if( !used ) {
|
||||
fprintf(fp_cpp, " // This architecture does not have cisc or short branch instructions\n");
|
||||
fprintf(fp_cpp, " ShouldNotCallThis();\n");
|
||||
fprintf(fp_cpp, "}\n");
|
||||
} else {
|
||||
// New node must use same node index for access through allocator's tables
|
||||
fprintf(fp_cpp, " // New node must use same node index\n");
|
||||
fprintf(fp_cpp, " node->set_idx( _idx );\n");
|
||||
// Copy machine-independent inputs
|
||||
fprintf(fp_cpp, " // Copy machine-independent inputs\n");
|
||||
fprintf(fp_cpp, " for( uint j = 0; j < req(); j++ ) {\n");
|
||||
fprintf(fp_cpp, " node->add_req(in(j));\n");
|
||||
fprintf(fp_cpp, " }\n");
|
||||
// Copy machine operands to new MachNode
|
||||
fprintf(fp_cpp, " // Copy my operands, except for cisc position\n");
|
||||
fprintf(fp_cpp, " int nopnds = num_opnds();\n");
|
||||
fprintf(fp_cpp, " assert( node->num_opnds() == (uint)nopnds, \"Must have same number of operands\");\n");
|
||||
fprintf(fp_cpp, " MachOper **to = node->_opnds;\n");
|
||||
fprintf(fp_cpp, " for( int i = 0; i < nopnds; i++ ) {\n");
|
||||
fprintf(fp_cpp, " if( i != cisc_operand() ) \n");
|
||||
fprintf(fp_cpp, " to[i] = _opnds[i]->clone();\n");
|
||||
fprintf(fp_cpp, " }\n");
|
||||
fprintf(fp_cpp, " // Do not increment node index counter, since node reuses my index\n");
|
||||
fprintf(fp_cpp, " Compile* C = Compile::current();\n");
|
||||
fprintf(fp_cpp, " C->set_unique(C->unique() - 1);\n");
|
||||
fprintf(fp_cpp, "}\n");
|
||||
}
|
||||
fprintf(fp_cpp, "\n");
|
||||
}
|
||||
|
||||
//------------------------------defineClasses----------------------------------
|
||||
// Define members of MachNode and MachOper classes based on
|
||||
// operand and instruction lists
|
||||
@@ -3230,7 +3194,6 @@ void ArchDesc::defineClasses(FILE *fp) {
|
||||
defineOut_RegMask(_CPP_MISC_file._fp, instr->_ident, reg_mask(*instr));
|
||||
}
|
||||
|
||||
bool used = false;
|
||||
// Output the definitions for expand rules & peephole rules
|
||||
_instructions.reset();
|
||||
for( ; (instr = (InstructForm*)_instructions.iter()) != nullptr; ) {
|
||||
@@ -3249,15 +3212,12 @@ void ArchDesc::defineClasses(FILE *fp) {
|
||||
definePeephole(_CPP_PEEPHOLE_file._fp, instr);
|
||||
|
||||
// Output code to convert to the cisc version, if applicable
|
||||
used |= instr->define_cisc_version(*this, fp);
|
||||
instr->define_cisc_version(*this, fp);
|
||||
|
||||
// Output code to convert to the short branch version, if applicable
|
||||
used |= instr->define_short_branch_methods(*this, fp);
|
||||
instr->define_short_branch_methods(*this, fp);
|
||||
}
|
||||
|
||||
// Construct the method called by cisc_version() to copy inputs and operands.
|
||||
define_fill_new_machnode(used, fp);
|
||||
|
||||
// Output the definitions for labels
|
||||
_instructions.reset();
|
||||
while( (instr = (InstructForm*)_instructions.iter()) != nullptr ) {
|
||||
@@ -4074,7 +4034,7 @@ void InstructForm::declare_cisc_version(ArchDesc &AD, FILE *fp_hpp) {
|
||||
|
||||
//---------------------------define_cisc_version-------------------------------
|
||||
// Build CISC version of this instruction
|
||||
bool InstructForm::define_cisc_version(ArchDesc &AD, FILE *fp_cpp) {
|
||||
void InstructForm::define_cisc_version(ArchDesc& AD, FILE* fp_cpp) {
|
||||
InstructForm *inst_cisc = this->cisc_spill_alternate();
|
||||
if( AD.can_cisc_spill() && (inst_cisc != nullptr) ) {
|
||||
const char *name = inst_cisc->_ident;
|
||||
@@ -4120,9 +4080,7 @@ bool InstructForm::define_cisc_version(ArchDesc &AD, FILE *fp_cpp) {
|
||||
fprintf(fp_cpp, " return node;\n");
|
||||
fprintf(fp_cpp, "}\n");
|
||||
fprintf(fp_cpp, "\n");
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
//---------------------------declare_short_branch_methods----------------------
|
||||
@@ -4135,7 +4093,7 @@ void InstructForm::declare_short_branch_methods(FILE *fp_hpp) {
|
||||
|
||||
//---------------------------define_short_branch_methods-----------------------
|
||||
// Build definitions for short branch methods
|
||||
bool InstructForm::define_short_branch_methods(ArchDesc &AD, FILE *fp_cpp) {
|
||||
void InstructForm::define_short_branch_methods(ArchDesc& AD, FILE* fp_cpp) {
|
||||
if (has_short_branch_form()) {
|
||||
InstructForm *short_branch = short_branch_form();
|
||||
const char *name = short_branch->_ident;
|
||||
@@ -4164,9 +4122,7 @@ bool InstructForm::define_short_branch_methods(ArchDesc &AD, FILE *fp_cpp) {
|
||||
fprintf(fp_cpp, " return node;\n");
|
||||
fprintf(fp_cpp, "}\n");
|
||||
fprintf(fp_cpp,"\n");
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -397,6 +397,7 @@ int Compilation::compile_java_method() {
|
||||
PhaseTraceTime timeit(_t_buildIR);
|
||||
build_hir();
|
||||
}
|
||||
CHECK_BAILOUT_(no_frame_size);
|
||||
if (BailoutAfterHIR) {
|
||||
BAILOUT_("Bailing out because of -XX:+BailoutAfterHIR", no_frame_size);
|
||||
}
|
||||
@@ -446,13 +447,13 @@ void Compilation::install_code(int frame_size) {
|
||||
|
||||
void Compilation::compile_method() {
|
||||
|
||||
CompilationMemoryStatisticMark cmsm(env()->task()->directive());
|
||||
|
||||
{
|
||||
PhaseTraceTime timeit(_t_setup);
|
||||
|
||||
// setup compilation
|
||||
initialize();
|
||||
CHECK_BAILOUT();
|
||||
|
||||
}
|
||||
|
||||
if (!method()->can_be_compiled()) {
|
||||
@@ -605,6 +606,9 @@ Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* metho
|
||||
_cfg_printer_output = new CFGPrinterOutput(this);
|
||||
}
|
||||
#endif
|
||||
|
||||
CompilationMemoryStatisticMark cmsm(directive);
|
||||
|
||||
compile_method();
|
||||
if (bailed_out()) {
|
||||
_env->record_method_not_compilable(bailout_msg());
|
||||
|
||||
@@ -85,6 +85,7 @@ class Compilation: public StackObj {
|
||||
bool _has_monitors; // Fastpath monitors detection for Continuations
|
||||
bool _install_code;
|
||||
const char* _bailout_msg;
|
||||
bool _oom;
|
||||
ExceptionInfoList* _exception_info_list;
|
||||
ExceptionHandlerTable _exception_handler_table;
|
||||
ImplicitExceptionTable _implicit_exception_table;
|
||||
@@ -203,6 +204,10 @@ class Compilation: public StackObj {
|
||||
}
|
||||
#endif // PRODUCT
|
||||
|
||||
// MemLimit handling
|
||||
bool oom() const { return _oom; }
|
||||
void set_oom() { _oom = true; }
|
||||
|
||||
// error handling
|
||||
void bailout(const char* msg);
|
||||
bool bailed_out() const { return _bailout_msg != nullptr; }
|
||||
|
||||
@@ -573,6 +573,12 @@ void ArchiveBuilder::verify_estimate_size(size_t estimate, const char* which) {
|
||||
_other_region_used_bytes = 0;
|
||||
}
|
||||
|
||||
char* ArchiveBuilder::ro_strdup(const char* s) {
|
||||
char* archived_str = ro_region_alloc((int)strlen(s) + 1);
|
||||
strcpy(archived_str, s);
|
||||
return archived_str;
|
||||
}
|
||||
|
||||
void ArchiveBuilder::dump_rw_metadata() {
|
||||
ResourceMark rm;
|
||||
log_info(cds)("Allocating RW objects ... ");
|
||||
@@ -1133,6 +1139,7 @@ class ArchiveBuilder::CDSMapLogger : AllStatic {
|
||||
// The address of _source_obj at runtime
|
||||
oop requested_obj = ArchiveHeapWriter::source_obj_to_requested_obj(_source_obj);
|
||||
// The address of this field in the requested space
|
||||
assert(requested_obj != nullptr, "Attempting to load field from null oop");
|
||||
address requested_field_addr = cast_from_oop<address>(requested_obj) + fd->offset();
|
||||
|
||||
fd->print_on(_st);
|
||||
|
||||
@@ -374,6 +374,8 @@ public:
|
||||
return align_up(byte_size, SharedSpaceObjectAlignment);
|
||||
}
|
||||
|
||||
char* ro_strdup(const char* s);
|
||||
|
||||
void dump_rw_metadata();
|
||||
void dump_ro_metadata();
|
||||
void relocate_metaspaceobj_embedded_pointers();
|
||||
|
||||
@@ -241,7 +241,7 @@ Handle CDSProtectionDomain::get_shared_protection_domain(Handle class_loader,
|
||||
TRAPS) {
|
||||
Handle protection_domain;
|
||||
if (shared_protection_domain(shared_path_index) == nullptr) {
|
||||
Handle pd = get_protection_domain_from_classloader(class_loader, url, THREAD);
|
||||
Handle pd = get_protection_domain_from_classloader(class_loader, url, CHECK_NH);
|
||||
atomic_set_shared_protection_domain(shared_path_index, pd());
|
||||
}
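
For context (simplified from hotspot's utilities/exceptions.hpp; the exact expansion may differ slightly): passing plain THREAD ignores a pending Java exception, while CHECK_NH makes the caller return an empty Handle as soon as one is raised, so a null protection domain is never cached. Roughly:

// #define CHECK_NH   CHECK_(Handle())
// #define CHECK_(r)  THREAD); if (HAS_PENDING_EXCEPTION) { return r; } (void)(0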

@@ -125,7 +125,7 @@ static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
// full module graph
static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
{"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
{"jdk/internal/module/ArchivedBootLayer", "archivedBootLayer"},
{ARCHIVED_BOOT_LAYER_CLASS, ARCHIVED_BOOT_LAYER_FIELD},
{"java/lang/Module$ArchivedData", "archivedData"},
{nullptr, nullptr},
};
@@ -964,7 +964,14 @@ HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAP

// Initialize from archived data. Currently this is done only
// during VM initialization time. No lock is needed.
if (record != nullptr) {
if (record == nullptr) {
if (log_is_enabled(Info, cds, heap)) {
ResourceMark rm(THREAD);
log_info(cds, heap)("subgraph %s is not recorded",
k->external_name());
}
return nullptr;
} else {
if (record->is_full_module_graph() && !MetaspaceShared::use_full_module_graph()) {
if (log_is_enabled(Info, cds, heap)) {
ResourceMark rm(THREAD);
@@ -1751,4 +1758,26 @@ void HeapShared::print_stats() {
avg_size(_total_obj_size, _total_obj_count));
}

bool HeapShared::is_archived_boot_layer_available(JavaThread* current) {
TempNewSymbol klass_name = SymbolTable::new_symbol(ARCHIVED_BOOT_LAYER_CLASS);
InstanceKlass* k = SystemDictionary::find_instance_klass(current, klass_name, Handle(), Handle());
if (k == nullptr) {
return false;
} else {
TempNewSymbol field_name = SymbolTable::new_symbol(ARCHIVED_BOOT_LAYER_FIELD);
TempNewSymbol field_signature = SymbolTable::new_symbol("Ljdk/internal/module/ArchivedBootLayer;");
fieldDescriptor fd;
if (k->find_field(field_name, field_signature, true, &fd) != nullptr) {
oop m = k->java_mirror();
oop f = m->obj_field(fd.offset());
if (CompressedOops::is_null(f)) {
return false;
}
} else {
return false;
}
}
return true;
}

#endif // INCLUDE_CDS_JAVA_HEAP

@@ -49,6 +49,9 @@ class ResourceBitMap;
struct ArchivableStaticFieldInfo;
class ArchiveHeapInfo;

#define ARCHIVED_BOOT_LAYER_CLASS "jdk/internal/module/ArchivedBootLayer"
#define ARCHIVED_BOOT_LAYER_FIELD "archivedBootLayer"

// A dump time sub-graph info for Klass _k. It includes the entry points
// (static fields in _k's mirror) of the archived sub-graphs reachable
// from _k's mirror. It also contains a list of Klasses of the objects
@@ -160,6 +163,7 @@ public:
// Scratch objects for archiving Klass::java_mirror()
static oop scratch_java_mirror(BasicType t) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
static oop scratch_java_mirror(Klass* k) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
static bool is_archived_boot_layer_available(JavaThread* current) NOT_CDS_JAVA_HEAP_RETURN_(false);

private:
#if INCLUDE_CDS_JAVA_HEAP

@@ -45,6 +45,7 @@
#include "classfile/classLoaderExt.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/loaderConstraints.hpp"
#include "classfile/modules.hpp"
#include "classfile/placeholders.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
@@ -387,6 +388,7 @@ void MetaspaceShared::serialize(SerializeClosure* soc) {
SystemDictionaryShared::serialize_vm_classes(soc);
soc->do_tag(--tag);

CDS_JAVA_HEAP_ONLY(Modules::serialize(soc);)
CDS_JAVA_HEAP_ONLY(ClassLoaderDataShared::serialize(soc);)

LambdaFormInvokers::serialize(soc);
@@ -479,6 +481,8 @@ char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {

// Write lambform lines into archive
LambdaFormInvokers::dump_static_archive_invokers();
// Write module name into archive
CDS_JAVA_HEAP_ONLY(Modules::dump_main_module_name();)
// Write the other data to the output array.
DumpRegion* ro_region = ArchiveBuilder::current()->ro_region();
char* start = ro_region->top();
@@ -763,8 +767,6 @@ void MetaspaceShared::preload_and_dump_impl(TRAPS) {
log_info(cds)("Reading extra data: done.");
}

HeapShared::init_for_dumping(CHECK);

// Rewrite and link classes
log_info(cds)("Rewriting and linking classes ...");

@@ -778,6 +780,11 @@ void MetaspaceShared::preload_and_dump_impl(TRAPS) {
#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_dumping_heap()) {
StringTable::allocate_shared_strings_array(CHECK);
if (!HeapShared::is_archived_boot_layer_available(THREAD)) {
log_info(cds)("archivedBootLayer not available, disabling full module graph");
disable_full_module_graph();
}
HeapShared::init_for_dumping(CHECK);
ArchiveHeapWriter::init();
if (use_full_module_graph()) {
HeapShared::reset_archived_object_states(CHECK);
@@ -1163,8 +1170,8 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
static_mapinfo->map_or_load_heap_region();
}
#endif // _LP64
log_info(cds)("optimized module handling: %s", MetaspaceShared::use_optimized_module_handling() ? "enabled" : "disabled");
log_info(cds)("full module graph: %s", MetaspaceShared::use_full_module_graph() ? "enabled" : "disabled");
log_info(cds)("initial optimized module handling: %s", MetaspaceShared::use_optimized_module_handling() ? "enabled" : "disabled");
log_info(cds)("initial full module graph: %s", MetaspaceShared::use_full_module_graph() ? "enabled" : "disabled");
} else {
unmap_archive(static_mapinfo);
unmap_archive(dynamic_mapinfo);

@@ -319,10 +319,10 @@ public:

// This is true if the compilation is not going to produce code.
// (It is reasonable to retry failed compilations.)
bool failing() { return _failure_reason != nullptr; }
bool failing() const { return _failure_reason != nullptr; }

// Reason this compilation is failing, such as "too many basic blocks".
const char* failure_reason() { return _failure_reason; }
const char* failure_reason() const { return _failure_reason; }

// Return state of appropriate compatibility
int compilable() { return _compilable; }

@@ -86,22 +86,6 @@ typedef int (*canonicalize_fn_t)(const char *orig, char *out, int len);

static canonicalize_fn_t CanonicalizeEntry = nullptr;

// Entry points in zip.dll for loading zip/jar file entries

typedef void * * (*ZipOpen_t)(const char *name, char **pmsg);
typedef void (*ZipClose_t)(jzfile *zip);
typedef jzentry* (*FindEntry_t)(jzfile *zip, const char *name, jint *sizeP, jint *nameLen);
typedef jboolean (*ReadEntry_t)(jzfile *zip, jzentry *entry, unsigned char *buf, char *namebuf);
typedef jint (*Crc32_t)(jint crc, const jbyte *buf, jint len);

static ZipOpen_t ZipOpen = nullptr;
static ZipClose_t ZipClose = nullptr;
static FindEntry_t FindEntry = nullptr;
static ReadEntry_t ReadEntry = nullptr;
static Crc32_t Crc32 = nullptr;
int ClassLoader::_libzip_loaded = 0;
void* ClassLoader::_zip_handle = nullptr;

// Entry points for jimage.dll for loading jimage file entries

static JImageOpen_t JImageOpen = nullptr;
@@ -292,7 +276,7 @@ ClassPathZipEntry::ClassPathZipEntry(jzfile* zip, const char* zip_name,
}

ClassPathZipEntry::~ClassPathZipEntry() {
(*ZipClose)(_zip);
ZipLibrary::close(_zip);
FREE_C_HEAP_ARRAY(char, _zip_name);
}

@@ -301,7 +285,7 @@ u1* ClassPathZipEntry::open_entry(JavaThread* current, const char* name, jint* f
ThreadToNativeFromVM ttn(current);
// check whether zip archive contains name
jint name_len;
jzentry* entry = (*FindEntry)(_zip, name, filesize, &name_len);
jzentry* entry = ZipLibrary::find_entry(_zip, name, filesize, &name_len);
if (entry == nullptr) return nullptr;
u1* buffer;
char name_buf[128];
@@ -321,7 +305,9 @@ u1* ClassPathZipEntry::open_entry(JavaThread* current, const char* name, jint* f
size++;
}
buffer = NEW_RESOURCE_ARRAY(u1, size);
if (!(*ReadEntry)(_zip, entry, buffer, filename)) return nullptr;
if (!ZipLibrary::read_entry(_zip, entry, buffer, filename)) {
return nullptr;
}

// return result
if (nul_terminate) {
@@ -724,8 +710,7 @@ jzfile* ClassLoader::open_zip_file(const char* canonical_path, char** error_msg,
// enable call to C land
ThreadToNativeFromVM ttn(thread);
HandleMark hm(thread);
load_zip_library_if_needed();
return (*ZipOpen)(canonical_path, error_msg);
return ZipLibrary::open(canonical_path, error_msg);
}

ClassPathEntry* ClassLoader::create_class_path_entry(JavaThread* current,
@@ -937,32 +922,6 @@ void ClassLoader::load_java_library() {
CanonicalizeEntry = CAST_TO_FN_PTR(canonicalize_fn_t, dll_lookup(javalib_handle, "JDK_Canonicalize", nullptr));
}

void ClassLoader::release_load_zip_library() {
ConditionalMutexLocker locker(Zip_lock, Zip_lock != nullptr, Monitor::_no_safepoint_check_flag);
if (_libzip_loaded == 0) {
load_zip_library();
Atomic::release_store(&_libzip_loaded, 1);
}
}

void ClassLoader::load_zip_library() {
assert(ZipOpen == nullptr, "should not load zip library twice");
char path[JVM_MAXPATHLEN];
char ebuf[1024];
if (os::dll_locate_lib(path, sizeof(path), Arguments::get_dll_dir(), "zip")) {
_zip_handle = os::dll_load(path, ebuf, sizeof ebuf);
}
if (_zip_handle == nullptr) {
vm_exit_during_initialization("Unable to load zip library", path);
}

ZipOpen = CAST_TO_FN_PTR(ZipOpen_t, dll_lookup(_zip_handle, "ZIP_Open", path));
ZipClose = CAST_TO_FN_PTR(ZipClose_t, dll_lookup(_zip_handle, "ZIP_Close", path));
FindEntry = CAST_TO_FN_PTR(FindEntry_t, dll_lookup(_zip_handle, "ZIP_FindEntry", path));
ReadEntry = CAST_TO_FN_PTR(ReadEntry_t, dll_lookup(_zip_handle, "ZIP_ReadEntry", path));
Crc32 = CAST_TO_FN_PTR(Crc32_t, dll_lookup(_zip_handle, "ZIP_CRC32", path));
}

void ClassLoader::load_jimage_library() {
assert(JImageOpen == nullptr, "should not load jimage library twice");
char path[JVM_MAXPATHLEN];
@@ -982,8 +941,7 @@ void ClassLoader::load_jimage_library() {
}

int ClassLoader::crc32(int crc, const char* buf, int len) {
load_zip_library_if_needed();
return (*Crc32)(crc, (const jbyte*)buf, len);
return ZipLibrary::crc32(crc, (const jbyte*)buf, len);
}

oop ClassLoader::get_system_package(const char* name, TRAPS) {

@@ -30,6 +30,7 @@
#include "runtime/perfDataTypes.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
#include "utilities/zipLibrary.hpp"

// The VM class loader.
#include <sys/stat.h>
@@ -84,19 +85,6 @@ class ClassPathDirEntry: public ClassPathEntry {
ClassFileStream* open_stream(JavaThread* current, const char* name);
};

// Type definitions for zip file and zip file entry
typedef void* jzfile;
typedef struct {
char *name; /* entry name */
jlong time; /* modification time */
jlong size; /* size of uncompressed data */
jlong csize; /* size of compressed data (zero if uncompressed) */
jint crc; /* crc of uncompressed data */
char *comment; /* optional zip file comment */
jbyte *extra; /* optional extra data */
jlong pos; /* position of LOC header (if negative) or data */
} jzentry;

class ClassPathZipEntry: public ClassPathEntry {
private:
jzfile* _zip; // The zip archive
@@ -227,8 +215,6 @@ class ClassLoader: AllStatic {
CDS_ONLY(static void add_to_module_path_entries(const char* path,
ClassPathEntry* entry);)

// cache the zip library handle
static void* _zip_handle;
public:
CDS_ONLY(static ClassPathEntry* app_classpath_entries() {return _app_classpath_entries;})
CDS_ONLY(static ClassPathEntry* module_path_entries() {return _module_path_entries;})
@@ -247,16 +233,10 @@ class ClassLoader: AllStatic {

static void* dll_lookup(void* lib, const char* name, const char* path);
static void load_java_library();
static void load_zip_library();
static void load_jimage_library();

private:
static int _libzip_loaded; // used to sync loading zip.
static void release_load_zip_library();

public:
static inline void load_zip_library_if_needed();
static void* zip_library_handle() { return _zip_handle; }
static void* zip_library_handle();
static jzfile* open_zip_file(const char* canonical_path, char** error_msg, JavaThread* thread);
static ClassPathEntry* create_class_path_entry(JavaThread* current,
const char *path, const struct stat* st,

@@ -58,12 +58,6 @@ inline ClassPathEntry* ClassLoader::classpath_entry(int n) {
}
}

inline void ClassLoader::load_zip_library_if_needed() {
if (Atomic::load_acquire(&_libzip_loaded) == 0) {
release_load_zip_library();
}
}
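
Sketch only, not part of the change: the Atomic::load_acquire / Atomic::release_store pair being removed here is classic double-checked lazy initialization. The same protocol in standalone C++ with std::atomic (placeholder names; the lock is assumed):

#include <atomic>
#include <mutex>

static std::atomic<int> lib_loaded{0};
static std::mutex       load_lock;

void load_library();  // assumed: the expensive one-time initialization

void load_library_if_needed() {
  if (lib_loaded.load(std::memory_order_acquire) == 0) {  // fast path, no lock
    std::lock_guard<std::mutex> lg(load_lock);            // slow path
    if (lib_loaded.load(std::memory_order_relaxed) == 0) {
      load_library();                                     // do all side effects...
      lib_loaded.store(1, std::memory_order_release);     // ...before publishing
    }
  }
}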

#if INCLUDE_CDS

// Helper function used by CDS code to get the number of boot classpath

@@ -2453,7 +2453,7 @@ void java_lang_Throwable::print_stack_trace(Handle throwable, outputStream* st)
BacktraceElement bte = iter.next(THREAD);
print_stack_element_to_stream(st, bte._mirror, bte._method_id, bte._version, bte._bci, bte._name);
}
{
if (THREAD->can_call_java()) {
// Call getCause() which doesn't necessarily return the _cause field.
ExceptionMark em(THREAD);
JavaValue cause(T_OBJECT);
@@ -2475,6 +2475,9 @@ void java_lang_Throwable::print_stack_trace(Handle throwable, outputStream* st)
st->cr();
}
}
} else {
st->print_raw_cr("<<cannot call Java to get cause>>");
return;
}
}
}

@@ -23,6 +23,7 @@
*/

#include "precompiled.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classFileParser.hpp"
#include "classfile/classLoader.hpp"
@@ -559,6 +560,49 @@ void Modules::verify_archived_modules() {
ModuleEntry::verify_archived_module_entries();
}

#if INCLUDE_CDS_JAVA_HEAP
char* Modules::_archived_main_module_name = nullptr;
#endif

void Modules::dump_main_module_name() {
const char* module_name = Arguments::get_property("jdk.module.main");
if (module_name != nullptr) {
_archived_main_module_name = ArchiveBuilder::current()->ro_strdup(module_name);
}
ArchivePtrMarker::mark_pointer(&_archived_main_module_name);
}

void Modules::serialize(SerializeClosure* soc) {
soc->do_ptr(&_archived_main_module_name);
if (soc->reading()) {
const char* runtime_main_module = Arguments::get_property("jdk.module.main");
log_info(cds)("_archived_main_module_name %s",
_archived_main_module_name != nullptr ? _archived_main_module_name : "(null)");
bool disable = false;
if (runtime_main_module == nullptr) {
if (_archived_main_module_name != nullptr) {
log_info(cds)("Module %s specified during dump time but not during runtime", _archived_main_module_name);
disable = true;
}
} else {
if (_archived_main_module_name == nullptr) {
log_info(cds)("Module %s specified during runtime but not during dump time", runtime_main_module);
disable = true;
} else if (strcmp(runtime_main_module, _archived_main_module_name) != 0) {
log_info(cds)("Mismatched modules: runtime %s dump time %s", runtime_main_module, _archived_main_module_name);
disable = true;
}
}

if (disable) {
log_info(cds)("Disabling optimized module handling");
MetaspaceShared::disable_optimized_module_handling();
}
log_info(cds)("optimized module handling: %s", MetaspaceShared::use_optimized_module_handling() ? "enabled" : "disabled");
log_info(cds)("full module graph: %s", MetaspaceShared::use_full_module_graph() ? "enabled" : "disabled");
}
}

void Modules::define_archived_modules(Handle h_platform_loader, Handle h_system_loader, TRAPS) {
assert(UseSharedSpaces && MetaspaceShared::use_full_module_graph(), "must be");

@@ -59,6 +59,12 @@ public:
static void define_archived_modules(Handle h_platform_loader, Handle h_system_loader,
TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
static void verify_archived_modules() NOT_CDS_JAVA_HEAP_RETURN;
static void dump_main_module_name() NOT_CDS_JAVA_HEAP_RETURN;
static void serialize(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN;

#if INCLUDE_CDS_JAVA_HEAP
static char* _archived_main_module_name;
#endif

// Provides the java.lang.Module for the unnamed module defined
// to the boot loader.

@@ -611,7 +611,7 @@ InstanceKlass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
InstanceKlass* loaded_class = nullptr;
SymbolHandle superclassname; // Keep alive while loading in parallel thread.

assert(THREAD->can_call_java(),
guarantee(THREAD->can_call_java(),
"can not load classes with compiler thread: class=%s, classloader=%s",
name->as_C_string(),
class_loader.is_null() ? "null" : class_loader->klass()->name()->as_C_string());
@@ -2056,7 +2056,7 @@ Method* SystemDictionary::find_method_handle_invoker(Klass* klass,
Klass* accessing_klass,
Handle* appendix_result,
TRAPS) {
assert(THREAD->can_call_java() ,"");
guarantee(THREAD->can_call_java(), "");
Handle method_type =
SystemDictionary::find_method_handle_type(signature, accessing_klass, CHECK_NULL);

@@ -26,6 +26,9 @@
#include "precompiled.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#endif
#include "compiler/abstractCompiler.hpp"
#include "compiler/compilationMemoryStatistic.hpp"
#include "compiler/compilerDirectives.hpp"
@@ -42,15 +45,16 @@
#endif
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"
#include "utilities/quickSort.hpp"
#include "utilities/resourceHash.hpp"

ArenaStatCounter::ArenaStatCounter() :
_current(0), _start(0), _peak(0),
_na(0), _ra(0),
_limit(0), _hit_limit(false),
_na_at_peak(0), _ra_at_peak(0), _live_nodes_at_peak(0)
{}

@@ -58,8 +62,15 @@ size_t ArenaStatCounter::peak_since_start() const {
return _peak > _start ? _peak - _start : 0;
}

void ArenaStatCounter::start() {
void ArenaStatCounter::start(size_t limit) {
_peak = _start = _current;
_limit = limit;
_hit_limit = false;
}

void ArenaStatCounter::end(){
_limit = 0;
_hit_limit = false;
}

void ArenaStatCounter::update_c2_node_count() {
@@ -104,6 +115,10 @@ bool ArenaStatCounter::account(ssize_t delta, int tag) {
_ra_at_peak = _ra;
update_c2_node_count();
rc = true;
// Did we hit the memory limit?
if (!_hit_limit && _limit > 0 && peak_since_start() > _limit) {
_hit_limit = true;
}
}
return rc;
}
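
Sketch only, not part of the change: the limit check above, reduced to a standalone counter. The limit is compared against growth since start(), not the absolute footprint, and the flag latches so the limit fires at most once per compilation:

#include <cstddef>

struct PeakCounter {
  size_t current = 0, start = 0, peak = 0, limit = 0;
  bool hit_limit = false;

  void begin(size_t lim) { peak = start = current; limit = lim; hit_limit = false; }
  size_t peak_since_start() const { return peak > start ? peak - start : 0; }

  // Returns true when a new peak is reached.
  bool account(ptrdiff_t delta) {
    current += delta;
    if (current <= peak) return false;
    peak = current;
    if (!hit_limit && limit > 0 && peak_since_start() > limit) {
      hit_limit = true;  // latched: reported only once
    }
    return true;
  }
};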

@@ -125,7 +140,8 @@ class FullMethodName {

public:

FullMethodName(Symbol* k, Symbol* m, Symbol* s) : _k(k), _m(m), _s(s) {}
FullMethodName(const Method* m) :
_k(m->klass_name()), _m(m->name()), _s(m->signature()) {};
FullMethodName(const FullMethodName& o) : _k(o._k), _m(o._m), _s(o._s) {}

void make_permanent() {
@@ -173,13 +189,15 @@ class MemStatEntry : public CHeapObj<mtInternal> {
size_t _na_at_peak;
size_t _ra_at_peak;
unsigned _live_nodes_at_peak;
const char* _result;

public:

MemStatEntry(FullMethodName method)
: _method(method), _comptype(compiler_c1),
_time(0), _num_recomp(0), _thread(nullptr),
_total(0), _na_at_peak(0), _ra_at_peak(0), _live_nodes_at_peak(0) {
_total(0), _na_at_peak(0), _ra_at_peak(0), _live_nodes_at_peak(0),
_result(nullptr) {
}

void set_comptype(CompilerType comptype) { _comptype = comptype; }
@@ -192,6 +210,8 @@ public:
void set_ra_at_peak(size_t n) { _ra_at_peak = n; }
void set_live_nodes_at_peak(unsigned n) { _live_nodes_at_peak = n; }

void set_result(const char* s) { _result = s; }

size_t total() const { return _total; }

static void print_legend(outputStream* st) {
@@ -199,7 +219,8 @@ public:
st->print_cr(" total : memory allocated via arenas while compiling");
st->print_cr(" NA : ...how much in node arenas (if c2)");
st->print_cr(" RA : ...how much in resource areas");
st->print_cr(" #nodes : ...how many nodes (if c2)");
st->print_cr(" result : Result: 'ok' finished successfully, 'oom' hit memory limit, 'err' compilation failed");
st->print_cr(" #nodes : ...how many nodes (c2 only)");
st->print_cr(" time : time of last compilation (sec)");
st->print_cr(" type : compiler type");
st->print_cr(" #rc : how often recompiled");
@@ -207,7 +228,7 @@ public:
}

static void print_header(outputStream* st) {
st->print_cr("total NA RA #nodes time type #rc thread method");
st->print_cr("total NA RA result #nodes time type #rc thread method");
}

void print_on(outputStream* st, bool human_readable) const {
@@ -237,6 +258,10 @@ public:
}
col += 10; st->fill_to(col);

// result?
st->print("%s ", _result ? _result : "");
col += 8; st->fill_to(col);

// Number of Nodes when memory peaked
st->print("%u ", _live_nodes_at_peak);
col += 8; st->fill_to(col);
@@ -281,7 +306,7 @@ public:

void add(const FullMethodName& fmn, CompilerType comptype,
size_t total, size_t na_at_peak, size_t ra_at_peak,
unsigned live_nodes_at_peak) {
unsigned live_nodes_at_peak, const char* result) {
assert_lock_strong(NMTCompilationCostHistory_lock);

MemStatEntry** pe = get(fmn);
@@ -302,6 +327,7 @@ public:
e->set_na_at_peak(na_at_peak);
e->set_ra_at_peak(ra_at_peak);
e->set_live_nodes_at_peak(live_nodes_at_peak);
e->set_result(result);
}

// Returns a C-heap-allocated SortMe array containing all entries from the table,
@@ -341,20 +367,21 @@ void CompilationMemoryStatistic::initialize() {
log_info(compilation, alloc)("Compilation memory statistic enabled");
}

void CompilationMemoryStatistic::on_start_compilation() {
void CompilationMemoryStatistic::on_start_compilation(const DirectiveSet* directive) {
assert(enabled(), "Not enabled?");
Thread::current()->as_Compiler_thread()->arena_stat()->start();
const size_t limit = directive->mem_limit();
Thread::current()->as_Compiler_thread()->arena_stat()->start(limit);
}

void CompilationMemoryStatistic::on_end_compilation() {
assert(enabled(), "Not enabled?");
ResourceMark rm;
CompilerThread* const th = Thread::current()->as_Compiler_thread();
const ArenaStatCounter* const arena_stat = th->arena_stat();
ArenaStatCounter* const arena_stat = th->arena_stat();
const CompilerType ct = th->task()->compiler()->type();

const Method* const m = th->task()->method();
FullMethodName fmn(m->klass_name(), m->name(), m->signature());
FullMethodName fmn(m);
fmn.make_permanent();

const DirectiveSet* directive = th->task()->directive();
@@ -368,6 +395,20 @@ void CompilationMemoryStatistic::on_end_compilation() {
arena_stat->print_on(tty);
tty->cr();
}

// Store result
// For this to work, we must call on_end_compilation() at a point where
// Compile|Compilation already handed over the failure string to ciEnv,
// but ciEnv must still be alive.
const char* result = "ok"; // ok
const ciEnv* const env = th->env();
if (env) {
const char* const failure_reason = env->failure_reason();
if (failure_reason != nullptr) {
result = (failure_reason == failure_reason_memlimit()) ? "oom" : "err";
}
}

{
MutexLocker ml(NMTCompilationCostHistory_lock, Mutex::_no_safepoint_check_flag);
assert(_the_table != nullptr, "not initialized");
@@ -376,14 +417,105 @@ void CompilationMemoryStatistic::on_end_compilation() {
arena_stat->peak_since_start(), // total
arena_stat->na_at_peak(),
arena_stat->ra_at_peak(),
arena_stat->live_nodes_at_peak());
arena_stat->live_nodes_at_peak(),
result);
}

arena_stat->end(); // reset things
}

static void inform_compilation_about_oom(CompilerType ct) {
// Inform C1 or C2 that an OOM happened. They will take delayed action
// and abort the compilation in progress. Note that this is not instantaneous,
// since the compiler has to actively bailout, which may take a while, during
// which memory usage may rise further.
//
// The mechanism differs slightly between C1 and C2:
// - With C1, we directly set the bailout string, which will cause C1 to
// bailout at the typical BAILOUT places.
// - With C2, the corresponding mechanism would be the failure string; but
// bailout paths in C2 are not complete and therefore it is dangerous to
// set the failure string at - for C2 - seemingly random places. Instead,
// upon OOM C2 sets the failure string next time it checks the node limit.
if (ciEnv::current() != nullptr) {
void* compiler_data = ciEnv::current()->compiler_data();
#ifdef COMPILER1
if (ct == compiler_c1) {
Compilation* C = static_cast<Compilation*>(compiler_data);
if (C != nullptr) {
C->bailout(CompilationMemoryStatistic::failure_reason_memlimit());
C->set_oom();
}
}
#endif
#ifdef COMPILER2
if (ct == compiler_c2) {
Compile* C = static_cast<Compile*>(compiler_data);
if (C != nullptr) {
C->set_oom();
}
}
#endif // COMPILER2
}
}

void CompilationMemoryStatistic::on_arena_change(ssize_t diff, const Arena* arena) {
assert(enabled(), "Not enabled?");
CompilerThread* const th = Thread::current()->as_Compiler_thread();
th->arena_stat()->account(diff, (int)arena->get_tag());

ArenaStatCounter* const arena_stat = th->arena_stat();
bool hit_limit_before = arena_stat->hit_limit();

if (arena_stat->account(diff, (int)arena->get_tag())) { // new peak?

// Limit handling
if (arena_stat->hit_limit()) {

char name[1024] = "";
bool print = false;
bool crash = false;
CompilerType ct = compiler_none;

// get some more info
const CompileTask* task = th->task();
if (task != nullptr) {
ct = task->compiler()->type();
const DirectiveSet* directive = task->directive();
print = directive->should_print_memstat();
crash = directive->should_crash_at_mem_limit();
const Method* m = th->task()->method();
if (m != nullptr) {
FullMethodName(m).as_C_string(name, sizeof(name));
}
}

char message[1024] = "";

// build up message if we need it later
if (print || crash) {
stringStream ss(message, sizeof(message));
if (ct != compiler_none && name[0] != '\0') {
ss.print("%s %s: ", compilertype2name(ct), name);
}
ss.print("Hit MemLimit %s (limit: %zu now: %zu)",
(hit_limit_before ? "again" : ""),
arena_stat->limit(), arena_stat->peak_since_start());
}

// log if needed
if (print) {
tty->print_raw(message);
tty->cr();
}

// Crash out if needed
if (crash) {
report_fatal(OOM_HOTSPOT_ARENA, __FILE__, __LINE__, "%s", message);
} else {
inform_compilation_about_oom(ct);
}
}
}
}

static inline ssize_t diff_entries_by_size(const MemStatEntry* e1, const MemStatEntry* e2) {
@@ -438,10 +570,15 @@ void CompilationMemoryStatistic::print_all_by_size(outputStream* st, bool human_
FREE_C_HEAP_ARRAY(Entry, filtered);
}

const char* CompilationMemoryStatistic::failure_reason_memlimit() {
static const char* const s = "hit memory limit while compiling";
return s;
}

CompilationMemoryStatisticMark::CompilationMemoryStatisticMark(const DirectiveSet* directive)
: _active(directive->should_collect_memstat()) {
if (_active) {
CompilationMemoryStatistic::on_start_compilation();
CompilationMemoryStatistic::on_start_compilation(directive);
}
}
CompilationMemoryStatisticMark::~CompilationMemoryStatisticMark() {

@@ -47,6 +47,9 @@ class ArenaStatCounter : public CHeapObj<mtCompiler> {
size_t _na;
// Current bytes used for resource areas
size_t _ra;
// MemLimit handling
size_t _limit;
bool _hit_limit;

// Peak composition:
// Size of node arena when total peaked (c2 only)
@@ -69,15 +72,20 @@ public:
size_t ra_at_peak() const { return _ra_at_peak; }
unsigned live_nodes_at_peak() const { return _live_nodes_at_peak; }

// Mark the start of a compilation.
void start();
// Mark the start and end of a compilation.
void start(size_t limit);
void end();

// Account an arena allocation or de-allocation.
// Returns true if new peak reached
bool account(ssize_t delta, int tag);

void set_live_nodes_at_peak(unsigned i) { _live_nodes_at_peak = i; }

void print_on(outputStream* st) const;

size_t limit() const { return _limit; }
bool hit_limit() const { return _hit_limit; }
};

class CompilationMemoryStatistic : public AllStatic {
@@ -86,10 +94,16 @@ public:
static void initialize();
// true if CollectMemStat or PrintMemStat has been enabled for any method
static bool enabled() { return _enabled; }
static void on_start_compilation();
static void on_start_compilation(const DirectiveSet* directive);

// Called at end of compilation. Records the arena usage peak. Also takes over
// status information from ciEnv (compilation failed, oom'ed or went okay). ciEnv::_failure_reason
// must be set at this point (so place CompilationMemoryStatisticMark correctly).
static void on_end_compilation();
static void on_arena_change(ssize_t diff, const Arena* arena);
static void print_all_by_size(outputStream* st, bool human_readable, size_t minsize);
// For compilers
static const char* failure_reason_memlimit();
};

// RAII object to wrap one compilation

@@ -142,7 +142,7 @@ void CompileTask::initialize(int compile_id,
/**
* Returns the compiler for this task.
*/
AbstractCompiler* CompileTask::compiler() {
AbstractCompiler* CompileTask::compiler() const {
return CompileBroker::compiler(_comp_level);
}

@@ -180,7 +180,7 @@ class CompileTask : public CHeapObj<mtCompiler> {
int comp_level() { return _comp_level;}
void set_comp_level(int comp_level) { _comp_level = comp_level;}

AbstractCompiler* compiler();
AbstractCompiler* compiler() const;
CompileTask* select_for_compilation();

int num_inlined_bytecodes() const { return _num_inlined_bytecodes; }

@@ -203,13 +203,24 @@ bool DirectiveSet::is_c2(CompilerDirectives* directive) const {
}

bool DirectiveSet::should_collect_memstat() const {
return MemStatOption > 0;
// MemLimit requires the memory statistic to be active
return MemStatOption > 0 || MemLimitOption != 0;
}

bool DirectiveSet::should_print_memstat() const {
return MemStatOption == (uintx)MemStatAction::print;
}

size_t DirectiveSet::mem_limit() const {
return MemLimitOption < 0 ? -MemLimitOption : MemLimitOption;
}

bool DirectiveSet::should_crash_at_mem_limit() const {
// The sign encodes the action to be taken when reaching
// the memory limit (+ stop - crash)
return MemLimitOption < 0;
}
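
Worked example (not part of the change) of the sign encoding above, assuming MemLimitOption was parsed from a CompileCommand value:

// "20m"      -> MemLimitOption =  20971520 -> mem_limit() = 20971520, stop
// "20m~stop" -> MemLimitOption =  20971520 -> mem_limit() = 20971520, stop
// "1m~crash" -> MemLimitOption =  -1048576 -> mem_limit() =  1048576, crash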

// In the list of Control/disabled intrinsics, the ID of the control intrinsics can separated:
// - by ',' (if -XX:Control/DisableIntrinsic is used once when invoking the VM) or
// - by '\n' (if -XX:Control/DisableIntrinsic is used multiple times when invoking the VM) or

@@ -41,6 +41,7 @@
cflags(BreakAtExecute, bool, false, BreakAtExecute) \
cflags(BreakAtCompile, bool, false, BreakAtCompile) \
cflags(Log, bool, LogCompilation, Unknown) \
cflags(MemLimit, intx, 0, MemLimit) \
cflags(MemStat, uintx, 0, MemStat) \
cflags(PrintAssembly, bool, PrintAssembly, PrintAssembly) \
cflags(PrintCompilation, bool, PrintCompilation, PrintCompilation) \
@@ -150,6 +151,8 @@ public:
bool is_c2(CompilerDirectives* directive) const;
bool should_collect_memstat() const;
bool should_print_memstat() const;
size_t mem_limit() const;
bool should_crash_at_mem_limit() const; // true: crash false: stop compilation

typedef enum {
#define enum_of_flags(name, type, dvalue, cc_flag) name##Index,

@@ -39,6 +39,7 @@
#include "runtime/handles.inline.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/os.hpp"
#include "utilities/parseInteger.hpp"

static const char* optiontype_names[] = {
#define enum_of_types(type, name) name,
@@ -459,7 +460,7 @@ bool CompilerOracle::should_print_methods() {

// Tells whether there are any methods to collect memory statistics for
bool CompilerOracle::should_collect_memstat() {
return has_command(CompileCommand::MemStat);
return has_command(CompileCommand::MemStat) || has_command(CompileCommand::MemLimit);
}

bool CompilerOracle::should_print_final_memstat_report() {
@@ -634,6 +635,44 @@ void skip_comma(char* &line) {
}
}

static bool parseMemLimit(const char* line, intx& value, int& bytes_read, char* errorbuf, const int buf_size) {
// Format:
// "<memory size>['~' <suboption>]"
// <memory size> can have units, e.g. M
// <suboption> one of "crash" "stop", if omitted, "stop" is implied.
//
// Examples:
// -XX:CompileCommand='memlimit,*.*,20m'
// -XX:CompileCommand='memlimit,*.*,20m~stop'
// -XX:CompileCommand='memlimit,Option::toString,1m~crash'
//
// The resulting intx carries the size and whether we are to stop or crash:
// - neg. value means crash
// - pos. value (default) means stop
size_t s = 0;
char* end;
if (!parse_integer<size_t>(line, &end, &s)) {
jio_snprintf(errorbuf, buf_size, "MemLimit: invalid value");
return false;
}
bytes_read = (int)(end - line);

intx v = (intx)s;
if ((*end) != '\0') {
if (strncasecmp(end, "~crash", 6) == 0) {
v = -v;
bytes_read += 6;
} else if (strncasecmp(end, "~stop", 5) == 0) {
// ok, this is the default
bytes_read += 5;
} else {
jio_snprintf(errorbuf, buf_size, "MemLimit: invalid option");
return false;
}
}
value = v;
return true;
}
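
Hypothetical usage sketch for the parser above (names as in the hunk; not an API guarantee):

// intx value = 0; int read = 0; char err[128] = "";
// if (parseMemLimit("1m~crash", value, read, err, sizeof(err))) {
//   // value == -1048576: the magnitude is the limit, the sign requests a crash
// }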
|
||||
|
||||
static bool parseEnumValueAsUintx(enum CompileCommand option, const char* line, uintx& value, int& bytes_read, char* errorbuf, const int buf_size) {
|
||||
if (option == CompileCommand::MemStat) {
|
||||
if (strncasecmp(line, "collect", 7) == 0) {
|
||||
@@ -659,7 +698,13 @@ static void scan_value(enum OptionType type, char* line, int& total_bytes_read,
|
||||
total_bytes_read += skipped;
|
||||
if (type == OptionType::Intx) {
|
||||
intx value;
|
||||
if (sscanf(line, "" INTX_FORMAT "%n", &value, &bytes_read) == 1) {
|
||||
// Special handling for memlimit
|
||||
bool success = (option == CompileCommand::MemLimit) && parseMemLimit(line, value, bytes_read, errorbuf, buf_size);
|
||||
if (!success) {
|
||||
// Is it a raw number?
|
||||
success = sscanf(line, "" INTX_FORMAT "%n", &value, &bytes_read) == 1;
|
||||
}
|
||||
if (success) {
|
||||
total_bytes_read += bytes_read;
|
||||
line += bytes_read;
|
||||
register_command(matcher, option, value);
|
||||
|
||||
@@ -57,6 +57,7 @@ class methodHandle;
|
||||
option(Break, "break", Bool) \
|
||||
option(BreakAtExecute, "BreakAtExecute", Bool) \
|
||||
option(BreakAtCompile, "BreakAtCompile", Bool) \
|
||||
option(MemLimit, "MemLimit", Intx) \
|
||||
option(MemStat, "MemStat", Uintx) \
|
||||
option(PrintAssembly, "PrintAssembly", Bool) \
|
||||
option(PrintCompilation, "PrintCompilation", Bool) \
|
||||
|
||||
@@ -39,6 +39,7 @@ CompilerThread::CompilerThread(CompileQueue* queue,
|
||||
_queue = queue;
|
||||
_counters = counters;
|
||||
_buffer_blob = nullptr;
|
||||
_can_call_java = false;
|
||||
_compiler = nullptr;
|
||||
_arena_stat = CompilationMemoryStatistic::enabled() ? new ArenaStatCounter : nullptr;
|
||||
|
||||
@@ -56,15 +57,17 @@ CompilerThread::~CompilerThread() {
|
||||
delete _arena_stat;
|
||||
}
|
||||
|
||||
void CompilerThread::set_compiler(AbstractCompiler* c) {
|
||||
// Only jvmci compiler threads can call Java
|
||||
_can_call_java = c != nullptr && c->is_jvmci();
|
||||
_compiler = c;
|
||||
}
|
||||
|
||||
void CompilerThread::thread_entry(JavaThread* thread, TRAPS) {
|
||||
assert(thread->is_Compiler_thread(), "must be compiler thread");
|
||||
CompileBroker::compiler_thread_loop();
|
||||
}
|
||||
|
||||
bool CompilerThread::can_call_java() const {
|
||||
return _compiler != nullptr && _compiler->is_jvmci();
|
||||
}
|
||||
|
||||
// Hide native compiler threads from external view.
|
||||
bool CompilerThread::is_hidden_from_external_view() const {
|
||||
return _compiler == nullptr || _compiler->is_hidden_from_external_view();
|
||||
|
||||
@@ -31,18 +31,17 @@ class AbstractCompiler;
|
||||
class ArenaStatCounter;
|
||||
class BufferBlob;
|
||||
class ciEnv;
|
||||
class CompileThread;
|
||||
class CompilerThread;
|
||||
class CompileLog;
|
||||
class CompileTask;
|
||||
class CompileQueue;
|
||||
class CompilerCounters;
|
||||
class IdealGraphPrinter;
|
||||
class JVMCIEnv;
|
||||
class JVMCIPrimitiveArray;
|
||||
|
||||
// A thread used for Compilation.
|
||||
class CompilerThread : public JavaThread {
|
||||
friend class VMStructs;
|
||||
JVMCI_ONLY(friend class CompilerThreadCanCallJava;)
|
||||
private:
|
||||
CompilerCounters* _counters;
|
||||
|
||||
@@ -51,6 +50,7 @@ class CompilerThread : public JavaThread {
|
||||
CompileTask* volatile _task; // print_threads_compiling can read this concurrently.
|
||||
CompileQueue* _queue;
|
||||
BufferBlob* _buffer_blob;
|
||||
bool _can_call_java;
|
||||
|
||||
AbstractCompiler* _compiler;
|
||||
TimeStamp _idle_time;
|
||||
@@ -73,13 +73,13 @@ class CompilerThread : public JavaThread {
|
||||
|
||||
bool is_Compiler_thread() const { return true; }
|
||||
|
||||
virtual bool can_call_java() const;
|
||||
virtual bool can_call_java() const { return _can_call_java; }
|
||||
|
||||
// Returns true if this CompilerThread is hidden from JVMTI and FlightRecorder. C1 and C2 are
|
||||
// always hidden but JVMCI compiler threads might be hidden.
|
||||
virtual bool is_hidden_from_external_view() const;
|
||||
|
||||
void set_compiler(AbstractCompiler* c) { _compiler = c; }
|
||||
void set_compiler(AbstractCompiler* c);
|
||||
AbstractCompiler* compiler() const { return _compiler; }
|
||||
|
||||
CompileQueue* queue() const { return _queue; }
|
||||
|
||||
@@ -257,7 +257,7 @@ public:
|
||||
HR_FORMAT_PARAMS(r),
|
||||
p2i(r->top_at_mark_start()),
|
||||
p2i(r->parsable_bottom()),
|
||||
r->has_surv_rate_group() ? r->age_in_surv_rate_group() : -1);
|
||||
r->has_surv_rate_group() ? checked_cast<int>(r->age_in_surv_rate_group()) : -1);
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
@@ -2162,7 +2162,6 @@ void G1CMTask::reset(G1CMBitMap* mark_bitmap) {
|
||||
_calls = 0;
|
||||
_elapsed_time_ms = 0.0;
|
||||
_termination_time_ms = 0.0;
|
||||
_termination_start_time_ms = 0.0;
|
||||
|
||||
_mark_stats_cache.reset();
|
||||
}
|
||||
@@ -2761,16 +2760,14 @@ void G1CMTask::do_marking_step(double time_target_ms,
|
||||
// Separated the asserts so that we know which one fires.
|
||||
assert(_cm->out_of_regions(), "only way to reach here");
|
||||
assert(_task_queue->size() == 0, "only way to reach here");
|
||||
_termination_start_time_ms = os::elapsedVTime() * 1000.0;
|
||||
double termination_start_time_ms = os::elapsedVTime() * 1000.0;
|
||||
|
||||
// The G1CMTask class also extends the TerminatorTerminator class,
|
||||
// hence its should_exit_termination() method will also decide
|
||||
// whether to exit the termination protocol or not.
|
||||
bool finished = (is_serial ||
|
||||
_cm->terminator()->offer_termination(this));
|
||||
double termination_end_time_ms = os::elapsedVTime() * 1000.0;
|
||||
_termination_time_ms +=
|
||||
termination_end_time_ms - _termination_start_time_ms;
|
||||
_termination_time_ms += (os::elapsedTime() * 1000.0 - termination_start_time_ms);
|
||||
|
||||
if (finished) {
|
||||
// We're all done.
|
||||
@@ -2888,7 +2885,6 @@ G1CMTask::G1CMTask(uint worker_id,
|
||||
_step_times_ms(),
|
||||
_elapsed_time_ms(0.0),
|
||||
_termination_time_ms(0.0),
|
||||
_termination_start_time_ms(0.0),
|
||||
_marking_step_diff_ms()
|
||||
{
|
||||
guarantee(task_queue != nullptr, "invariant");
|
||||
|
||||
@@ -702,8 +702,6 @@ private:
  double _elapsed_time_ms;
  // Termination time of this task
  double _termination_time_ms;
  // When this task got into the termination protocol
  double _termination_start_time_ms;

  TruncatedSeq _marking_step_diff_ms;
@@ -1,218 +0,0 @@
/*
 * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1EvacFailure.hpp"
#include "gc/g1/g1EvacFailureRegions.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.hpp"
#include "utilities/bitMap.inline.hpp"

class PhaseTimesStat {
  static constexpr G1GCPhaseTimes::GCParPhases phase_name =
    G1GCPhaseTimes::RemoveSelfForwards;

  G1GCPhaseTimes* _phase_times;
  uint _worker_id;
  Ticks _start;

public:
  PhaseTimesStat(G1GCPhaseTimes* phase_times, uint worker_id) :
    _phase_times(phase_times),
    _worker_id(worker_id),
    _start(Ticks::now()) { }

  ~PhaseTimesStat() {
    _phase_times->record_or_add_time_secs(phase_name,
                                          _worker_id,
                                          (Ticks::now() - _start).seconds());
  }

  void register_empty_chunk() {
    _phase_times->record_or_add_thread_work_item(phase_name,
                                                 _worker_id,
                                                 1,
                                                 G1GCPhaseTimes::RemoveSelfForwardEmptyChunksNum);
  }

  void register_nonempty_chunk() {
    _phase_times->record_or_add_thread_work_item(phase_name,
                                                 _worker_id,
                                                 1,
                                                 G1GCPhaseTimes::RemoveSelfForwardChunksNum);
  }

  void register_objects_count_and_size(size_t num_marked_obj, size_t marked_words) {
    _phase_times->record_or_add_thread_work_item(phase_name,
                                                 _worker_id,
                                                 num_marked_obj,
                                                 G1GCPhaseTimes::RemoveSelfForwardObjectsNum);

    size_t marked_bytes = marked_words * HeapWordSize;
    _phase_times->record_or_add_thread_work_item(phase_name,
                                                 _worker_id,
                                                 marked_bytes,
                                                 G1GCPhaseTimes::RemoveSelfForwardObjectsBytes);
  }
};

// Fill the memory area from start to end with filler objects, and update the BOT
// accordingly. Since we clear and use the bitmap for marking objects that failed
// evacuation, there is no other work to be done there.
static size_t zap_dead_objects(HeapRegion* hr, HeapWord* start, HeapWord* end) {
  assert(start <= end, "precondition");
  if (start == end) {
    return 0;
  }

  hr->fill_range_with_dead_objects(start, end);
  return pointer_delta(end, start);
}

static void update_garbage_words_in_hr(HeapRegion* hr, size_t garbage_words) {
  if (garbage_words != 0) {
    hr->note_self_forward_chunk_done(garbage_words * HeapWordSize);
  }
}

static void prefetch_obj(HeapWord* obj_addr) {
  Prefetch::write(obj_addr, PrefetchScanIntervalInBytes);
}

void G1RemoveSelfForwardsTask::process_chunk(uint worker_id,
                                             uint chunk_idx) {
  PhaseTimesStat stat(_g1h->phase_times(), worker_id);

  G1CMBitMap* bitmap = _cm->mark_bitmap();
  const uint region_idx = _evac_failure_regions->get_region_idx(chunk_idx / _num_chunks_per_region);
  HeapRegion* hr = _g1h->region_at(region_idx);

  HeapWord* hr_bottom = hr->bottom();
  HeapWord* hr_top = hr->top();
  HeapWord* chunk_start = hr_bottom + (chunk_idx % _num_chunks_per_region) * _chunk_size;

  assert(chunk_start < hr->end(), "inv");
  if (chunk_start >= hr_top) {
    return;
  }

  HeapWord* chunk_end = MIN2(chunk_start + _chunk_size, hr_top);
  HeapWord* first_marked_addr = bitmap->get_next_marked_addr(chunk_start, hr_top);

  size_t garbage_words = 0;

  if (chunk_start == hr_bottom) {
    // This is the bottom-most chunk in this region; zap [bottom, first_marked_addr).
    garbage_words += zap_dead_objects(hr, hr_bottom, first_marked_addr);
  }

  if (first_marked_addr >= chunk_end) {
    stat.register_empty_chunk();
    update_garbage_words_in_hr(hr, garbage_words);
    return;
  }

  stat.register_nonempty_chunk();

  size_t num_marked_objs = 0;
  size_t marked_words = 0;

  HeapWord* obj_addr = first_marked_addr;
  assert(chunk_start <= obj_addr && obj_addr < chunk_end,
         "object " PTR_FORMAT " must be within chunk [" PTR_FORMAT ", " PTR_FORMAT "[",
         p2i(obj_addr), p2i(chunk_start), p2i(chunk_end));
  do {
    assert(bitmap->is_marked(obj_addr), "inv");
    prefetch_obj(obj_addr);

    oop obj = cast_to_oop(obj_addr);
    const size_t obj_size = obj->size();
    HeapWord* const obj_end_addr = obj_addr + obj_size;

    {
      // Process marked object.
      assert(obj->is_forwarded() && obj->forwardee() == obj, "must be self-forwarded");
      obj->init_mark();
      hr->update_bot_for_block(obj_addr, obj_end_addr);

      // Statistics
      num_marked_objs++;
      marked_words += obj_size;
    }

    assert(obj_end_addr <= hr_top, "inv");
    // Use hr_top as the limit so that we zap dead ranges up to the next
    // marked obj or hr_top.
    HeapWord* next_marked_obj_addr = bitmap->get_next_marked_addr(obj_end_addr, hr_top);
    garbage_words += zap_dead_objects(hr, obj_end_addr, next_marked_obj_addr);
    obj_addr = next_marked_obj_addr;
  } while (obj_addr < chunk_end);

  assert(marked_words > 0 && num_marked_objs > 0, "inv");

  stat.register_objects_count_and_size(num_marked_objs, marked_words);

  update_garbage_words_in_hr(hr, garbage_words);
}

G1RemoveSelfForwardsTask::G1RemoveSelfForwardsTask(G1EvacFailureRegions* evac_failure_regions) :
  WorkerTask("G1 Remove Self-forwarding Pointers"),
  _g1h(G1CollectedHeap::heap()),
  _cm(_g1h->concurrent_mark()),
  _evac_failure_regions(evac_failure_regions),
  _chunk_bitmap(mtGC) {

  _num_evac_fail_regions = _evac_failure_regions->num_regions_failed_evacuation();
  _num_chunks_per_region = G1CollectedHeap::get_chunks_per_region();

  _chunk_size = static_cast<uint>(HeapRegion::GrainWords / _num_chunks_per_region);

  log_debug(gc, ergo)("Initializing removing self forwards with %u chunks per region",
                      _num_chunks_per_region);

  _chunk_bitmap.resize(_num_chunks_per_region * _num_evac_fail_regions);
}

void G1RemoveSelfForwardsTask::work(uint worker_id) {
  const uint total_workers = G1CollectedHeap::heap()->workers()->active_workers();
  const uint total_chunks = _num_chunks_per_region * _num_evac_fail_regions;
  const uint start_chunk_idx = worker_id * total_chunks / total_workers;

  for (uint i = 0; i < total_chunks; i++) {
    const uint chunk_idx = (start_chunk_idx + i) % total_chunks;
    if (claim_chunk(chunk_idx)) {
      process_chunk(worker_id, chunk_idx);
    }
  }
}
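The staggered start index in `work()` above is the load-balancing trick: each worker begins its sweep at a different offset and wraps around, so workers mostly claim disjoint chunks and only contend near the end of a pass. Below is a minimal standalone sketch of that claiming pattern, with `std::atomic` standing in for `CHeapBitMap::par_set_bit()`; all names here are illustrative, not HotSpot API.

```cpp
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

// Round-robin chunk claiming in the style of G1RemoveSelfForwardsTask::work().
static void worker_pass(std::atomic<bool>* claimed, unsigned total_chunks,
                        unsigned worker_id, unsigned total_workers) {
  const unsigned start = worker_id * total_chunks / total_workers;
  for (unsigned i = 0; i < total_chunks; i++) {
    const unsigned idx = (start + i) % total_chunks;
    bool expected = false;
    // compare_exchange plays the role of the atomic bitmap claim.
    if (claimed[idx].compare_exchange_strong(expected, true)) {
      std::printf("worker %u claimed chunk %u\n", worker_id, idx);
    }
  }
}

int main() {
  constexpr unsigned kChunks = 16, kWorkers = 4;
  std::atomic<bool> claimed[kChunks] = {};  // all false
  std::vector<std::thread> pool;
  for (unsigned w = 0; w < kWorkers; w++) {
    pool.emplace_back(worker_pass, claimed, kChunks, w, kWorkers);
  }
  for (auto& t : pool) t.join();
}
```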
@@ -1,61 +0,0 @@
/*
 * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1EVACFAILURE_HPP
#define SHARE_GC_G1_G1EVACFAILURE_HPP

#include "gc/shared/workerThread.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/bitMap.hpp"

class G1CollectedHeap;
class G1ConcurrentMark;
class G1EvacFailureRegions;

// Task to fixup self-forwarding pointers within the objects installed as a result
// of an evacuation failure.
class G1RemoveSelfForwardsTask : public WorkerTask {
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;

  G1EvacFailureRegions* _evac_failure_regions;
  CHeapBitMap _chunk_bitmap;

  uint _num_chunks_per_region;
  uint _num_evac_fail_regions;
  size_t _chunk_size;

  bool claim_chunk(uint chunk_idx) {
    return _chunk_bitmap.par_set_bit(chunk_idx);
  }

  void process_chunk(uint worker_id, uint chunk_idx);

public:
  explicit G1RemoveSelfForwardsTask(G1EvacFailureRegions* evac_failure_regions);

  void work(uint worker_id);
};

#endif // SHARE_GC_G1_G1EVACFAILURE_HPP
@@ -32,10 +32,10 @@

G1SurvRateGroup::G1SurvRateGroup() :
  _stats_arrays_length(0),
  _num_added_regions(0),
  _accum_surv_rate_pred(nullptr),
  _last_pred(0.0),
  _surv_rate_predictors(nullptr),
  _num_added_regions(0) {
  _surv_rate_predictors(nullptr) {
  reset();
  start_adding_regions();
}
@@ -82,12 +82,11 @@ void G1SurvRateGroup::stop_adding_regions() {
  }
}

void G1SurvRateGroup::record_surviving_words(int age_in_group, size_t surv_words) {
  guarantee(0 <= age_in_group && (size_t)age_in_group < _num_added_regions,
            "age_in_group is %d not between 0 and " SIZE_FORMAT, age_in_group, _num_added_regions);
void G1SurvRateGroup::record_surviving_words(uint age, size_t surv_words) {
  assert(is_valid_age(age), "age is %u not between 0 and %u", age, _num_added_regions);

  double surv_rate = (double)surv_words / HeapRegion::GrainWords;
  _surv_rate_predictors[age_in_group]->add(surv_rate);
  _surv_rate_predictors[age]->add(surv_rate);
}

void G1SurvRateGroup::all_surviving_words_recorded(const G1Predictions& predictor, bool update_predictors) {
@@ -49,31 +49,33 @@
// predictors in reverse chronological order as returned by age_in_group(). I.e.
// index 0 contains the rate information for the region retired most recently.
class G1SurvRateGroup : public CHeapObj<mtGC> {
  size_t _stats_arrays_length;
  uint _stats_arrays_length;
  uint _num_added_regions; // The number of regions in this survivor rate group.

  double* _accum_surv_rate_pred;
  double _last_pred;
  TruncatedSeq** _surv_rate_predictors;

  size_t _num_added_regions; // The number of regions in this survivor rate group.

  void fill_in_last_surv_rates();
  void finalize_predictions(const G1Predictions& predictor);

public:
  static const int InvalidAgeIndex = -1;
  static bool is_valid_age_index(int age) { return age >= 0; }
  static const uint InvalidAgeIndex = UINT_MAX;
  bool is_valid_age_index(uint age_index) const {
    return age_index >= 1 && age_index <= _num_added_regions;
  }
  bool is_valid_age(uint age) const { return age < _num_added_regions; }

  G1SurvRateGroup();
  void reset();
  void start_adding_regions();
  void stop_adding_regions();
  void record_surviving_words(int age_in_group, size_t surv_words);
  void record_surviving_words(uint age, size_t surv_words);
  void all_surviving_words_recorded(const G1Predictions& predictor, bool update_predictors);

  double accum_surv_rate_pred(int age) const {
  double accum_surv_rate_pred(uint age) const {
    assert(_stats_arrays_length > 0, "invariant" );
    assert(is_valid_age_index(age), "must be");
    if ((size_t)age < _stats_arrays_length)
    if (age < _stats_arrays_length)
      return _accum_surv_rate_pred[age];
    else {
      double diff = (double)(age - _stats_arrays_length + 1);
@@ -81,22 +83,22 @@ public:
    }
  }

  double surv_rate_pred(G1Predictions const& predictor, int age) const {
    assert(is_valid_age_index(age), "must be");
  double surv_rate_pred(G1Predictions const& predictor, uint age) const {
    assert(is_valid_age(age), "must be");

    age = MIN2(age, (int)_stats_arrays_length - 1);
    // _stats_arrays_length might not be in sync with _num_added_regions in Cleanup pause.
    age = MIN2(age, _stats_arrays_length - 1);

    return predictor.predict_in_unit_interval(_surv_rate_predictors[age]);
  }

  int next_age_index() {
    return (int)++_num_added_regions;
  uint next_age_index() {
    return ++_num_added_regions;
  }

  int age_in_group(int age_index) const {
    int result = (int)(_num_added_regions - age_index);
    assert(is_valid_age_index(result), "invariant" );
    return result;
  uint age_in_group(uint age_index) const {
    assert(is_valid_age_index(age_index), "invariant" );
    return _num_added_regions - age_index;
  }
};
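The indexing scheme changed above hands out age indices 1..N as regions are added, and `age_in_group()` reverses them so that index 0 always denotes the most recently retired region. A toy model of that arithmetic, with simplified names (not the HotSpot types), is shown below.

```cpp
#include <cassert>
#include <cstdio>

int main() {
  unsigned num_added_regions = 0;

  // next_age_index(): indices are handed out starting at 1.
  auto next_age_index = [&]() { return ++num_added_regions; };
  // age_in_group(): reverse-chronological age, 0 == newest region.
  auto age_in_group = [&](unsigned age_index) {
    assert(age_index >= 1 && age_index <= num_added_regions);
    return num_added_regions - age_index;
  };

  unsigned first  = next_age_index();  // 1
  unsigned second = next_age_index();  // 2
  unsigned third  = next_age_index();  // 3

  // The region added last has age 0; the first one added is the oldest.
  std::printf("%u %u %u\n", age_in_group(third), age_in_group(second), age_in_group(first));
  // prints: 0 1 2
}
```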
@@ -29,16 +29,26 @@
#include "gc/g1/g1CardTableEntryClosure.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSetCandidates.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1EvacFailureRegions.hpp"
#include "gc/g1/g1EvacInfo.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1YoungGCPostEvacuateTasks.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "jfr/jfrEvents.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/ticks.hpp"

class G1PostEvacuateCollectionSetCleanupTask1::MergePssTask : public G1AbstractSubTask {
@@ -93,14 +103,183 @@ public:
};

class G1PostEvacuateCollectionSetCleanupTask1::RestoreRetainedRegionsTask : public G1AbstractSubTask {
  G1RemoveSelfForwardsTask _task;
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;

  G1EvacFailureRegions* _evac_failure_regions;
  CHeapBitMap _chunk_bitmap;

  uint _num_chunks_per_region;
  uint _num_evac_fail_regions;
  size_t _chunk_size;

  class PhaseTimesStat {
    static constexpr G1GCPhaseTimes::GCParPhases phase_name =
      G1GCPhaseTimes::RemoveSelfForwards;

    G1GCPhaseTimes* _phase_times;
    uint _worker_id;
    Ticks _start;

  public:
    PhaseTimesStat(G1GCPhaseTimes* phase_times, uint worker_id) :
      _phase_times(phase_times),
      _worker_id(worker_id),
      _start(Ticks::now()) { }

    ~PhaseTimesStat() {
      _phase_times->record_or_add_time_secs(phase_name,
                                            _worker_id,
                                            (Ticks::now() - _start).seconds());
    }

    void register_empty_chunk() {
      _phase_times->record_or_add_thread_work_item(phase_name,
                                                   _worker_id,
                                                   1,
                                                   G1GCPhaseTimes::RemoveSelfForwardEmptyChunksNum);
    }

    void register_nonempty_chunk() {
      _phase_times->record_or_add_thread_work_item(phase_name,
                                                   _worker_id,
                                                   1,
                                                   G1GCPhaseTimes::RemoveSelfForwardChunksNum);
    }

    void register_objects_count_and_size(size_t num_marked_obj, size_t marked_words) {
      _phase_times->record_or_add_thread_work_item(phase_name,
                                                   _worker_id,
                                                   num_marked_obj,
                                                   G1GCPhaseTimes::RemoveSelfForwardObjectsNum);

      size_t marked_bytes = marked_words * HeapWordSize;
      _phase_times->record_or_add_thread_work_item(phase_name,
                                                   _worker_id,
                                                   marked_bytes,
                                                   G1GCPhaseTimes::RemoveSelfForwardObjectsBytes);
    }
  };

  // Fill the memory area from start to end with filler objects, and update the BOT
  // accordingly. Since we clear and use the bitmap for marking objects that failed
  // evacuation, there is no other work to be done there.
  static size_t zap_dead_objects(HeapRegion* hr, HeapWord* start, HeapWord* end) {
    assert(start <= end, "precondition");
    if (start == end) {
      return 0;
    }

    hr->fill_range_with_dead_objects(start, end);
    return pointer_delta(end, start);
  }

  static void update_garbage_words_in_hr(HeapRegion* hr, size_t garbage_words) {
    if (garbage_words != 0) {
      hr->note_self_forward_chunk_done(garbage_words * HeapWordSize);
    }
  }

  static void prefetch_obj(HeapWord* obj_addr) {
    Prefetch::write(obj_addr, PrefetchScanIntervalInBytes);
  }

  bool claim_chunk(uint chunk_idx) {
    return _chunk_bitmap.par_set_bit(chunk_idx);
  }

  void process_chunk(uint worker_id, uint chunk_idx) {
    PhaseTimesStat stat(_g1h->phase_times(), worker_id);

    G1CMBitMap* bitmap = _cm->mark_bitmap();
    const uint region_idx = _evac_failure_regions->get_region_idx(chunk_idx / _num_chunks_per_region);
    HeapRegion* hr = _g1h->region_at(region_idx);

    HeapWord* hr_bottom = hr->bottom();
    HeapWord* hr_top = hr->top();
    HeapWord* chunk_start = hr_bottom + (chunk_idx % _num_chunks_per_region) * _chunk_size;

    assert(chunk_start < hr->end(), "inv");
    if (chunk_start >= hr_top) {
      return;
    }

    HeapWord* chunk_end = MIN2(chunk_start + _chunk_size, hr_top);
    HeapWord* first_marked_addr = bitmap->get_next_marked_addr(chunk_start, hr_top);

    size_t garbage_words = 0;

    if (chunk_start == hr_bottom) {
      // This is the bottom-most chunk in this region; zap [bottom, first_marked_addr).
      garbage_words += zap_dead_objects(hr, hr_bottom, first_marked_addr);
    }

    if (first_marked_addr >= chunk_end) {
      stat.register_empty_chunk();
      update_garbage_words_in_hr(hr, garbage_words);
      return;
    }

    stat.register_nonempty_chunk();

    size_t num_marked_objs = 0;
    size_t marked_words = 0;

    HeapWord* obj_addr = first_marked_addr;
    assert(chunk_start <= obj_addr && obj_addr < chunk_end,
           "object " PTR_FORMAT " must be within chunk [" PTR_FORMAT ", " PTR_FORMAT "[",
           p2i(obj_addr), p2i(chunk_start), p2i(chunk_end));
    do {
      assert(bitmap->is_marked(obj_addr), "inv");
      prefetch_obj(obj_addr);

      oop obj = cast_to_oop(obj_addr);
      const size_t obj_size = obj->size();
      HeapWord* const obj_end_addr = obj_addr + obj_size;

      {
        // Process marked object.
        assert(obj->is_forwarded() && obj->forwardee() == obj, "must be self-forwarded");
        obj->init_mark();
        hr->update_bot_for_block(obj_addr, obj_end_addr);

        // Statistics
        num_marked_objs++;
        marked_words += obj_size;
      }

      assert(obj_end_addr <= hr_top, "inv");
      // Use hr_top as the limit so that we zap dead ranges up to the next
      // marked obj or hr_top.
      HeapWord* next_marked_obj_addr = bitmap->get_next_marked_addr(obj_end_addr, hr_top);
      garbage_words += zap_dead_objects(hr, obj_end_addr, next_marked_obj_addr);
      obj_addr = next_marked_obj_addr;
    } while (obj_addr < chunk_end);

    assert(marked_words > 0 && num_marked_objs > 0, "inv");

    stat.register_objects_count_and_size(num_marked_objs, marked_words);

    update_garbage_words_in_hr(hr, garbage_words);
  }

public:
  RestoreRetainedRegionsTask(G1EvacFailureRegions* evac_failure_regions) :
    G1AbstractSubTask(G1GCPhaseTimes::RestoreRetainedRegions),
    _task(evac_failure_regions),
    _evac_failure_regions(evac_failure_regions) {
    _g1h(G1CollectedHeap::heap()),
    _cm(_g1h->concurrent_mark()),
    _evac_failure_regions(evac_failure_regions),
    _chunk_bitmap(mtGC) {

    _num_evac_fail_regions = _evac_failure_regions->num_regions_failed_evacuation();
    _num_chunks_per_region = G1CollectedHeap::get_chunks_per_region();

    _chunk_size = static_cast<uint>(HeapRegion::GrainWords / _num_chunks_per_region);

    log_debug(gc, ergo)("Initializing removing self forwards with %u chunks per region",
                        _num_chunks_per_region);

    _chunk_bitmap.resize(_num_chunks_per_region * _num_evac_fail_regions);
  }

  double worker_cost() const override {
@@ -111,7 +290,16 @@ public:
  }

  void do_work(uint worker_id) override {
    _task.work(worker_id);
    const uint total_workers = G1CollectedHeap::heap()->workers()->active_workers();
    const uint total_chunks = _num_chunks_per_region * _num_evac_fail_regions;
    const uint start_chunk_idx = worker_id * total_chunks / total_workers;

    for (uint i = 0; i < total_chunks; i++) {
      const uint chunk_idx = (start_chunk_idx + i) % total_chunks;
      if (claim_chunk(chunk_idx)) {
        process_chunk(worker_id, chunk_idx);
      }
    }
  }
};
@@ -26,7 +26,6 @@
#define SHARE_GC_G1_G1YOUNGGCPOSTEVACUATETASKS_HPP

#include "gc/g1/g1BatchedTask.hpp"
#include "gc/g1/g1EvacFailure.hpp"

class FreeCSetStats;
@@ -244,7 +244,7 @@ private:
  // Data for young region survivor prediction.
  uint _young_index_in_cset;
  G1SurvRateGroup* _surv_rate_group;
  int _age_index;
  uint _age_index;

  // NUMA node.
  uint _node_index;
@@ -507,7 +507,7 @@ public:
    _young_index_in_cset = index;
  }

  int age_in_surv_rate_group() const;
  uint age_in_surv_rate_group() const;
  bool has_valid_age_in_surv_rate() const;

  bool has_surv_rate_group() const;
@@ -501,14 +501,14 @@ HeapWord* HeapRegion::oops_on_memregion_seq_iterate_careful(MemRegion mr,
  return oops_on_memregion_iterate<Closure, in_gc_pause>(mr, cl);
}

inline int HeapRegion::age_in_surv_rate_group() const {
inline uint HeapRegion::age_in_surv_rate_group() const {
  assert(has_surv_rate_group(), "pre-condition");
  assert(has_valid_age_in_surv_rate(), "pre-condition");
  return _surv_rate_group->age_in_group(_age_index);
}

inline bool HeapRegion::has_valid_age_in_surv_rate() const {
  return G1SurvRateGroup::is_valid_age_index(_age_index);
  return _surv_rate_group->is_valid_age_index(_age_index);
}

inline bool HeapRegion::has_surv_rate_group() const {
@@ -537,15 +537,13 @@ inline void HeapRegion::uninstall_surv_rate_group() {
    _surv_rate_group = nullptr;
    _age_index = G1SurvRateGroup::InvalidAgeIndex;
  } else {
    assert(!has_valid_age_in_surv_rate(), "pre-condition");
    assert(_age_index == G1SurvRateGroup::InvalidAgeIndex, "inv");
  }
}

inline void HeapRegion::record_surv_words_in_group(size_t words_survived) {
  assert(has_surv_rate_group(), "pre-condition");
  assert(has_valid_age_in_surv_rate(), "pre-condition");
  int age_in_group = age_in_surv_rate_group();
  _surv_rate_group->record_surviving_words(age_in_group, words_survived);
  uint age = age_in_surv_rate_group();
  _surv_rate_group->record_surviving_words(age, words_survived);
}

#endif // SHARE_GC_G1_HEAPREGION_INLINE_HPP
@@ -48,7 +48,7 @@ class CheckForUnmarkedOops : public BasicOopIterateClosure {
  template <class T> void do_oop_work(T* p) {
    oop obj = RawAccess<>::oop_load(p);
    if (_young_gen->is_in_reserved(obj) &&
        !_card_table->addr_is_marked_imprecise(p)) {
        !_card_table->is_dirty_for_addr(p)) {
      // Don't overwrite the first missing card mark
      if (_unmarked_addr == nullptr) {
        _unmarked_addr = (HeapWord*)p;
@@ -90,7 +90,7 @@ class CheckForUnmarkedObjects : public ObjectClosure {
    CheckForUnmarkedOops object_check(_young_gen, _card_table);
    obj->oop_iterate(&object_check);
    if (object_check.has_unmarked_oop()) {
      guarantee(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
      guarantee(_card_table->is_dirty_for_addr(obj), "Found unmarked young_gen object");
    }
  }
};
@@ -387,22 +387,9 @@ void PSCardTable::verify_all_young_refs_imprecise() {
  old_gen->object_iterate(&check);
}

bool PSCardTable::addr_is_marked_imprecise(void *addr) {
bool PSCardTable::is_dirty_for_addr(void *addr) {
  CardValue* p = byte_for(addr);
  CardValue val = *p;

  if (card_is_dirty(val))
    return true;

  if (card_is_newgen(val))
    return true;

  if (card_is_clean(val))
    return false;

  assert(false, "Found unhandled card mark type");

  return false;
  return is_dirty(p);
}

bool PSCardTable::is_in_young(const void* p) const {
@@ -63,11 +63,6 @@ class PSCardTable: public CardTable {
                          HeapWord* const start,
                          HeapWord* const end);

  enum ExtendedCardValue {
    youngergen_card = CT_MR_BS_last_reserved + 1,
    verify_card     = CT_MR_BS_last_reserved + 5
  };

  void scan_obj_with_limit(PSPromotionManager* pm,
                           oop obj,
                           HeapWord* start,
@@ -77,9 +72,6 @@ class PSCardTable: public CardTable {
  PSCardTable(MemRegion whole_heap) : CardTable(whole_heap),
                                      _preprocessing_active_workers(0) {}

  static CardValue youngergen_card_val() { return youngergen_card; }
  static CardValue verify_card_val()     { return verify_card; }

  // Scavenge support
  void pre_scavenge(HeapWord* old_gen_bottom, uint active_workers);
  // Scavenge contents of stripes with the given index.
@@ -90,31 +82,17 @@ class PSCardTable: public CardTable {
                             uint stripe_index,
                             uint n_stripes);

  bool addr_is_marked_imprecise(void *addr);

  void set_card_newgen(void* addr) { CardValue* p = byte_for(addr); *p = verify_card; }

  // Testers for entries
  static bool card_is_dirty(int value)  { return value == dirty_card; }
  static bool card_is_newgen(int value) { return value == youngergen_card; }
  static bool card_is_clean(int value)  { return value == clean_card; }
  static bool card_is_verify(int value) { return value == verify_card; }
  bool is_dirty_for_addr(void *addr);

  // Card marking
  void inline_write_ref_field_gc(void* field) {
    CardValue* byte = byte_for(field);
    *byte = youngergen_card;
    *byte = dirty_card_val();
  }

  // ReduceInitialCardMarks support
  bool is_in_young(const void* p) const override;

#ifdef ASSERT
  bool is_valid_card_address(CardValue* addr) {
    return (addr >= _byte_map) && (addr < _byte_map + _byte_map_size);
  }
#endif // ASSERT

  // Verification
  void verify_all_young_refs_imprecise();
};
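The simplified `is_dirty_for_addr()` above reduces to a single byte lookup: `byte_for()` maps an address to its card byte and the value is compared against the dirty encoding. Below is a self-contained sketch of that mapping with assumed constants (the real ones live in `CardTable`; 512-byte cards and dirty encoded as 0 match common HotSpot defaults, but treat them as assumptions here).

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

constexpr int kCardShift = 9;        // assumed: 512-byte cards
constexpr uint8_t kDirtyCard = 0;    // assumed encoding
constexpr uint8_t kCleanCard = 0xff;

struct ToyCardTable {
  uint8_t bytes[1024];
  uintptr_t heap_base;

  // Same shape as CardTable::byte_for(): index by card number.
  uint8_t* byte_for(const void* addr) {
    return &bytes[((uintptr_t)addr - heap_base) >> kCardShift];
  }

  bool is_dirty_for_addr(const void* addr) { return *byte_for(addr) == kDirtyCard; }

  // Post-write barrier: dirty the card covering the written field.
  void write_ref(void* field) { *byte_for(field) = kDirtyCard; }
};

int main() {
  static ToyCardTable ct;
  std::memset(ct.bytes, kCleanCard, sizeof(ct.bytes));
  alignas(512) static char heap[4096];
  ct.heap_base = (uintptr_t)heap;

  assert(!ct.is_dirty_for_addr(&heap[1000]));
  ct.write_ref(&heap[1000]);                 // dirties card 1
  assert(ct.is_dirty_for_addr(&heap[600]));  // same 512-byte card as offset 1000
}
```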
@@ -234,14 +234,9 @@ void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
  // Drain overflow stack first, so other threads can steal from
  // claimed stack while we work.
  while (tq->pop_overflow(task)) {
    // In PSCardTable::scavenge_contents_parallel(), when work is distributed
    // among different workers, an object is never split between multiple workers.
    // Therefore, if a worker gets owned a large objArray, it may accumulate
    // many tasks (corresponding to every element in this array) in its
    // task queue. When there are too many overflow tasks, publishing them
    // (via try_push_to_taskqueue()) can incur noticeable overhead in Young GC
    // pause, so it is better to process them locally until large-objArray-splitting is implemented.
    process_popped_location_depth(task);
    if (!tq->try_push_to_taskqueue(task)) {
      process_popped_location_depth(task);
    }
  }

  while (tq->pop_local(task, threshold)) {
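The comment in the hunk above motivates processing popped overflow tasks locally instead of re-publishing them to the bounded queue. A toy stand-in for the two-level queue, only to illustrate the drain order (none of these types are HotSpot's), is sketched below.

```cpp
#include <cstdio>
#include <deque>
#include <queue>

struct Task { int payload; };

struct ToyTaskQueue {
  std::deque<Task> overflow;  // unbounded overflow stack
  std::queue<Task> local;     // stands in for the bounded ring buffer

  bool pop_overflow(Task& t) {
    if (overflow.empty()) return false;
    t = overflow.back();
    overflow.pop_back();
    return true;
  }
};

static void process(const Task& t) { std::printf("scan %d\n", t.payload); }

void drain(ToyTaskQueue& tq) {
  Task task;
  // Overflow first; process directly rather than paying the publish cost
  // again for task floods such as the elements of one huge objArray.
  while (tq.pop_overflow(task)) {
    process(task);
  }
  while (!tq.local.empty()) {
    process(tq.local.front());
    tq.local.pop();
  }
}
```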
@@ -133,11 +133,6 @@ class PSPromotionManager {
    return &_claimed_stack_depth;
  }

  bool young_gen_is_full() { return _young_gen_is_full; }

  bool old_gen_is_full() { return _old_gen_is_full; }
  void set_old_gen_is_full(bool state) { _old_gen_is_full = state; }

  // Promotion methods
  template<bool promote_immediately> oop copy_to_survivor_space(oop o);
  oop oop_promotion_failed(oop obj, markWord obj_mark);
@@ -26,7 +26,6 @@
#include "gc/serial/serialBlockOffsetTable.inline.hpp"
#include "gc/shared/blockOffsetTable.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/space.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
@@ -34,14 +33,9 @@
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"

//////////////////////////////////////////////////////////////////////
// BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////

BlockOffsetSharedArray::BlockOffsetSharedArray(MemRegion reserved,
                                               size_t init_word_size):
  _reserved(reserved), _end(nullptr)
{
SerialBlockOffsetSharedArray::SerialBlockOffsetSharedArray(MemRegion reserved,
                                                           size_t init_word_size):
  _reserved(reserved) {
  size_t size = compute_size(reserved.word_size());
  ReservedSpace rs(size);
  if (!rs.is_reserved()) {
@@ -53,27 +47,25 @@ BlockOffsetSharedArray::BlockOffsetSharedArray(MemRegion reserved,
  if (!_vs.initialize(rs, 0)) {
    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
  }
  _offset_array = (u_char*)_vs.low_boundary();
  _offset_array = (uint8_t*)_vs.low_boundary();
  resize(init_word_size);
  log_trace(gc, bot)("BlockOffsetSharedArray::BlockOffsetSharedArray: ");
  log_trace(gc, bot)("SerialBlockOffsetSharedArray::SerialBlockOffsetSharedArray: ");
  log_trace(gc, bot)(" rs.base(): " PTR_FORMAT " rs.size(): " SIZE_FORMAT_X_0 " rs end(): " PTR_FORMAT,
                     p2i(rs.base()), rs.size(), p2i(rs.base() + rs.size()));
  log_trace(gc, bot)(" _vs.low_boundary(): " PTR_FORMAT " _vs.high_boundary(): " PTR_FORMAT,
                     p2i(_vs.low_boundary()), p2i(_vs.high_boundary()));
}

void BlockOffsetSharedArray::resize(size_t new_word_size) {
void SerialBlockOffsetSharedArray::resize(size_t new_word_size) {
  assert(new_word_size <= _reserved.word_size(), "Resize larger than reserved");
  size_t new_size = compute_size(new_word_size);
  size_t old_size = _vs.committed_size();
  size_t delta;
  char* high = _vs.high();
  _end = _reserved.start() + new_word_size;
  if (new_size > old_size) {
    delta = ReservedSpace::page_align_size_up(new_size - old_size);
    assert(delta > 0, "just checking");
    if (!_vs.expand_by(delta)) {
      // Do better than this for Merlin
      vm_exit_out_of_memory(delta, OOM_MMAP_ERROR, "offset table expansion");
    }
    assert(_vs.high() == high + delta, "invalid expansion");
@@ -85,384 +77,109 @@ void BlockOffsetSharedArray::resize(size_t new_word_size) {
  }
}

bool BlockOffsetSharedArray::is_card_boundary(HeapWord* p) const {
  assert(p >= _reserved.start(), "just checking");
  size_t delta = pointer_delta(p, _reserved.start());
  return (delta & right_n_bits((int)BOTConstants::log_card_size_in_words())) == (size_t)NoBits;
}
// Write the backskip value for each logarithmic region (array slots containing the same entry value).
//
//    offset
//    card             2nd                       3rd
//     | +- 1st        |                         |
//     v v             v                         v
//    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
//    |x|0|0|0|0|0|0|0|1|1|1|1|1|1| ... |1|1|1|1|2|2|2|2|2|2| ...
//    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
//    11              19                        75
//      12
//
//    offset card is the card that points to the start of an object
//      x - offset value of offset card
//    1st - start of first logarithmic region
//      0 corresponds to logarithmic value N_words + 0 and 2**(3 * 0) = 1
//    2nd - start of second logarithmic region
//      1 corresponds to logarithmic value N_words + 1 and 2**(3 * 1) = 8
//    3rd - start of third logarithmic region
//      2 corresponds to logarithmic value N_words + 2 and 2**(3 * 2) = 64
//
//    integer below the block offset entry is an example of
//    the index of the entry
//
//    Given an address,
//      Find the index for the address
//      Find the block offset table entry
//      Convert the entry to a back slide
//        (e.g., with today's, offset = 0x81 =>
//          back slip = 2**(3*(0x81 - N_words)) = 2**3) = 8
//      Move back N (e.g., 8) entries and repeat with the
//        value of the new entry
//
void SerialBlockOffsetTable::update_for_block_work(HeapWord* blk_start,
                                                   HeapWord* blk_end) {
  HeapWord* const cur_card_boundary = align_up_by_card_size(blk_start);
  size_t const offset_card = _array->index_for(cur_card_boundary);

  // The first card holds the actual offset.
  _array->set_offset_array(offset_card, cur_card_boundary, blk_start);

//////////////////////////////////////////////////////////////////////
// BlockOffsetArray
//////////////////////////////////////////////////////////////////////
  // Check if this block spans over other cards.
  size_t end_card = _array->index_for(blk_end - 1);
  assert(offset_card <= end_card, "inv");

BlockOffsetArray::BlockOffsetArray(BlockOffsetSharedArray* array,
                                   MemRegion mr, bool init_to_zero_) :
  BlockOffsetTable(mr.start(), mr.end()),
  _array(array)
{
  assert(_bottom <= _end, "arguments out of order");
  set_init_to_zero(init_to_zero_);
  if (!init_to_zero_) {
    // initialize cards to point back to mr.start()
    set_remainder_to_point_to_start(mr.start() + BOTConstants::card_size_in_words(), mr.end());
    _array->set_offset_array(0, 0);  // set first card to 0
  }
}
  if (offset_card != end_card) {
    // Handling remaining cards.
    size_t start_card_for_region = offset_card + 1;
    for (uint i = 0; i < BOTConstants::N_powers; i++) {
      // -1 so that the reach ends in this region and not at the start
      // of the next.
      size_t reach = offset_card + BOTConstants::power_to_cards_back(i + 1) - 1;
      uint8_t value = checked_cast<uint8_t>(BOTConstants::card_size_in_words() + i);

// The arguments follow the normal convention of denoting
// a right-open interval: [start, end)
void
BlockOffsetArray::
set_remainder_to_point_to_start(HeapWord* start, HeapWord* end, bool reducing) {

  check_reducing_assertion(reducing);
  if (start >= end) {
    // The start address is equal to the end address (or to
    // the right of the end address) so there are not cards
    // that need to be updated..
    return;
  }

  // Write the backskip value for each region.
  //
  //    offset
  //    card             2nd                       3rd
  //     | +- 1st        |                         |
  //     v v             v                         v
  //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
  //    |x|0|0|0|0|0|0|0|1|1|1|1|1|1| ... |1|1|1|1|2|2|2|2|2|2| ...
  //    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+     +-+-+-+-+-+-+-+-+-+-+-
  //    11              19                        75
  //      12
  //
  //    offset card is the card that points to the start of an object
  //      x - offset value of offset card
  //    1st - start of first logarithmic region
  //      0 corresponds to logarithmic value N_words + 0 and 2**(3 * 0) = 1
  //    2nd - start of second logarithmic region
  //      1 corresponds to logarithmic value N_words + 1 and 2**(3 * 1) = 8
  //    3rd - start of third logarithmic region
  //      2 corresponds to logarithmic value N_words + 2 and 2**(3 * 2) = 64
  //
  //    integer below the block offset entry is an example of
  //    the index of the entry
  //
  //    Given an address,
  //      Find the index for the address
  //      Find the block offset table entry
  //      Convert the entry to a back slide
  //        (e.g., with today's, offset = 0x81 =>
  //          back slip = 2**(3*(0x81 - N_words)) = 2**3) = 8
  //      Move back N (e.g., 8) entries and repeat with the
  //        value of the new entry
  //
  size_t start_card = _array->index_for(start);
  size_t end_card = _array->index_for(end-1);
  assert(start ==_array->address_for_index(start_card), "Precondition");
  assert(end ==_array->address_for_index(end_card)+BOTConstants::card_size_in_words(), "Precondition");
  set_remainder_to_point_to_start_incl(start_card, end_card, reducing); // closed interval
}

// Unlike the normal convention in this code, the argument here denotes
// a closed, inclusive interval: [start_card, end_card], cf set_remainder_to_point_to_start()
// above.
void
BlockOffsetArray::set_remainder_to_point_to_start_incl(size_t start_card, size_t end_card, bool reducing) {

  check_reducing_assertion(reducing);
  if (start_card > end_card) {
    return;
  }
  assert(start_card > _array->index_for(_bottom), "Cannot be first card");
  assert(_array->offset_array(start_card-1) <= BOTConstants::card_size_in_words(),
         "Offset card has an unexpected value");
  size_t start_card_for_region = start_card;
  u_char offset = max_jubyte;
  for (uint i = 0; i < BOTConstants::N_powers; i++) {
    // -1 so that the card with the actual offset is counted.  Another -1
    // so that the reach ends in this region and not at the start
    // of the next.
    size_t reach = start_card - 1 + (BOTConstants::power_to_cards_back(i+1) - 1);
    offset = BOTConstants::card_size_in_words() + i;
    if (reach >= end_card) {
      _array->set_offset_array(start_card_for_region, end_card, offset, reducing);
      _array->set_offset_array(start_card_for_region, MIN2(reach, end_card), value);
      start_card_for_region = reach + 1;

      if (reach >= end_card) {
        break;
      }
    }
    assert(start_card_for_region > end_card, "Sanity check");
  }

  debug_only(verify_for_block(blk_start, blk_end);)
}

HeapWord* SerialBlockOffsetTable::block_start_reaching_into_card(const void* addr) const {
  size_t index = _array->index_for(addr);

  uint8_t offset;
  while (true) {
    offset = _array->offset_array(index);

    if (offset < BOTConstants::card_size_in_words()) {
      break;
    }
    _array->set_offset_array(start_card_for_region, reach, offset, reducing);
    start_card_for_region = reach + 1;
  }
  assert(start_card_for_region > end_card, "Sanity check");
  DEBUG_ONLY(check_all_cards(start_card, end_card);)
}

// The card-interval [start_card, end_card] is a closed interval; this
// is an expensive check -- use with care and only under protection of
// suitable flag.
void BlockOffsetArray::check_all_cards(size_t start_card, size_t end_card) const {

  if (end_card < start_card) {
    return;
  }
  guarantee(_array->offset_array(start_card) == BOTConstants::card_size_in_words(), "Wrong value in second card");
  u_char last_entry = BOTConstants::card_size_in_words();
  for (size_t c = start_card + 1; c <= end_card; c++ /* yeah! */) {
    u_char entry = _array->offset_array(c);
    guarantee(entry >= last_entry, "Monotonicity");
    if (c - start_card > BOTConstants::power_to_cards_back(1)) {
      guarantee(entry > BOTConstants::card_size_in_words(), "Should be in logarithmic region");
    }
    size_t backskip = BOTConstants::entry_to_cards_back(entry);
    size_t landing_card = c - backskip;
    guarantee(landing_card >= (start_card - 1), "Inv");
    if (landing_card >= start_card) {
      guarantee(_array->offset_array(landing_card) <= entry, "Monotonicity");
    } else {
      guarantee(landing_card == (start_card - 1), "Tautology");
      // Note that N_words is the maximum offset value
      guarantee(_array->offset_array(landing_card) <= BOTConstants::card_size_in_words(), "Offset value");
    }
    last_entry = entry;  // remember for monotonicity test
  }
}

void
BlockOffsetArray::alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
  assert(blk_start != nullptr && blk_end > blk_start,
         "phantom block");
  single_block(blk_start, blk_end);
}

void
BlockOffsetArray::do_block_internal(HeapWord* blk_start,
                                    HeapWord* blk_end,
                                    bool reducing) {
  assert(_sp->is_in_reserved(blk_start),
         "reference must be into the space");
  assert(_sp->is_in_reserved(blk_end-1),
         "limit must be within the space");
  // This is optimized to make the test fast, assuming we only rarely
  // cross boundaries.
  uintptr_t end_ui = (uintptr_t)(blk_end - 1);
  uintptr_t start_ui = (uintptr_t)blk_start;
  // Calculate the last card boundary preceding end of blk
  intptr_t boundary_before_end = (intptr_t)end_ui;
  clear_bits(boundary_before_end, right_n_bits((int)BOTConstants::log_card_size()));
  if (start_ui <= (uintptr_t)boundary_before_end) {
    // blk starts at or crosses a boundary
    // Calculate index of card on which blk begins
    size_t start_index = _array->index_for(blk_start);
    // Index of card on which blk ends
    size_t end_index = _array->index_for(blk_end - 1);
    // Start address of card on which blk begins
    HeapWord* boundary = _array->address_for_index(start_index);
    assert(boundary <= blk_start, "blk should start at or after boundary");
    if (blk_start != boundary) {
      // blk starts strictly after boundary
      // adjust card boundary and start_index forward to next card
      boundary += BOTConstants::card_size_in_words();
      start_index++;
    }
    assert(start_index <= end_index, "monotonicity of index_for()");
    assert(boundary <= (HeapWord*)boundary_before_end, "tautology");
    _array->set_offset_array(start_index, boundary, blk_start, reducing);
    // We have finished marking the "offset card". We need to now
    // mark the subsequent cards that this blk spans.
    if (start_index < end_index) {
      HeapWord* rem_st = _array->address_for_index(start_index) + BOTConstants::card_size_in_words();
      HeapWord* rem_end = _array->address_for_index(end_index) + BOTConstants::card_size_in_words();
      set_remainder_to_point_to_start(rem_st, rem_end, reducing);
    }
  }
}

// The range [blk_start, blk_end) represents a single contiguous block
// of storage; modify the block offset table to represent this
// information; Right-open interval: [blk_start, blk_end)
// NOTE: this method does _not_ adjust _unallocated_block.
void
BlockOffsetArray::single_block(HeapWord* blk_start,
                               HeapWord* blk_end) {
  do_block_internal(blk_start, blk_end);
}

void BlockOffsetArray::verify() const {
  // For each entry in the block offset table, verify that
  // the entry correctly finds the start of an object at the
  // first address covered by the block or to the left of that
  // first address.

  size_t next_index = 1;
  size_t last_index = last_active_index();

  // Use for debugging.  Initialize to null to distinguish the
  // first iteration through the while loop.
  HeapWord* last_p = nullptr;
  HeapWord* last_start = nullptr;
  oop last_o = nullptr;

  while (next_index <= last_index) {
    // Use an address past the start of the address for
    // the entry.
    HeapWord* p = _array->address_for_index(next_index) + 1;
    if (p >= _end) {
      // That's all of the allocated block table.
      return;
    }
    // block_start() asserts that start <= p.
    HeapWord* start = block_start(p);
    // First check if the start is an allocated block and only
    // then if it is a valid object.
    oop o = cast_to_oop(start);
    assert(!Universe::is_fully_initialized() ||
           _sp->is_free_block(start) ||
           oopDesc::is_oop_or_null(o), "Bad object was found");
    next_index++;
    last_p = p;
    last_start = start;
    last_o = o;
  }
}

//////////////////////////////////////////////////////////////////////
// BlockOffsetArrayContigSpace
//////////////////////////////////////////////////////////////////////

HeapWord* BlockOffsetArrayContigSpace::block_start_unsafe(const void* addr) const {
  assert(_array->offset_array(0) == 0, "objects can't cross covered areas");

  // Otherwise, find the block start using the table.
  assert(_bottom <= addr && addr < _end,
         "addr must be covered by this Array");
  size_t index = _array->index_for(addr);
  // We must make sure that the offset table entry we use is valid.  If
  // "addr" is past the end, start at the last known one and go forward.
  index = MIN2(index, _next_offset_index-1);
  HeapWord* q = _array->address_for_index(index);

  uint offset = _array->offset_array(index);    // Extend u_char to uint.
  while (offset > BOTConstants::card_size_in_words()) {
    // The excess of the offset from N_words indicates a power of Base
    // to go back by.
    size_t n_cards_back = BOTConstants::entry_to_cards_back(offset);
    q -= (BOTConstants::card_size_in_words() * n_cards_back);
    assert(q >= _sp->bottom(), "Went below bottom!");
    index -= n_cards_back;
    offset = _array->offset_array(index);
  }
  while (offset == BOTConstants::card_size_in_words()) {
    assert(q >= _sp->bottom(), "Went below bottom!");
    q -= BOTConstants::card_size_in_words();
    index--;
    offset = _array->offset_array(index);
  }
  assert(offset < BOTConstants::card_size_in_words(), "offset too large");
  q -= offset;
  HeapWord* n = q;

  while (n <= addr) {
    debug_only(HeapWord* last = q);   // for debugging
    q = n;
    n += _sp->block_size(n);
  }
  assert(q <= addr, "wrong order for current and arg");
  assert(addr <= n, "wrong order for arg and next");
  return q;
}

//
//              _next_offset_threshold
//              |   _next_offset_index
//              v   v
//      +-------+-------+-------+-------+-------+
//      | i-1   |   i   | i+1   | i+2   | i+3   |
//      +-------+-------+-------+-------+-------+
//       ( ^    ]
//         block-start
//

void BlockOffsetArrayContigSpace::alloc_block_work(HeapWord* blk_start,
                                                   HeapWord* blk_end) {
  assert(blk_start != nullptr && blk_end > blk_start,
         "phantom block");
  assert(blk_end > _next_offset_threshold,
         "should be past threshold");
  assert(blk_start <= _next_offset_threshold,
         "blk_start should be at or before threshold");
  assert(pointer_delta(_next_offset_threshold, blk_start) <= BOTConstants::card_size_in_words(),
         "offset should be <= BlockOffsetSharedArray::N");
  assert(_sp->is_in_reserved(blk_start),
         "reference must be into the space");
  assert(_sp->is_in_reserved(blk_end-1),
         "limit must be within the space");
  assert(_next_offset_threshold ==
         _array->_reserved.start() + _next_offset_index*BOTConstants::card_size_in_words(),
         "index must agree with threshold");

  debug_only(size_t orig_next_offset_index = _next_offset_index;)

  // Mark the card that holds the offset into the block.  Note
  // that _next_offset_index and _next_offset_threshold are not
  // updated until the end of this method.
  _array->set_offset_array(_next_offset_index,
                           _next_offset_threshold,
                           blk_start);

  // We need to now mark the subsequent cards that this blk spans.

  // Index of card on which blk ends.
  size_t end_index = _array->index_for(blk_end - 1);

  // Are there more cards left to be updated?
  if (_next_offset_index + 1 <= end_index) {
    HeapWord* rem_st  = _array->address_for_index(_next_offset_index + 1);
    // Calculate rem_end this way because end_index
    // may be the last valid index in the covered region.
    HeapWord* rem_end = _array->address_for_index(end_index) + BOTConstants::card_size_in_words();
    set_remainder_to_point_to_start(rem_st, rem_end);
  }

  // _next_offset_index and _next_offset_threshold updated here.
  _next_offset_index = end_index + 1;
  // Calculate _next_offset_threshold this way because end_index
  // may be the last valid index in the covered region.
  _next_offset_threshold = _array->address_for_index(end_index) + BOTConstants::card_size_in_words();
  assert(_next_offset_threshold >= blk_end, "Incorrect offset threshold");
  HeapWord* q = _array->address_for_index(index);
  return q - offset;
}

#ifdef ASSERT
  // The offset can be 0 if the block starts on a boundary.  That
  // is checked by an assertion above.
  size_t start_index = _array->index_for(blk_start);
  HeapWord* boundary = _array->address_for_index(start_index);
  assert((_array->offset_array(orig_next_offset_index) == 0 &&
          blk_start == boundary) ||
         (_array->offset_array(orig_next_offset_index) > 0 &&
          _array->offset_array(orig_next_offset_index) <= BOTConstants::card_size_in_words()),
         "offset array should have been set");
  for (size_t j = orig_next_offset_index + 1; j <= end_index; j++) {
    assert(_array->offset_array(j) > 0 &&
           _array->offset_array(j) <= (u_char) (BOTConstants::card_size_in_words()+BOTConstants::N_powers-1),
           "offset array should have been set");
void SerialBlockOffsetTable::verify_for_block(HeapWord* blk_start, HeapWord* blk_end) const {
  assert(is_crossing_card_boundary(blk_start, blk_end), "precondition");

  const size_t start_card = _array->index_for(align_up_by_card_size(blk_start));
  const size_t end_card = _array->index_for(blk_end - 1);
  // Check cards in [start_card, end_card]
  assert(_array->offset_array(start_card) < BOTConstants::card_size_in_words(), "offset card");

  for (size_t i = start_card + 1; i <= end_card; ++i) {
    const uint8_t prev  = _array->offset_array(i-1);
    const uint8_t value = _array->offset_array(i);
    if (prev != value) {
      assert(value >= prev, "monotonic");
      size_t n_cards_back = BOTConstants::entry_to_cards_back(value);
      assert(start_card == (i - n_cards_back), "inv");
    }
  }
#endif
}

void BlockOffsetArrayContigSpace::initialize_threshold() {
  _next_offset_index = _array->index_for(_bottom);
  _next_offset_index++;
  _next_offset_threshold =
    _array->address_for_index(_next_offset_index);
}

void BlockOffsetArrayContigSpace::zero_bottom_entry() {
  size_t bottom_index = _array->index_for(_bottom);
  _array->set_offset_array(bottom_index, 0);
}

size_t BlockOffsetArrayContigSpace::last_active_index() const {
  return _next_offset_index == 0 ? 0 : _next_offset_index - 1;
}
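The backskip encoding described in the long comment above can be exercised in isolation. Below is a minimal sketch with assumed constants: `LogBase = 3` and a 128-word card, chosen only to match the `2**(3*i)` formula and the `0x81` example in the comment; the real values come from `BOTConstants` and may differ.

```cpp
#include <cassert>
#include <cstddef>

constexpr size_t kNWords  = 128;  // assumed card size in words
constexpr size_t kLogBase = 3;    // assumed, matches 2**(3*i) in the comment

// Entries below kNWords are direct word offsets into the card;
// entry kNWords + i means "skip back 2^(kLogBase*i) cards and decode again".
constexpr size_t power_to_cards_back(size_t i) {
  return size_t(1) << (kLogBase * i);
}

constexpr size_t entry_to_cards_back(size_t entry) {
  return power_to_cards_back(entry - kNWords);
}

int main() {
  assert(entry_to_cards_back(kNWords + 0) == 1);  // 1st logarithmic region
  assert(entry_to_cards_back(kNWords + 1) == 8);  // 2nd logarithmic region
  assert(entry_to_cards_back(kNWords + 2) == 64); // 3rd logarithmic region
  assert(entry_to_cards_back(0x81) == 8);         // the 0x81 example above
}
```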
@@ -36,168 +36,34 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

// The CollectedHeap type requires subtypes to implement a method
// "block_start". For some subtypes, notably generational
// systems using card-table-based write barriers, the efficiency of this
// operation may be important. Implementations of the "BlockOffsetArray"
// class may be useful in providing such efficient implementations.
//
// BlockOffsetTable (abstract)
//   - BlockOffsetArray (abstract)
//     - BlockOffsetArrayContigSpace
//

class ContiguousSpace;

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetTable "interface"
//////////////////////////////////////////////////////////////////////////
class BlockOffsetTable {
class SerialBlockOffsetSharedArray: public CHeapObj<mtGC> {
  friend class VMStructs;
protected:
  // These members describe the region covered by the table.
  friend class SerialBlockOffsetTable;

  // The space this table is covering.
  HeapWord* _bottom;  // == reserved.start
  HeapWord* _end;     // End of currently allocated region.

public:
  // Initialize the table to cover the given space.
  // The contents of the initial table are undefined.
  BlockOffsetTable(HeapWord* bottom, HeapWord* end):
    _bottom(bottom), _end(end) {
    assert(_bottom <= _end, "arguments out of order");
    assert(BOTConstants::card_size() == CardTable::card_size(), "sanity");
  }

  // Note that the committed size of the covered space may have changed,
  // so the table size might also wish to change.
  virtual void resize(size_t new_word_size) = 0;

  virtual void set_bottom(HeapWord* new_bottom) {
    assert(new_bottom <= _end, "new_bottom > _end");
    _bottom = new_bottom;
    resize(pointer_delta(_end, _bottom));
  }

  // Requires "addr" to be contained by a block, and returns the address of
  // the start of that block.
  virtual HeapWord* block_start_unsafe(const void* addr) const = 0;

  // Returns the address of the start of the block containing "addr", or
  // else "null" if it is covered by no block.
  HeapWord* block_start(const void* addr) const;
};

//////////////////////////////////////////////////////////////////////////
// One implementation of "BlockOffsetTable," the BlockOffsetArray,
// divides the covered region into "N"-word subregions (where
// "N" = 2^"LogN"). An array with an entry for each such subregion
// indicates how far back one must go to find the start of the
// chunk that includes the first word of the subregion.
//
// Each BlockOffsetArray is owned by a Space. However, the actual array
// may be shared by several BlockOffsetArrays; this is useful
// when a single resizable area (such as a generation) is divided up into
// several spaces in which contiguous allocation takes place. (Consider,
// for example, the garbage-first generation.)

// Here is the shared array type.
//////////////////////////////////////////////////////////////////////////
// BlockOffsetSharedArray
//////////////////////////////////////////////////////////////////////////
class BlockOffsetSharedArray: public CHeapObj<mtGC> {
  friend class BlockOffsetArray;
  friend class BlockOffsetArrayNonContigSpace;
  friend class BlockOffsetArrayContigSpace;
  friend class VMStructs;

private:
  bool _init_to_zero;

  // The reserved region covered by the shared array.
  // The reserved heap (i.e. old-gen) covered by the shared array.
  MemRegion _reserved;

  // End of the current committed region.
  HeapWord* _end;

  // Array for keeping offsets for retrieving object start fast given an
  // address.
  VirtualSpace _vs;
  u_char* _offset_array;   // byte array keeping backwards offsets
  uint8_t* _offset_array;  // byte array keeping backwards offsets

  void fill_range(size_t start, size_t num_cards, u_char offset) {
  void fill_range(size_t start, size_t num_cards, uint8_t offset) {
    void* start_ptr = &_offset_array[start];
    // If collector is concurrent, special handling may be needed.
    G1GC_ONLY(assert(!UseG1GC, "Shouldn't be here when using G1");)
    memset(start_ptr, offset, num_cards);
  }

protected:
  // Bounds checking accessors:
  // For performance these have to devolve to array accesses in product builds.
  u_char offset_array(size_t index) const {
  uint8_t offset_array(size_t index) const {
    assert(index < _vs.committed_size(), "index out of range");
    return _offset_array[index];
  }
  // An assertion-checking helper method for the set_offset_array() methods below.
  void check_reducing_assertion(bool reducing);

  void set_offset_array(size_t index, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index < _vs.committed_size(), "index out of range");
    assert(!reducing || _offset_array[index] >= offset, "Not reducing");
    _offset_array[index] = offset;
  }

  void set_offset_array(size_t index, HeapWord* high, HeapWord* low, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= BOTConstants::card_size_in_words(), "offset too large");
    assert(!reducing || _offset_array[index] >= (u_char)pointer_delta(high, low),
           "Not reducing");
    _offset_array[index] = (u_char)pointer_delta(high, low);
  }

  void set_offset_array(HeapWord* left, HeapWord* right, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(index_for(right - 1) < _vs.committed_size(),
           "right address out of range");
    assert(left < right, "Heap addresses out of order");
    size_t num_cards = pointer_delta(right, left) >> BOTConstants::log_card_size_in_words();

    fill_range(index_for(left), num_cards, offset);
  }

  void set_offset_array(size_t left, size_t right, u_char offset, bool reducing = false) {
    check_reducing_assertion(reducing);
    assert(right < _vs.committed_size(), "right address out of range");
    assert(left <= right, "indexes out of order");
    size_t num_cards = right - left + 1;

    fill_range(left, num_cards, offset);
  }

  void check_offset_array(size_t index, HeapWord* high, HeapWord* low) const {
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) <= BOTConstants::card_size_in_words(), "offset too large");
    assert(_offset_array[index] == pointer_delta(high, low),
           "Wrong offset");
  }

  bool is_card_boundary(HeapWord* p) const;

  // Return the number of slots needed for an offset array
  // that covers mem_region_words words.
  // We always add an extra slot because if an object
  // ends on a card boundary we put a 0 in the next
  // offset array slot, so we want that slot always
  // to be reserved.
  static size_t compute_size(size_t mem_region_words) {
    assert(mem_region_words % BOTConstants::card_size_in_words() == 0, "precondition");

  size_t compute_size(size_t mem_region_words) {
    size_t number_of_slots = (mem_region_words / BOTConstants::card_size_in_words()) + 1;
    size_t number_of_slots = mem_region_words / BOTConstants::card_size_in_words();
    return ReservedSpace::allocation_align_size_up(number_of_slots);
  }
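The slot arithmetic in compute_size() is easier to follow with concrete numbers. Below is a minimal standalone sketch, assuming the common 64-bit configuration (8-byte HeapWords, 512-byte cards, so 64 words per card); the constants are illustrative, not read from BOTConstants:

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  const size_t card_size_in_words = 64;                        // assumed: 512-byte cards, 8-byte words
  const size_t old_gen_words      = (1024u * 1024 * 1024) / 8; // a 1 GiB old generation
  // One byte-sized offset entry per card covering the reserved region.
  const size_t slots = old_gen_words / card_size_in_words;
  const double overhead_pct = 100.0 * slots / (old_gen_words * 8.0);
  std::printf("%zu cards -> %zu table bytes (%.2f%% of the heap)\n",
              slots, slots, overhead_pct);
  return 0;
}
```

Note that the new variant of compute_size() drops the old `+ 1`, so the table is exactly one byte per card; the removed extra trailing slot existed to hold a 0 entry for objects ending exactly on a card boundary.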
@@ -207,198 +73,79 @@ public:
  // (see "resize" below) up to the size of "_reserved" (which must be at
  // least "init_word_size".) The contents of the initial table are
  // undefined; it is the responsibility of the constituent
  // BlockOffsetTable(s) to initialize cards.
  BlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);
  // SerialBlockOffsetTable(s) to initialize cards.
  SerialBlockOffsetSharedArray(MemRegion reserved, size_t init_word_size);

  // Notes a change in the committed size of the region covered by the
  // table. The "new_word_size" may not be larger than the size of the
  // reserved region this table covers.
  void resize(size_t new_word_size);

  void set_bottom(HeapWord* new_bottom);

  // Whether entries should be initialized to zero. Used currently only for
  // error checking.
  void set_init_to_zero(bool val) { _init_to_zero = val; }
  bool init_to_zero() { return _init_to_zero; }

  // Updates all the BlockOffsetArray's sharing this shared array to
  // reflect the current "top"'s of their spaces.
  void update_offset_arrays();  // Not yet implemented!

  // Return the appropriate index into "_offset_array" for "p".
  size_t index_for(const void* p) const;

  // Return the address indicating the start of the region corresponding to
  // "index" in "_offset_array".
  HeapWord* address_for_index(size_t index) const;

  void set_offset_array(size_t index, HeapWord* high, HeapWord* low) {
    assert(index < _vs.committed_size(), "index out of range");
    assert(high >= low, "addresses out of order");
    assert(pointer_delta(high, low) < BOTConstants::card_size_in_words(), "offset too large");
    _offset_array[index] = checked_cast<uint8_t>(pointer_delta(high, low));
  }

  void set_offset_array(size_t left, size_t right, uint8_t offset) {
    assert(right < _vs.committed_size(), "right address out of range");
    assert(left <= right, "precondition");
    size_t num_cards = right - left + 1;

    fill_range(left, num_cards, offset);
  }
};

class Space;

//////////////////////////////////////////////////////////////////////////
// The BlockOffsetArray whose subtypes use the BlockOffsetSharedArray.
//////////////////////////////////////////////////////////////////////////
class BlockOffsetArray: public BlockOffsetTable {
// SerialBlockOffsetTable divides the covered region into "N"-word subregions (where
// "N" = 2^"LogN"). An array with an entry for each such subregion indicates
// how far back one must go to find the start of the chunk that includes the
// first word of the subregion.
class SerialBlockOffsetTable {
  friend class VMStructs;
protected:
  // The shared array, which is shared with other BlockOffsetArray's
  // corresponding to different spaces within a generation or span of
  // memory.
  BlockOffsetSharedArray* _array;

  // The space that owns this subregion.
  Space* _sp;
  // The array that contains offset values. It reacts to heap resizing.
  SerialBlockOffsetSharedArray* _array;

  // If true, array entries are initialized to 0; otherwise, they are
  // initialized to point backwards to the beginning of the covered region.
  bool _init_to_zero;
  void update_for_block_work(HeapWord* blk_start, HeapWord* blk_end);

  // An assertion-checking helper method for the set_remainder*() methods below.
  void check_reducing_assertion(bool reducing) { _array->check_reducing_assertion(reducing); }

  // Sets the entries
  // corresponding to the cards starting at "start" and ending at "end"
  // to point back to the card before "start": the interval [start, end)
  // is right-open. The last parameter, reducing, indicates whether the
  // updates to individual entries always reduce the entry from a higher
  // to a lower value. (For example this would hold true during a temporal
  // regime during which only block splits were updating the BOT.)
  void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end, bool reducing = false);
  // Same as above, except that the args here are a card _index_ interval
  // that is closed: [start_index, end_index]
  void set_remainder_to_point_to_start_incl(size_t start, size_t end, bool reducing = false);

  // A helper function for BOT adjustment/verification work
  void do_block_internal(HeapWord* blk_start, HeapWord* blk_end, bool reducing = false);

public:
  // The space may not have its bottom and top set yet, which is why the
  // region is passed as a parameter. If "init_to_zero" is true, the
  // elements of the array are initialized to zero. Otherwise, they are
  // initialized to point backwards to the beginning.
  BlockOffsetArray(BlockOffsetSharedArray* array, MemRegion mr,
                   bool init_to_zero_);

  // Note: this ought to be part of the constructor, but that would require
  // "this" to be passed as a parameter to a member constructor for
  // the containing concrete subtype of Space.
  // This would be legal C++, but MS VC++ doesn't allow it.
  void set_space(Space* sp) { _sp = sp; }

  // Resets the covered region to the given "mr".
  void set_region(MemRegion mr) {
    _bottom = mr.start();
    _end = mr.end();
  static HeapWord* align_up_by_card_size(HeapWord* const addr) {
    return align_up(addr, BOTConstants::card_size());
  }

  // Note that the committed size of the covered space may have changed,
  // so the table size might also wish to change.
  virtual void resize(size_t new_word_size) {
    HeapWord* new_end = _bottom + new_word_size;
    if (_end < new_end && !init_to_zero()) {
      // verify that the old and new boundaries are also card boundaries
      assert(_array->is_card_boundary(_end),
             "_end not a card boundary");
      assert(_array->is_card_boundary(new_end),
             "new _end would not be a card boundary");
      // set all the newly added cards
      _array->set_offset_array(_end, new_end, BOTConstants::card_size_in_words());
    }
    _end = new_end;  // update _end
  void verify_for_block(HeapWord* blk_start, HeapWord* blk_end) const;

public:
  // Initialize the table to cover the given space.
  // The contents of the initial table are undefined.
  SerialBlockOffsetTable(SerialBlockOffsetSharedArray* array) : _array(array) {
    assert(BOTConstants::card_size() == CardTable::card_size(), "sanity");
  }

  // Adjust the BOT to show that it has a single block in the
  // range [blk_start, blk_start + size). All necessary BOT
  // cards are adjusted, but _unallocated_block isn't.
  void single_block(HeapWord* blk_start, HeapWord* blk_end);
  void single_block(HeapWord* blk, size_t size) {
    single_block(blk, blk + size);
  static bool is_crossing_card_boundary(HeapWord* const obj_start,
                                        HeapWord* const obj_end) {
    HeapWord* cur_card_boundary = align_up_by_card_size(obj_start);
    // Strictly greater-than, since we check if this block *crosses* card boundary.
    return obj_end > cur_card_boundary;
  }

  // When the alloc_block() call returns, the block offset table should
  // have enough information such that any subsequent block_start() call
  // with an argument equal to an address that is within the range
  // [blk_start, blk_end) would return the value blk_start, provided
  // there have been no calls in between that reset this information
  // (e.g. see BlockOffsetArrayNonContigSpace::single_block() call
  // for an appropriate range covering the said interval).
  // These methods expect to be called with [blk_start, blk_end)
  // representing a block of memory in the heap.
  virtual void alloc_block(HeapWord* blk_start, HeapWord* blk_end);
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }
  // Returns the address of the start of the block reaching into the card containing
  // "addr".
  HeapWord* block_start_reaching_into_card(const void* addr) const;

  // If true, initialize array slots with no allocated blocks to zero.
  // Otherwise, make them point back to the front.
  bool init_to_zero() { return _init_to_zero; }
  // Corresponding setter
  void set_init_to_zero(bool val) {
    _init_to_zero = val;
    assert(_array != nullptr, "_array should be non-null");
    _array->set_init_to_zero(val);
  }

  // Debugging
  // Return the index of the last entry in the "active" region.
  virtual size_t last_active_index() const = 0;
  // Verify the block offset table
  void verify() const;
  void check_all_cards(size_t left_card, size_t right_card) const;
};
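To make "how far back one must go" concrete, here is a deliberately simplified toy model of such a table. The linear cards-back decoding below stands in for BOTConstants::entry_to_cards_back, whose real encoding is logarithmic (hence N_powers in the verification code earlier); the names and the encoding here are illustrative only, not the JDK's:

```cpp
#include <cstddef>
#include <vector>

constexpr size_t kCardWords = 64;  // assumed card size in words

struct ToyBlockOffsetTable {
  std::vector<unsigned char> entries;  // one byte per card

  // Word index of the start of the block reaching into the card holding `w`.
  size_t block_start(size_t w) const {
    size_t card = w / kCardWords;
    // Entries above kCardWords say "the answer lies some cards to the left".
    while (entries[card] > kCardWords) {
      card -= entries[card] - kCardWords;  // toy decoding of "cards back"
    }
    // A small entry is the distance, in words, from the card boundary
    // back to the block start.
    return card * kCardWords - entries[card];
  }
};
```

For example, a block starting at word 100 that spans several cards would set entry 28 on the card beginning at word 128 (128 - 100), and cards-back codes on the cards after it.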
////////////////////////////////////////////////////////////////////////////
// A subtype of BlockOffsetArray that takes advantage of the fact
// that its underlying space is a ContiguousSpace, so that its "active"
// region can be more efficiently tracked (than for a non-contiguous space).
////////////////////////////////////////////////////////////////////////////
class BlockOffsetArrayContigSpace: public BlockOffsetArray {
  friend class VMStructs;
 private:
  // allocation boundary at which offset array must be updated
  HeapWord* _next_offset_threshold;
  size_t _next_offset_index;  // index corresponding to that boundary

  // Work function when allocation start crosses threshold.
  void alloc_block_work(HeapWord* blk_start, HeapWord* blk_end);

 public:
  BlockOffsetArrayContigSpace(BlockOffsetSharedArray* array, MemRegion mr):
    BlockOffsetArray(array, mr, true) {
    _next_offset_threshold = nullptr;
    _next_offset_index = 0;
  }

  void set_contig_space(ContiguousSpace* sp) { set_space((Space*)sp); }

  // Initialize the threshold for an empty heap.
  void initialize_threshold();
  // Zero out the entry for _bottom (offset will be zero)
  void zero_bottom_entry();

  // Return the next threshold, the point at which the table should be
  // updated.
  HeapWord* threshold() const { return _next_offset_threshold; }

  // In general, these methods expect to be called with
  // [blk_start, blk_end) representing a block of memory in the heap.
  // In this implementation, however, we are OK even if blk_start and/or
  // blk_end are null because null is represented as 0, and thus
  // never exceeds the "_next_offset_threshold".
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (blk_end > _next_offset_threshold) {
      alloc_block_work(blk_start, blk_end);
  void update_for_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (is_crossing_card_boundary(blk_start, blk_end)) {
      update_for_block_work(blk_start, blk_end);
    }
  }
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }

  HeapWord* block_start_unsafe(const void* addr) const;

  // Debugging support
  virtual size_t last_active_index() const;
};

#endif // SHARE_GC_SERIAL_SERIALBLOCKOFFSETTABLE_HPP
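The update_for_block() guard above is the fast path taken on every old-gen allocation, so it is worth restating in isolation. Below is a sketch on plain integer addresses with an assumed 512-byte card; it mirrors is_crossing_card_boundary() but is not the JDK's align_up:

```cpp
#include <cstddef>

constexpr size_t kCardBytes = 512;  // assumed card size

constexpr size_t align_up_by_card(size_t addr) {
  return (addr + kCardBytes - 1) & ~(kCardBytes - 1);
}

constexpr bool is_crossing_card_boundary(size_t start, size_t end) {
  // Strictly greater-than: a block ending exactly on the next boundary
  // has not reached into the following card.
  return end > align_up_by_card(start);
}

static_assert(!is_crossing_card_boundary(8, 512), "ends on the boundary: no update");
static_assert( is_crossing_card_boundary(8, 520), "reaches into the next card");
static_assert( is_crossing_card_boundary(512, 520), "starts on a boundary: card entry needed");
```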
@@ -27,24 +27,7 @@

#include "gc/serial/serialBlockOffsetTable.hpp"

#include "gc/shared/space.hpp"
#include "runtime/safepoint.hpp"

//////////////////////////////////////////////////////////////////////////
// BlockOffsetTable inlines
//////////////////////////////////////////////////////////////////////////
inline HeapWord* BlockOffsetTable::block_start(const void* addr) const {
  if (addr >= _bottom && addr < _end) {
    return block_start_unsafe(addr);
  } else {
    return nullptr;
  }
}

//////////////////////////////////////////////////////////////////////////
// BlockOffsetSharedArray inlines
//////////////////////////////////////////////////////////////////////////
inline size_t BlockOffsetSharedArray::index_for(const void* p) const {
inline size_t SerialBlockOffsetSharedArray::index_for(const void* p) const {
  char* pc = (char*)p;
  assert(pc >= (char*)_reserved.start() &&
         pc < (char*)_reserved.end(),
@@ -55,7 +38,7 @@ inline size_t BlockOffsetSharedArray::index_for(const void* p) const {
  return result;
}

inline HeapWord* BlockOffsetSharedArray::address_for_index(size_t index) const {
inline HeapWord* SerialBlockOffsetSharedArray::address_for_index(size_t index) const {
  assert(index < _vs.committed_size(), "bad index");
  HeapWord* result = _reserved.start() + (index << BOTConstants::log_card_size_in_words());
  assert(result >= _reserved.start() && result < _reserved.end(),
@@ -63,10 +46,4 @@ inline HeapWord* BlockOffsetSharedArray::address_for_index(size_t index) const {
  return result;
}

inline void BlockOffsetSharedArray::check_reducing_assertion(bool reducing) {
  assert(reducing || !SafepointSynchronize::is_at_safepoint() || init_to_zero() ||
         Thread::current()->is_VM_thread() ||
         Thread::current()->is_ConcurrentGC_thread(), "Crack");
}

#endif // SHARE_GC_SERIAL_SERIALBLOCKOFFSETTABLE_INLINE_HPP
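index_for() and address_for_index() are inverse mappings between covered addresses and card indices; the shifts work only because the card size is a power of two. Restated on word-sized integers (assumed: 64 words per card, so log_card_size_in_words == 6; names are illustrative):

```cpp
#include <cassert>
#include <cstddef>

constexpr size_t kLogCardWords = 6;

constexpr size_t index_for(size_t reserved_start_word, size_t word) {
  return (word - reserved_start_word) >> kLogCardWords;   // card index of `word`
}

constexpr size_t address_for_index(size_t reserved_start_word, size_t index) {
  return reserved_start_word + (index << kLogCardWords);  // first word of card `index`
}

int main() {
  const size_t base = 1024;                  // illustrative reserved start (in words)
  assert(index_for(base, base + 100) == 1);  // word 100 falls in the second card
  assert(address_for_index(base, 1) == base + 64);
  return 0;
}
```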
@@ -295,8 +295,8 @@ TenuredGeneration::TenuredGeneration(ReservedSpace rs,
  assert((uintptr_t(start) & 3) == 0, "bad alignment");
  assert((reserved_byte_size & 3) == 0, "bad alignment");
  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
  _bts = new BlockOffsetSharedArray(reserved_mr,
                                    heap_word_size(initial_byte_size));
  _bts = new SerialBlockOffsetSharedArray(reserved_mr,
                                          heap_word_size(initial_byte_size));
  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  _rs->resize_covered_region(committed_mr);

@@ -474,11 +474,10 @@ void TenuredGeneration::object_iterate(ObjectClosure* blk) {
void TenuredGeneration::complete_loaded_archive_space(MemRegion archive_space) {
  // Create the BOT for the archive space.
  TenuredSpace* space = _the_space;
  space->initialize_threshold();
  HeapWord* start = archive_space.start();
  while (start < archive_space.end()) {
    size_t word_size = cast_to_oop(start)->size();
    space->alloc_block(start, start + word_size);
    space->update_for_block(start, start + word_size);
    start += word_size;
  }
}
@@ -31,7 +31,7 @@
#include "gc/shared/generationCounters.hpp"
#include "utilities/macros.hpp"

class BlockOffsetSharedArray;
class SerialBlockOffsetSharedArray;
class CardTableRS;
class ContiguousSpace;

@@ -50,7 +50,7 @@ class TenuredGeneration: public Generation {
  // This is shared with other generations.
  CardTableRS* _rs;
  // This is local to this generation.
  BlockOffsetSharedArray* _bts;
  SerialBlockOffsetSharedArray* _bts;

  // Current shrinking effect: this damps shrinking when the heap gets empty.
  size_t _shrink_factor;
@@ -29,38 +29,31 @@
#include "gc/serial/serialHeap.hpp"
#include "gc/serial/tenuredGeneration.hpp"

#define VM_STRUCTS_SERIALGC(nonstatic_field, \
                            volatile_nonstatic_field, \
                            static_field) \
  nonstatic_field(TenuredGeneration, _rs, CardTableRS*) \
  nonstatic_field(TenuredGeneration, _bts, BlockOffsetSharedArray*) \
  nonstatic_field(TenuredGeneration, _shrink_factor, size_t) \
  nonstatic_field(TenuredGeneration, _capacity_at_prologue, size_t) \
  nonstatic_field(TenuredGeneration, _used_at_prologue, size_t) \
  nonstatic_field(TenuredGeneration, _min_heap_delta_bytes, size_t) \
  nonstatic_field(TenuredGeneration, _the_space, TenuredSpace*) \
  \
  nonstatic_field(DefNewGeneration, _old_gen, Generation*) \
  nonstatic_field(DefNewGeneration, _tenuring_threshold, uint) \
  nonstatic_field(DefNewGeneration, _age_table, AgeTable) \
  nonstatic_field(DefNewGeneration, _eden_space, ContiguousSpace*) \
  nonstatic_field(DefNewGeneration, _from_space, ContiguousSpace*) \
  nonstatic_field(DefNewGeneration, _to_space, ContiguousSpace*) \
  \
  nonstatic_field(BlockOffsetTable, _bottom, HeapWord*) \
  nonstatic_field(BlockOffsetTable, _end, HeapWord*) \
  \
  nonstatic_field(BlockOffsetSharedArray, _reserved, MemRegion) \
  nonstatic_field(BlockOffsetSharedArray, _end, HeapWord*) \
  nonstatic_field(BlockOffsetSharedArray, _vs, VirtualSpace) \
  nonstatic_field(BlockOffsetSharedArray, _offset_array, u_char*) \
  \
  nonstatic_field(BlockOffsetArray, _array, BlockOffsetSharedArray*) \
  nonstatic_field(BlockOffsetArray, _sp, Space*) \
  nonstatic_field(BlockOffsetArrayContigSpace, _next_offset_threshold, HeapWord*) \
  nonstatic_field(BlockOffsetArrayContigSpace, _next_offset_index, size_t) \
  \
  nonstatic_field(TenuredSpace, _offsets, BlockOffsetArray)
#define VM_STRUCTS_SERIALGC(nonstatic_field, \
                            volatile_nonstatic_field, \
                            static_field) \
  nonstatic_field(TenuredGeneration, _rs, CardTableRS*) \
  nonstatic_field(TenuredGeneration, _bts, SerialBlockOffsetSharedArray*) \
  nonstatic_field(TenuredGeneration, _shrink_factor, size_t) \
  nonstatic_field(TenuredGeneration, _capacity_at_prologue, size_t) \
  nonstatic_field(TenuredGeneration, _used_at_prologue, size_t) \
  nonstatic_field(TenuredGeneration, _min_heap_delta_bytes, size_t) \
  nonstatic_field(TenuredGeneration, _the_space, TenuredSpace*) \
  \
  nonstatic_field(DefNewGeneration, _old_gen, Generation*) \
  nonstatic_field(DefNewGeneration, _tenuring_threshold, uint) \
  nonstatic_field(DefNewGeneration, _age_table, AgeTable) \
  nonstatic_field(DefNewGeneration, _eden_space, ContiguousSpace*) \
  nonstatic_field(DefNewGeneration, _from_space, ContiguousSpace*) \
  nonstatic_field(DefNewGeneration, _to_space, ContiguousSpace*) \
  \
  nonstatic_field(SerialBlockOffsetTable, _array, SerialBlockOffsetSharedArray*) \
  \
  nonstatic_field(SerialBlockOffsetSharedArray, _reserved, MemRegion) \
  nonstatic_field(SerialBlockOffsetSharedArray, _vs, VirtualSpace) \
  nonstatic_field(SerialBlockOffsetSharedArray, _offset_array, u_char*) \
  \
  nonstatic_field(TenuredSpace, _offsets, SerialBlockOffsetTable)

#define VM_TYPES_SERIALGC(declare_type, \
                          declare_toplevel_type, \
@@ -73,11 +66,8 @@
  declare_type(CardTableRS, CardTable) \
  \
  declare_toplevel_type(TenuredGeneration*) \
  declare_toplevel_type(BlockOffsetSharedArray) \
  declare_toplevel_type(BlockOffsetTable) \
  declare_type(BlockOffsetArray, BlockOffsetTable) \
  declare_type(BlockOffsetArrayContigSpace, BlockOffsetArray) \
  declare_toplevel_type(BlockOffsetSharedArray*)
  declare_toplevel_type(SerialBlockOffsetSharedArray) \
  declare_toplevel_type(SerialBlockOffsetTable)

#define VM_INT_CONSTANTS_SERIALGC(declare_constant, \
                                  declare_constant_with_value)
@@ -87,25 +87,6 @@ bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}

#if INCLUDE_SERIALGC
void TenuredSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.initialize_threshold();
}

void TenuredSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void TenuredSpace::set_end(HeapWord* new_end) {
  // Space should not advertise an increase in size
  // until after the underlying offset table has been enlarged.
  _offsets.resize(pointer_delta(new_end, bottom()));
  Space::set_end(new_end);
}
#endif // INCLUDE_SERIALGC

#ifndef PRODUCT

void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
@@ -152,7 +133,6 @@ HeapWord* ContiguousSpace::forward(oop q, size_t size,
    }
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->space->initialize_threshold();
    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  }

@@ -172,7 +152,7 @@ HeapWord* ContiguousSpace::forward(oop q, size_t size,
  // We need to update the offset table so that the beginnings of objects can be
  // found during scavenge. Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.
  cp->space->alloc_block(compact_top - size, compact_top);
  cp->space->update_for_block(compact_top - size, compact_top);
  return compact_top;
}

@@ -190,7 +170,6 @@ void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  assert(cp->gen != nullptr, "need a generation");
  assert(cp->gen->first_compaction_space() == this, "just checking");
  cp->space = cp->gen->first_compaction_space();
  cp->space->initialize_threshold();
  cp->space->set_compaction_top(cp->space->bottom());
}

@@ -384,9 +363,8 @@ void ContiguousSpace::print_on(outputStream* st) const {
#if INCLUDE_SERIALGC
void TenuredSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" PTR_FORMAT ", " PTR_FORMAT ", "
               PTR_FORMAT ", " PTR_FORMAT ")",
               p2i(bottom()), p2i(top()), p2i(_offsets.threshold()), p2i(end()));
  st->print_cr(" [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ")",
               p2i(bottom()), p2i(top()), p2i(end()));
}
#endif

@@ -510,20 +488,30 @@ HeapWord* ContiguousSpace::par_allocate(size_t size) {
}

#if INCLUDE_SERIALGC
void TenuredSpace::initialize_threshold() {
  _offsets.initialize_threshold();
void TenuredSpace::update_for_block(HeapWord* start, HeapWord* end) {
  _offsets.update_for_block(start, end);
}

void TenuredSpace::alloc_block(HeapWord* start, HeapWord* end) {
  _offsets.alloc_block(start, end);
HeapWord* TenuredSpace::block_start_const(const void* addr) const {
  HeapWord* cur_block = _offsets.block_start_reaching_into_card(addr);

  while (true) {
    HeapWord* next_block = cur_block + cast_to_oop(cur_block)->size();
    if (next_block > addr) {
      assert(cur_block <= addr, "postcondition");
      return cur_block;
    }
    cur_block = next_block;
    // Because the BOT is precise, we should never step into the next card
    // (i.e. crossing the card boundary).
    assert(!SerialBlockOffsetTable::is_crossing_card_boundary(cur_block, (HeapWord*)addr), "must be");
  }
}
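The loop in block_start_const() is a two-step lookup: the BOT gets you to the block that reaches into addr's card, and a short forward object walk finishes the job. A self-contained model on a toy heap (made-up block starts and sizes, not oop sizes):

```cpp
#include <cassert>
#include <cstddef>

int main() {
  const size_t starts[] = {0, 10, 50, 90};   // toy block start offsets (words)
  const size_t sizes[]  = {10, 40, 40, 30};  // matching block sizes
  const size_t addr = 75;                    // query address inside block [50, 90)

  size_t i = 0;                              // stands in for the BOT lookup result
  while (starts[i] + sizes[i] <= addr) {     // next_block <= addr: keep walking
    ++i;
  }
  assert(starts[i] == 50 && addr >= starts[i]);
  return 0;
}
```

Because the rewritten table is precise, the walk can never leave the query's card, which is exactly what the assert at the bottom of the real loop checks.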
TenuredSpace::TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
TenuredSpace::TenuredSpace(SerialBlockOffsetSharedArray* sharedOffsetArray,
                           MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::safepoint, "TenuredSpaceParAlloc_lock", true)
  _offsets(sharedOffsetArray)
{
  _offsets.set_contig_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}

@@ -536,10 +524,6 @@ void TenuredSpace::verify() const {
  int objs = 0;
  int blocks = 0;

  if (VerifyObjectStartArray) {
    _offsets.verify();
  }

  while (p < top()) {
    size_t size = cast_to_oop(p)->size();
    // For a sampling of objects in the space, find it using the

@@ -47,11 +47,6 @@
// Forward decls.
class Space;
class ContiguousSpace;
#if INCLUDE_SERIALGC
class BlockOffsetArray;
class BlockOffsetArrayContigSpace;
class BlockOffsetTable;
#endif
class Generation;
class ContiguousSpace;
class CardTableRS;
@@ -241,7 +236,7 @@ private:

  // This is the function to invoke when an allocation of an object covering
  // "start" to "end" occurs to update other internal data structures.
  virtual void alloc_block(HeapWord* start, HeapWord* the_end) { }
  virtual void update_for_block(HeapWord* start, HeapWord* the_end) { }

  GenSpaceMangler* mangler() { return _mangler; }

@@ -308,11 +303,6 @@ private:
  // live part of a compacted space ("deadwood" support.)
  virtual size_t allowed_dead_ratio() const { return 0; };

  // Some contiguous spaces may maintain some data structures that should
  // be updated whenever an allocation crosses a boundary. This function
  // initializes these data structures for further updates.
  virtual void initialize_threshold() { }

  // "q" is an object of the given "size" that should be forwarded;
  // "cp" names the generation ("gen") and containing "this" (which must
  // also equal "cp->space"). "compact_top" is where in "this" the
@@ -322,7 +312,7 @@ private:
  // be one, since compaction must succeed -- we go to the first space of
  // the previous generation if necessary, updating "cp"), reset compact_top
  // and then forward. In either case, returns the new value of "compact_top".
  // Invokes the "alloc_block" function of the then-current compaction
  // Invokes the "update_for_block" function of the then-current compaction
  // space.
  virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                            HeapWord* compact_top);

@@ -412,36 +402,28 @@ private:
#if INCLUDE_SERIALGC

// Class TenuredSpace is used by TenuredGeneration; it supports an efficient
// "block_start" operation via a BlockOffsetArray (whose BlockOffsetSharedArray
// may be shared with other spaces.)
// "block_start" operation via a SerialBlockOffsetTable.

class TenuredSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;
  SerialBlockOffsetTable _offsets;

  // Mark sweep support
  size_t allowed_dead_ratio() const override;
 public:
  // Constructor
  TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
  TenuredSpace(SerialBlockOffsetSharedArray* sharedOffsetArray,
               MemRegion mr);

  void set_bottom(HeapWord* value) override;
  void set_end(HeapWord* value) override;

  void clear(bool mangle_space) override;

  inline HeapWord* block_start_const(const void* p) const override;
  HeapWord* block_start_const(const void* addr) const override;

  // Add offset table update.
  inline HeapWord* allocate(size_t word_size) override;
  inline HeapWord* par_allocate(size_t word_size) override;

  // MarkSweep support phase3
  void initialize_threshold() override;
  void alloc_block(HeapWord* start, HeapWord* end) override;
  void update_for_block(HeapWord* start, HeapWord* end) override;

  void print_on(outputStream* st) const override;

@@ -47,36 +47,19 @@ inline HeapWord* Space::block_start(const void* p) {
inline HeapWord* TenuredSpace::allocate(size_t size) {
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != nullptr) {
    _offsets.alloc_block(res, size);
    _offsets.update_for_block(res, res + size);
  }
  return res;
}

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock. Therefore, best if
// this is used for larger LAB allocations only.
inline HeapWord* TenuredSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  // This ought to be just "allocate", because of the lock above, but that
  // ContiguousSpace::allocate asserts that either the allocating thread
  // holds the heap lock or it is the VM thread and we're at a safepoint.
  // The best I (dld) could figure was to put a field in ContiguousSpace
  // meaning "locking at safepoint taken care of", and set/reset that
  // here. But this will do for now, especially in light of the comment
  // above. Perhaps in the future some lock-free manner of keeping the
  // coordination.
  HeapWord* res = ContiguousSpace::par_allocate(size);
  if (res != nullptr) {
    _offsets.alloc_block(res, size);
    _offsets.update_for_block(res, res + size);
  }
  return res;
}

inline HeapWord*
TenuredSpace::block_start_const(const void* p) const {
  return _offsets.block_start(p);
}

class DeadSpacer : StackObj {
  size_t _allowed_deadspace_words;
  bool _active;

@@ -91,9 +91,6 @@ class TaskTerminator : public CHeapObj<mtGC> {

  size_t tasks_in_queue_set() const;

  // Perform one iteration of spin-master work.
  void do_delay_step(DelayContext& delay_context);

  NONCOPYABLE(TaskTerminator);

public:
@@ -939,14 +939,14 @@
    <Field type="ulong" contentType="address" name="topAddress" label="Top Address" description="Ending address of the module, if available" />
  </Event>

  <Event name="NativeLibraryLoad" category="Java Virtual Machine, Runtime" label="Native Library Load" thread="false" stackTrace="true" startTime="true"
  <Event name="NativeLibraryLoad" category="Java Virtual Machine, Runtime" label="Native Library Load" thread="true" stackTrace="true" startTime="true"
         description="Information about a dynamic library or other native image load operation">
    <Field type="string" name="name" label="Name" />
    <Field type="boolean" name="success" label="Success" description="Success or failure of the load operation" />
    <Field type="string" name="errorMessage" label="Error Message" description="In case of a load error, error description" />
  </Event>

  <Event name="NativeLibraryUnload" category="Java Virtual Machine, Runtime" label="Native Library Unload" thread="false" stackTrace="true" startTime="true"
  <Event name="NativeLibraryUnload" category="Java Virtual Machine, Runtime" label="Native Library Unload" thread="true" stackTrace="true" startTime="true"
         description="Information about a dynamic library or other native image unload operation">
    <Field type="string" name="name" label="Name" />
    <Field type="boolean" name="success" label="Success" description="Success or failure of the unload operation" />

@@ -114,14 +114,6 @@ class JfrEvent {
    return JfrEventSetting::has_stacktrace(T::eventId);
  }

  static bool is_large() {
    return JfrEventSetting::is_large(T::eventId);
  }

  static void set_large() {
    JfrEventSetting::set_large(T::eventId);
  }

  static JfrEventId id() {
    return T::eventId;
  }
@@ -248,6 +240,14 @@ class JfrEvent {
    return writer.end_event_write(large_size) > 0;
  }

  static bool is_large() {
    return JfrEventSetting::is_large(T::eventId);
  }

  static void set_large() {
    JfrEventSetting::set_large(T::eventId);
  }

#ifdef ASSERT
 private:
  // Verification of fields.
@@ -112,7 +112,6 @@ void JfrPostBox::asynchronous_post(int msg) {
void JfrPostBox::synchronous_post(int msg) {
  assert(is_synchronous(msg), "invariant");
  assert(!JfrMsg_lock->owned_by_self(), "should not hold JfrMsg_lock here!");
  NoHandleMark nhm;
  ThreadBlockInVM transition(JavaThread::current());
  MonitorLocker msg_lock(JfrMsg_lock, Mutex::_no_safepoint_check_flag);
  deposit(msg);

@@ -234,6 +234,7 @@ bool JfrStackTrace::record_async(JavaThread* jt, const frame& frame) {
  assert(jt != nullptr, "invariant");
  assert(!_lineno, "invariant");
  Thread* current_thread = Thread::current();
  assert(current_thread->is_JfrSampler_thread(), "invariant");
  assert(jt != current_thread, "invariant");
  // Explicitly monitor the available space of the thread-local buffer used for enqueuing klasses as part of tagging methods.
  // We do this because if space becomes sparse, we cannot rely on the implicit allocation of a new buffer as part of the

@@ -286,6 +287,7 @@ bool JfrStackTrace::record_async(JavaThread* jt, const frame& frame) {
bool JfrStackTrace::record(JavaThread* jt, const frame& frame, int skip) {
  assert(jt != nullptr, "invariant");
  assert(jt == Thread::current(), "invariant");
  assert(jt->thread_state() != _thread_in_native, "invariant");
  assert(!_lineno, "invariant");
  // Must use ResetNoHandleMark here to bypass any NoHandleMark that exists on the stack.
  // This is because RegisterMap uses Handles to support continuations.
src/hotspot/share/jfr/support/jfrNativeLibraryLoadEvent.cpp (new file, 123 lines)
@@ -0,0 +1,123 @@
/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/support/jfrNativeLibraryLoadEvent.hpp"
#include "jfr/utilities/jfrTime.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/thread.inline.hpp"

JfrNativeLibraryEventBase::JfrNativeLibraryEventBase(const char* name) : _name(name), _error_msg(nullptr), _start_time(nullptr) {}

JfrNativeLibraryEventBase::~JfrNativeLibraryEventBase() {
  delete _start_time;
}

const char* JfrNativeLibraryEventBase::name() const {
  return _name;
}

JfrTicksWrapper* JfrNativeLibraryEventBase::start_time() const {
  return _start_time;
}

bool JfrNativeLibraryEventBase::has_start_time() const {
  return _start_time != nullptr;
}

const char* JfrNativeLibraryEventBase::error_msg() const {
  return _error_msg;
}

void JfrNativeLibraryEventBase::set_error_msg(const char* error_msg) {
  assert(_error_msg == nullptr, "invariant");
  _error_msg = error_msg;
}

/*
 * The JfrTicks value is heap allocated inside an object of type JfrTicksWrapper.
 * The reason is that a raw value object of type Ticks is not possible at this
 * location because this code runs as part of early VM bootstrap, at a moment
 * where Ticks support is not yet initialized.
 */
template <typename EventType>
static inline JfrTicksWrapper* allocate_start_time() {
  return EventType::is_enabled() ? new JfrTicksWrapper() : nullptr;
}

NativeLibraryLoadEvent::NativeLibraryLoadEvent(const char* name, void** result) : JfrNativeLibraryEventBase(name), _result(result) {
  assert(_result != nullptr, "invariant");
  _start_time = allocate_start_time<EventNativeLibraryLoad>();
}

bool NativeLibraryLoadEvent::success() const {
  return *_result != nullptr;
}

NativeLibraryUnloadEvent::NativeLibraryUnloadEvent(const char* name) : JfrNativeLibraryEventBase(name), _result(false) {
  _start_time = allocate_start_time<EventNativeLibraryUnload>();
}

bool NativeLibraryUnloadEvent::success() const {
  return _result;
}

void NativeLibraryUnloadEvent::set_result(bool result) {
  _result = result;
}

template <typename EventType, typename HelperType>
static void commit(HelperType& helper) {
  if (!helper.has_start_time()) {
    return;
  }
  EventType event(UNTIMED);
  event.set_endtime(JfrTicks::now());
  event.set_starttime(*helper.start_time());
  event.set_name(helper.name());
  event.set_errorMessage(helper.error_msg());
  event.set_success(helper.success());
  Thread* thread = Thread::current();
  assert(thread != nullptr, "invariant");
  if (thread->is_Java_thread()) {
    JavaThread* jt = JavaThread::cast(thread);
    if (jt->thread_state() != _thread_in_vm) {
      assert(jt->thread_state() == _thread_in_native, "invariant");
      // For a JavaThread to take a JFR stacktrace, it must be in _thread_in_vm. Can safepoint here.
      ThreadInVMfromNative transition(jt);
      event.commit();
      return;
    }
  }
  event.commit();
}

NativeLibraryLoadEvent::~NativeLibraryLoadEvent() {
  commit<EventNativeLibraryLoad>(*this);
}

NativeLibraryUnloadEvent::~NativeLibraryUnloadEvent() {
  commit<EventNativeLibraryUnload>(*this);
}
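For orientation, this is roughly how the RAII helpers above are meant to sit at a load site. The helper commits the event from its destructor, and only if the event type was enabled when the helper was constructed; do_platform_load below is a hypothetical stand-in for the platform loading call, not a HotSpot function:

```cpp
#include "jfr/support/jfrNativeLibraryLoadEvent.hpp"

extern void* do_platform_load(const char* path, char* ebuf, int ebuflen);  // hypothetical

void* load_library_with_jfr_event(const char* path, char* ebuf, int ebuflen) {
  void* handle = nullptr;
  {
    NativeLibraryLoadEvent event(path, &handle);     // records start time if enabled
    handle = do_platform_load(path, ebuf, ebuflen);  // hypothetical platform call
    if (handle == nullptr) {
      event.set_error_msg(ebuf);                     // success() derives from *result
    }
  }  // ~NativeLibraryLoadEvent() commits the JFR event here
  return handle;
}
```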
src/hotspot/share/jfr/support/jfrNativeLibraryLoadEvent.hpp (new file, 71 lines)
@@ -0,0 +1,71 @@
/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_JFR_SUPPORT_JFRNATIVELIBRARYLOADEVENT_HPP
#define SHARE_JFR_SUPPORT_JFRNATIVELIBRARYLOADEVENT_HPP

#include "memory/allocation.hpp"

class JfrTicksWrapper;

/*
 * Helper types for populating NativeLibrary events.
 * Event commit is run as part of destructors.
 */

class JfrNativeLibraryEventBase : public StackObj {
 protected:
  const char* _name;
  const char* _error_msg;
  JfrTicksWrapper* _start_time;
  JfrNativeLibraryEventBase(const char* name);
  ~JfrNativeLibraryEventBase();
 public:
  const char* name() const;
  const char* error_msg() const;
  void set_error_msg(const char* error_msg);
  JfrTicksWrapper* start_time() const;
  bool has_start_time() const;
};

class NativeLibraryLoadEvent : public JfrNativeLibraryEventBase {
 private:
  void** _result;
 public:
  NativeLibraryLoadEvent(const char* name, void** result);
  ~NativeLibraryLoadEvent();
  bool success() const;
};

class NativeLibraryUnloadEvent : public JfrNativeLibraryEventBase {
 private:
  bool _result;
 public:
  NativeLibraryUnloadEvent(const char* name);
  ~NativeLibraryUnloadEvent();
  bool success() const;
  void set_result(bool result);
};

#endif // SHARE_JFR_SUPPORT_JFRNATIVELIBRARYLOADEVENT_HPP
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#ifndef SHARE_JFR_UTILITIES_JFRTIME_HPP
#define SHARE_JFR_UTILITIES_JFRTIME_HPP

#include "jfr/utilities/jfrAllocation.hpp"
#include "utilities/ticks.hpp"

typedef TimeInstant<CounterRepresentation, FastUnorderedElapsedCounterSource> JfrTicks;
@@ -41,4 +42,13 @@ class JfrTime {
  static const void* time_function();
};

// For dynamically allocated Ticks values.
class JfrTicksWrapper : public JfrCHeapObj {
 private:
  JfrTicks _ticks;
 public:
  JfrTicksWrapper() : _ticks(JfrTicks::now()) {}
  operator JfrTicks() const { return _ticks; }
};

#endif // SHARE_JFR_UTILITIES_JFRTIME_HPP
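The wrapper exists so a start timestamp can be taken conditionally and carried across early VM bootstrap, when a by-value Ticks is not yet usable (see the comment in jfrNativeLibraryLoadEvent.cpp above). The same shape, reduced to standard C++ with illustrative names (std::chrono stands in for JfrTicks; this is a pattern sketch, not JDK code):

```cpp
#include <chrono>
#include <memory>

struct StartTime {
  // Timestamp taken at construction, mirroring JfrTicksWrapper's _ticks.
  std::chrono::steady_clock::time_point value{std::chrono::steady_clock::now()};
};

static bool event_enabled = true;  // stands in for EventType::is_enabled()

// Only pay for the timestamp (and the heap allocation) when someone will
// consume it, mirroring allocate_start_time<EventType>() above.
static std::unique_ptr<StartTime> allocate_start_time() {
  return event_enabled ? std::make_unique<StartTime>() : nullptr;
}
```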
@@ -23,6 +23,7 @@

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/compilerThread.hpp"
#include "gc/shared/collectedHeap.hpp"
@@ -53,6 +54,29 @@ volatile intx JVMCI::_fatal_log_init_thread = -1;
volatile int JVMCI::_fatal_log_fd = -1;
const char* JVMCI::_fatal_log_filename = nullptr;

CompilerThreadCanCallJava::CompilerThreadCanCallJava(JavaThread* current, bool new_state) {
  _current = nullptr;
  if (current->is_Compiler_thread()) {
    CompilerThread* ct = CompilerThread::cast(current);
    if (ct->_can_call_java != new_state &&
        ct->_compiler != nullptr &&
        ct->_compiler->is_jvmci())
    {
      // Only enter a new context if the ability of the
      // current thread to call Java actually changes
      _reset_state = ct->_can_call_java;
      ct->_can_call_java = new_state;
      _current = ct;
    }
  }
}

CompilerThreadCanCallJava::~CompilerThreadCanCallJava() {
  if (_current != nullptr) {
    _current->_can_call_java = _reset_state;
  }
}

void jvmci_vmStructs_init() NOT_DEBUG_RETURN;

bool JVMCI::can_initialize_JVMCI() {
@@ -176,6 +200,10 @@ void JVMCI::ensure_box_caches_initialized(TRAPS) {
    java_lang_Long_LongCache::symbol()
  };

  // Class resolution and initialization below
  // requires calling into Java
  CompilerThreadCanCallJava ccj(THREAD, true);

  for (unsigned i = 0; i < sizeof(box_classes) / sizeof(Symbol*); i++) {
    Klass* k = SystemDictionary::resolve_or_fail(box_classes[i], true, CHECK);
    InstanceKlass* ik = InstanceKlass::cast(k);

@@ -29,6 +29,7 @@
#include "utilities/exceptions.hpp"

class BoolObjectClosure;
class CompilerThread;
class constantPoolHandle;
class JavaThread;
class JVMCIEnv;
@@ -46,6 +47,34 @@ typedef FormatStringEventLog<256> StringEventLog;
struct _jmetadata;
typedef struct _jmetadata *jmetadata;

// A stack object that manages a scope in which the current thread, if
// it's a CompilerThread, can have its CompilerThread::_can_call_java
// field changed. This allows restricting libjvmci better in terms
// of when it can make Java calls. If a Java call on a CompilerThread
// reaches a clinit, there's a risk of deadlock when async compilation
// is disabled (e.g. -Xbatch or -Xcomp) as the non-CompilerThread thread
// waiting for the blocking compilation may hold the clinit lock.
//
// This scope is primarily used to disable Java calls when libjvmci enters
// the VM via a C2V (i.e. CompilerToVM) native method.
class CompilerThreadCanCallJava : StackObj {
 private:
  CompilerThread* _current;  // Only non-null if state of thread changed
  bool _reset_state;         // Value prior to state change, undefined
                             // if no state change.
 public:
  // Enters a scope in which the ability of the current CompilerThread
  // to call Java is specified by `new_state`. This call only makes a
  // change if the current thread is a CompilerThread associated with
  // a JVMCI compiler whose CompilerThread::_can_call_java is not
  // currently `new_state`.
  CompilerThreadCanCallJava(JavaThread* current, bool new_state);

  // Resets CompilerThread::_can_call_java of the current thread if the
  // constructor changed it.
  ~CompilerThreadCanCallJava();
};
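Usage follows the usual scope-guard shape: flip the ability to call Java for the duration of a lexical scope, and have the destructor restore the previous value even on early returns. A sketch; enter_vm_from_libjvmci is an illustrative name, not an actual JVMCI entry point:

```cpp
void enter_vm_from_libjvmci(JavaThread* current) {
  CompilerThreadCanCallJava no_java_calls(current, /*new_state=*/false);
  // ... C2V work that must not reach a class initializer ...
}  // destructor restores the previous _can_call_java state
```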
class JVMCI : public AllStatic {
  friend class JVMCIRuntime;
  friend class JVMCIEnv;