Mirror of https://github.com/JetBrains/JetBrainsRuntime.git (synced 2026-01-24 17:30:47 +01:00)

Compare commits: vpr/main_l...jdk-26+25 (120 commits)
| SHA1 |
|---|
| 2acd8776f2 |
| c8e64e7c33 |
| 1535d08f0e |
| f5bc6ee90d |
| 6f1c5733ed |
| 223cc64518 |
| 9ea8201b74 |
| 3949b0f23c |
| f0afd89f66 |
| 0bff5f3dbe |
| ae4d9c2e6a |
| 0b3df489e9 |
| d2926dfd9a |
| 54893dc5c2 |
| 99135d2e05 |
| 02ff38f2d7 |
| 902aa4dcd2 |
| 152cd4d8ba |
| aeea849756 |
| 256a9beffc |
| b086e34f71 |
| 66fb015267 |
| 27a38d9093 |
| 4a975637a1 |
| b3e408c078 |
| 0e6c7e8664 |
| ac6f5e9651 |
| 713de231a6 |
| 1f99cf9424 |
| b6d83eda6b |
| 43040f30a7 |
| dcba014ad5 |
| 2e68b79a39 |
| 36b66e13c8 |
| f946449997 |
| df5b105bbb |
| 28d94d6ab4 |
| 3a2845f334 |
| 72ebca8a0b |
| 50a3049737 |
| 8af5943719 |
| 26460b6f12 |
| 8cdfec8d1c |
| 1655773979 |
| 695a4abd5f |
| 46b5e588ab |
| 696821670e |
| e067038796 |
| e5f6332610 |
| 6c09529cd6 |
| 9ec773ad27 |
| 52ffe8a096 |
| 6385c663dc |
| 44087ea5d6 |
| 960987e8c1 |
| 8301d9917e |
| cebb03ef24 |
| df35412db1 |
| d19e072f97 |
| 812add27ab |
| 09b25cd0a2 |
| 69e30244c0 |
| 970533d41d |
| d032b28d9d |
| 8690d263d9 |
| ce1adf63ea |
| 7738131835 |
| 7d35a283cf |
| f510b4a3ba |
| f6c90fe8f9 |
| 6042c9a6f0 |
| f971ee5ea0 |
| 7c169c9814 |
| bc928c814b |
| 7aff8e15ba |
| ad3dfaf1fc |
| cc05530b81 |
| 91b97a49d4 |
| 3924a28a22 |
| 58b601ac42 |
| 6e7eaf40d1 |
| 10f262a6ad |
| 466cb38314 |
| 36daa2650d |
| 5d65c23cd9 |
| 4cc655a2f4 |
| ff851de852 |
| 00f2c38e37 |
| 8a7af77e99 |
| 9eaa364a52 |
| 81e0c87f28 |
| f4305923fb |
| 0829c6acde |
| 1baf5164d6 |
| 7733632f90 |
| eaddefb475 |
| 0d8b5188bb |
| 155d7df555 |
| 6322aaba63 |
| db3a8386d4 |
| d09a8cb81b |
| 2199b5fef4 |
| bfc048aba6 |
| 8102f436f5 |
| 7d78818ae6 |
| bbc0f9ef30 |
| 6b6fdf1d92 |
| 48c59faf58 |
| 9d6a61fda6 |
| 10220ed06e |
| 795ec5c1e9 |
| 436b3357e9 |
| 279f39f14a |
| 42aecc4070 |
| d91480b9b0 |
| 5f42c77085 |
| b6ba1ac9aa |
| 676e6fd8d5 |
| bc66d3e65d |
| d2571ea76a |
.gitignore (vendored, 5 changed lines)
```diff
@@ -26,3 +26,8 @@ NashornProfile.txt
 *.rej
 *.orig
 test/benchmarks/**/target
+/src/hotspot/CMakeLists.txt
+/src/hotspot/compile_commands.json
+/src/hotspot/cmake-build-debug/
+/src/hotspot/.cache/
+/src/hotspot/.idea/
```
```diff
@@ -668,7 +668,7 @@ update.</p>
 (Note that this version is often presented as "MSVC 14.28", and reported
 by cl.exe as 19.28.) Older versions will not be accepted by
 <code>configure</code> and will not work. The maximum accepted version
-of Visual Studio is 2022.</p>
+of Visual Studio is 2026.</p>
 <p>If you have multiple versions of Visual Studio installed,
 <code>configure</code> will by default pick the latest. You can request
 a specific version to be used by setting
```
```diff
@@ -468,7 +468,7 @@ available for this update.
 The minimum accepted version is Visual Studio 2019 version 16.8. (Note that
 this version is often presented as "MSVC 14.28", and reported by cl.exe as
 19.28.) Older versions will not be accepted by `configure` and will not work.
-The maximum accepted version of Visual Studio is 2022.
+The maximum accepted version of Visual Studio is 2026.
 
 If you have multiple versions of Visual Studio installed, `configure` will by
 default pick the latest. You can request a specific version to be used by
```
```diff
@@ -305,11 +305,11 @@ recognize your tests.</p>
 the product.</p>
 <ul>
 <li><p>All unit tests for a class from <code>foo/bar/baz.cpp</code>
-should be placed <code>foo/bar/test_baz.cpp</code> in
-<code>hotspot/test/native/</code> directory. Having all tests for a
-class in one file is a common practice for unit tests, it helps to see
-all existing tests at once, share functions and/or resources without
-losing encapsulation.</p></li>
+should be placed <code>foo/bar/test_baz.cpp</code> in the
+<code>test/hotspot/gtest/</code> directory. Having all tests for a class
+in one file is a common practice for unit tests, it helps to see all
+existing tests at once, share functions and/or resources without losing
+encapsulation.</p></li>
 <li><p>For tests which test more than one class, directory hierarchy
 should be the same as product hierarchy, and file name should reflect
 the name of the tested subsystem/functionality. For example, if a
```
```diff
@@ -319,7 +319,7 @@ placed in <code>gc/g1</code> directory.</p></li>
 <p>Please note that framework prepends directory name to a test group
 name. For example, if <code>TEST(foo, check_this)</code> and
 <code>TEST(bar, check_that)</code> are defined in
-<code>hotspot/test/native/gc/shared/test_foo.cpp</code> file, they will
+<code>test/hotspot/gtest/gc/shared/test_foo.cpp</code> file, they will
 be reported as <code>gc/shared/foo::check_this</code> and
 <code>gc/shared/bar::check_that</code>.</p>
 <h3 id="test-names">Test names</h3>
```
```diff
@@ -241,7 +241,7 @@ recognize your tests.
 Test file location should reflect a location of the tested part of the product.
 
 * All unit tests for a class from `foo/bar/baz.cpp` should be placed
-  `foo/bar/test_baz.cpp` in `hotspot/test/native/` directory. Having all
+  `foo/bar/test_baz.cpp` in the `test/hotspot/gtest/` directory. Having all
   tests for a class in one file is a common practice for unit tests, it
   helps to see all existing tests at once, share functions and/or
   resources without losing encapsulation.
```
```diff
@@ -254,7 +254,7 @@ sub-system under tests belongs to `gc/g1`, tests should be placed in
 
 Please note that framework prepends directory name to a test group
 name. For example, if `TEST(foo, check_this)` and `TEST(bar, check_that)`
-are defined in `hotspot/test/native/gc/shared/test_foo.cpp` file, they
+are defined in `test/hotspot/gtest/gc/shared/test_foo.cpp` file, they
 will be reported as `gc/shared/foo::check_this` and
 `gc/shared/bar::check_that`.
```
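As a concrete illustration of the naming convention in these two hunks, here is a minimal sketch of such a test file. The `TEST` groups and the file path come from the documentation above; the assertions are invented, and HotSpot's own tests include its `unittest.hpp` wrapper rather than raw gtest:

```cpp
// test/hotspot/gtest/gc/shared/test_foo.cpp (illustrative sketch)
#include <gtest/gtest.h>  // HotSpot uses its "unittest.hpp" wrapper instead

TEST(foo, check_this) {
  // Reported as gc/shared/foo::check_this: the framework prepends the
  // directory name (gc/shared) to the test group (foo).
  EXPECT_EQ(2, 1 + 1);
}

TEST(bar, check_that) {
  // Reported as gc/shared/bar::check_that.
  EXPECT_TRUE(true);
}
```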
```diff
@@ -25,7 +25,7 @@
 
 ################################################################################
 # The order of these defines the priority by which we try to find them.
-VALID_VS_VERSIONS="2022 2019"
+VALID_VS_VERSIONS="2022 2019 2026"
 
 VS_DESCRIPTION_2019="Microsoft Visual Studio 2019"
 VS_VERSION_INTERNAL_2019=142
```
```diff
@@ -57,6 +57,21 @@ VS_SDK_PLATFORM_NAME_2022=
 VS_SUPPORTED_2022=true
 VS_TOOLSET_SUPPORTED_2022=true
 
+VS_DESCRIPTION_2026="Microsoft Visual Studio 2026"
+VS_VERSION_INTERNAL_2026=145
+VS_MSVCR_2026=vcruntime140.dll
+VS_VCRUNTIME_1_2026=vcruntime140_1.dll
+VS_MSVCP_2026=msvcp140.dll
+VS_ENVVAR_2026="VS180COMNTOOLS"
+VS_USE_UCRT_2026="true"
+VS_VS_INSTALLDIR_2026="Microsoft Visual Studio/18"
+VS_EDITIONS_2026="BuildTools Community Professional Enterprise"
+VS_SDK_INSTALLDIR_2026=
+VS_VS_PLATFORM_NAME_2026="v145"
+VS_SDK_PLATFORM_NAME_2026=
+VS_SUPPORTED_2026=true
+VS_TOOLSET_SUPPORTED_2026=true
+
 ################################################################################
 
 AC_DEFUN([TOOLCHAIN_CHECK_POSSIBLE_VISUAL_STUDIO_ROOT],
```
```diff
@@ -95,6 +95,7 @@ $(eval $(call SetupJdkLibrary, BUILD_GTEST_LIBJVM, \
     EXTRA_OBJECT_FILES := $(BUILD_LIBJVM_ALL_OBJS), \
     DEFAULT_CFLAGS := false, \
     CFLAGS := $(JVM_CFLAGS) \
+        -DHOTSPOT_GTEST \
         -I$(GTEST_FRAMEWORK_SRC)/googletest/include \
         -I$(GTEST_FRAMEWORK_SRC)/googlemock/include \
         $(addprefix -I, $(GTEST_TEST_SRC)), \
```
```diff
@@ -337,6 +337,30 @@ TARGETS += $(BUILD_LIBJVM)
 # for the associated class. If the class doesn't provide a more specific
 # declaration (either directly or by inheriting from a class that provides
 # one) then the global definition will be used, triggering this check.
+#
+# The HotSpot wrapper for <new> declares as deprecated all the allocation and
+# deallocation functions that use the global allocator. But that blocking
+# isn't a bullet-proof. Some of these functions are implicitly available in
+# every translation unit, without the need to include <new>. So even with that
+# wrapper we still need this link-time check. The implicitly declared
+# functions and their mangled names are - from C++17 6.7.4:
+#
+# void* operator new(size_t)                                  // _Znwm
+# void* operator new(size_t, align_val_t)                     // _ZnwmSt11align_val_t
+#
+# void operator delete(void*) noexcept                        // _ZdlPv
+# void operator delete(void*, size_t) noexcept                // _ZdlPvm
+# void operator delete(void*, align_val_t) noexcept           // _ZdlPvSt11align_val_t
+# void operator delete(void*, size_t, align_val_t) noexcept   // _ZdlPvmSt11align_val_t
+#
+# void* operator new[](size_t)                                // _Znam
+# void* operator new[](size_t, align_val_t)                   // _ZnamSt11align_val_t
+#
+# void operator delete[](void*) noexcept                      // _ZdaPv
+# void operator delete[](void*, size_t) noexcept              // _ZdaPvm
+# void operator delete[](void*, align_val_t) noexcept         // _ZdaPvSt11align_val_t
+# void operator delete[](void*, size_t, align_val_t) noexcept // _ZdaPvmSt11align_val_t
+#
 
 ifneq ($(GENERATE_COMPILE_COMMANDS_ONLY), true)
   ifneq ($(filter $(TOOLCHAIN_TYPE), gcc clang), )
```
```diff
@@ -347,10 +371,18 @@ ifneq ($(GENERATE_COMPILE_COMMANDS_ONLY), true)
     # so use mangled names when looking for symbols.
     # Save the demangling for when something is actually found.
     MANGLED_SYMS := \
-        _ZdaPv \
-        _ZdlPv \
-        _Znam \
         _Znwm \
+        _ZnwmSt11align_val_t \
+        _ZdlPv \
+        _ZdlPvm \
+        _ZdlPvSt11align_val_t \
+        _ZdlPvmSt11align_val_t \
+        _Znam \
+        _ZnamSt11align_val_t \
+        _ZdaPv \
+        _ZdaPvm \
+        _ZdaPvSt11align_val_t \
+        _ZdaPvmSt11align_val_t \
        #
     UNDEF_PATTERN := ' U '
 
```
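The "more specific declaration" escape hatch mentioned in the comment block above is an ordinary C++ class-level allocation function. A minimal sketch of the mechanism (this is not HotSpot's actual CHeapObj or arena machinery, just an illustration):

```cpp
#include <cstddef>
#include <cstdlib>

// A class that declares its own allocation functions. With these in scope,
// `new MyObj` and `delete p` resolve to the members below rather than the
// implicitly declared global operator new/delete, so no reference to the
// Itanium-ABI symbols listed above (_Znwm, _ZdlPv, ...) is introduced.
class MyObj {
 public:
  void* operator new(std::size_t size) { return std::malloc(size); }
  void operator delete(void* p) noexcept { std::free(p); }
};
```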
```diff
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
```
```diff
@@ -219,13 +219,13 @@ public final class SealedGraph implements Taglet {
         // This implies the module is always the same.
         private String relativeLink(TypeElement node) {
             var util = SealedGraph.this.docletEnvironment.getElementUtils();
-            var rootPackage = util.getPackageOf(rootNode);
             var nodePackage = util.getPackageOf(node);
-            var backNavigator = rootPackage.getQualifiedName().toString().chars()
+            // Note: SVG files for nested types use the simple names of containing types as parent directories.
+            // We therefore need to convert all dots in the qualified name to "../" below.
+            var backNavigator = rootNode.getQualifiedName().toString().chars()
                     .filter(c -> c == '.')
                     .mapToObj(c -> "../")
-                    .collect(joining()) +
-                    "../";
+                    .collect(joining());
             var forwardNavigator = nodePackage.getQualifiedName().toString()
                     .replace(".", "/");
 
```
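What the rewritten `backNavigator` computes, sketched as a standalone function (C++ purely for illustration; the real code is the Java stream pipeline above):

```cpp
#include <string>

// Each dot in the root node's fully qualified name becomes one "../" path
// segment, because SVG files for nested types live under one directory per
// containing type. The old code keyed off the package name instead and
// appended one extra "../".
std::string back_navigator(const std::string& qualified_name) {
  std::string out;
  for (char c : qualified_name) {
    if (c == '.') out += "../";
  }
  return out;  // e.g. "a.b.C.D" -> "../../../"
}
```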
```diff
@@ -84,6 +84,7 @@ public interface MessageType {
         FILE_OBJECT("file object", "JavaFileObject", "javax.tools"),
         PATH("path", "Path", "java.nio.file"),
         NAME("name", "Name", "com.sun.tools.javac.util"),
+        LONG("long", "long", null),
         NUMBER("number", "int", null),
         OPTION_NAME("option name", "Option", "com.sun.tools.javac.main"),
         PROFILE("profile", "Profile", "com.sun.tools.javac.jvm"),
```
```diff
@@ -80,6 +80,7 @@ else
 
 BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libExplicitAttach := -pthread
 BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libImplicitAttach := -pthread
+BUILD_JDK_JTREG_LIBRARIES_LDFLAGS_libJNIAttachMutator := -pthread
 BUILD_JDK_JTREG_EXCLUDE += exerevokeall.c
 ifeq ($(call isTargetOs, linux), true)
   BUILD_JDK_JTREG_EXECUTABLES_LIBS_exelauncher := -ldl
```
```diff
@@ -393,6 +393,32 @@ source %{
   return false;
 }
 
+bool Matcher::mask_op_prefers_predicate(int opcode, const TypeVect* vt) {
+  // Only SVE supports the predicate feature.
+  if (UseSVE == 0) {
+    // On architectures that do not support predicate, masks are stored in
+    // general vector registers (TypeVect) with sizes ranging from TypeVectA
+    // to TypeVectX based on the vector size in bytes.
+    assert(vt->isa_vectmask() == nullptr, "mask type is not matched");
+    return false;
+  }
+
+  assert(vt->isa_vectmask() != nullptr, "expected TypeVectMask on SVE");
+  switch (opcode) {
+    case Op_VectorMaskToLong:
+    case Op_VectorLongToMask:
+      // These operations lack native SVE predicate instructions and are
+      // implemented using general vector instructions instead. Use vector
+      // registers rather than predicate registers to save the mask for
+      // better performance.
+      return false;
+    default:
+      // By default, the mask operations are implemented with predicate
+      // instructions with a predicate input/output.
+      return true;
+  }
+}
+
 // Assert that the given node is not a variable shift.
 bool assert_not_var_shift(const Node* n) {
   assert(!n->as_ShiftV()->is_var_shift(), "illegal variable shift");
```
```diff
@@ -6249,31 +6275,44 @@ instruct vmask_tolong_neon(iRegLNoSp dst, vReg src) %{
   ins_pipe(pipe_slow);
 %}
 
-instruct vmask_tolong_sve(iRegLNoSp dst, pReg src, vReg tmp1, vReg tmp2) %{
-  predicate(UseSVE > 0);
+instruct vmask_tolong_sve(iRegLNoSp dst, vReg src, vReg tmp) %{
+  predicate(UseSVE > 0 && !VM_Version::supports_svebitperm());
   match(Set dst (VectorMaskToLong src));
+  effect(TEMP tmp);
+  format %{ "vmask_tolong_sve $dst, $src\t# KILL $tmp" %}
+  ins_encode %{
+    // Input "src" is a vector of boolean represented as
+    // bytes with 0x00/0x01 as element values.
+    __ sve_vmask_tolong($dst$$Register, $src$$FloatRegister,
+                        $tmp$$FloatRegister, Matcher::vector_length(this, $src));
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct vmask_tolong_sve2(iRegLNoSp dst, vReg src, vReg tmp1, vReg tmp2) %{
+  predicate(VM_Version::supports_svebitperm());
+  match(Set dst (VectorMaskToLong src));
   effect(TEMP tmp1, TEMP tmp2);
-  format %{ "vmask_tolong_sve $dst, $src\t# KILL $tmp1, $tmp2" %}
+  format %{ "vmask_tolong_sve2 $dst, $src\t# KILL $tmp1, $tmp2" %}
   ins_encode %{
-    __ sve_vmask_tolong($dst$$Register, $src$$PRegister,
-                        Matcher::vector_element_basic_type(this, $src),
-                        Matcher::vector_length(this, $src),
-                        $tmp1$$FloatRegister, $tmp2$$FloatRegister);
+    // Input "src" is a vector of boolean represented as
+    // bytes with 0x00/0x01 as element values.
+    __ sve2_vmask_tolong($dst$$Register, $src$$FloatRegister,
+                         $tmp1$$FloatRegister, $tmp2$$FloatRegister,
+                         Matcher::vector_length(this, $src));
   %}
   ins_pipe(pipe_slow);
 %}
 
 // fromlong
 
-instruct vmask_fromlong(pReg dst, iRegL src, vReg tmp1, vReg tmp2) %{
+instruct vmask_fromlong(vReg dst, iRegL src, vReg tmp) %{
   match(Set dst (VectorLongToMask src));
-  effect(TEMP tmp1, TEMP tmp2);
-  format %{ "vmask_fromlong $dst, $src\t# vector (sve2). KILL $tmp1, $tmp2" %}
+  effect(TEMP_DEF dst, TEMP tmp);
+  format %{ "vmask_fromlong $dst, $src\t# vector (sve2). KILL $tmp" %}
   ins_encode %{
-    __ sve_vmask_fromlong($dst$$PRegister, $src$$Register,
-                          Matcher::vector_element_basic_type(this),
-                          Matcher::vector_length(this),
-                          $tmp1$$FloatRegister, $tmp2$$FloatRegister);
+    __ sve_vmask_fromlong($dst$$FloatRegister, $src$$Register,
+                          $tmp$$FloatRegister, Matcher::vector_length(this));
   %}
   ins_pipe(pipe_slow);
 %}
```
```diff
@@ -383,6 +383,32 @@ source %{
   return false;
 }
 
+bool Matcher::mask_op_prefers_predicate(int opcode, const TypeVect* vt) {
+  // Only SVE supports the predicate feature.
+  if (UseSVE == 0) {
+    // On architectures that do not support predicate, masks are stored in
+    // general vector registers (TypeVect) with sizes ranging from TypeVectA
+    // to TypeVectX based on the vector size in bytes.
+    assert(vt->isa_vectmask() == nullptr, "mask type is not matched");
+    return false;
+  }
+
+  assert(vt->isa_vectmask() != nullptr, "expected TypeVectMask on SVE");
+  switch (opcode) {
+    case Op_VectorMaskToLong:
+    case Op_VectorLongToMask:
+      // These operations lack native SVE predicate instructions and are
+      // implemented using general vector instructions instead. Use vector
+      // registers rather than predicate registers to save the mask for
+      // better performance.
+      return false;
+    default:
+      // By default, the mask operations are implemented with predicate
+      // instructions with a predicate input/output.
+      return true;
+  }
+}
+
 // Assert that the given node is not a variable shift.
 bool assert_not_var_shift(const Node* n) {
   assert(!n->as_ShiftV()->is_var_shift(), "illegal variable shift");
```
```diff
@@ -4303,31 +4329,44 @@ instruct vmask_tolong_neon(iRegLNoSp dst, vReg src) %{
   ins_pipe(pipe_slow);
 %}
 
-instruct vmask_tolong_sve(iRegLNoSp dst, pReg src, vReg tmp1, vReg tmp2) %{
-  predicate(UseSVE > 0);
+instruct vmask_tolong_sve(iRegLNoSp dst, vReg src, vReg tmp) %{
+  predicate(UseSVE > 0 && !VM_Version::supports_svebitperm());
   match(Set dst (VectorMaskToLong src));
+  effect(TEMP tmp);
+  format %{ "vmask_tolong_sve $dst, $src\t# KILL $tmp" %}
+  ins_encode %{
+    // Input "src" is a vector of boolean represented as
+    // bytes with 0x00/0x01 as element values.
+    __ sve_vmask_tolong($dst$$Register, $src$$FloatRegister,
+                        $tmp$$FloatRegister, Matcher::vector_length(this, $src));
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct vmask_tolong_sve2(iRegLNoSp dst, vReg src, vReg tmp1, vReg tmp2) %{
+  predicate(VM_Version::supports_svebitperm());
+  match(Set dst (VectorMaskToLong src));
   effect(TEMP tmp1, TEMP tmp2);
-  format %{ "vmask_tolong_sve $dst, $src\t# KILL $tmp1, $tmp2" %}
+  format %{ "vmask_tolong_sve2 $dst, $src\t# KILL $tmp1, $tmp2" %}
   ins_encode %{
-    __ sve_vmask_tolong($dst$$Register, $src$$PRegister,
-                        Matcher::vector_element_basic_type(this, $src),
-                        Matcher::vector_length(this, $src),
-                        $tmp1$$FloatRegister, $tmp2$$FloatRegister);
+    // Input "src" is a vector of boolean represented as
+    // bytes with 0x00/0x01 as element values.
+    __ sve2_vmask_tolong($dst$$Register, $src$$FloatRegister,
+                         $tmp1$$FloatRegister, $tmp2$$FloatRegister,
+                         Matcher::vector_length(this, $src));
   %}
   ins_pipe(pipe_slow);
 %}
 
 // fromlong
 
-instruct vmask_fromlong(pReg dst, iRegL src, vReg tmp1, vReg tmp2) %{
+instruct vmask_fromlong(vReg dst, iRegL src, vReg tmp) %{
   match(Set dst (VectorLongToMask src));
-  effect(TEMP tmp1, TEMP tmp2);
-  format %{ "vmask_fromlong $dst, $src\t# vector (sve2). KILL $tmp1, $tmp2" %}
+  effect(TEMP_DEF dst, TEMP tmp);
+  format %{ "vmask_fromlong $dst, $src\t# vector (sve2). KILL $tmp" %}
   ins_encode %{
-    __ sve_vmask_fromlong($dst$$PRegister, $src$$Register,
-                          Matcher::vector_element_basic_type(this),
-                          Matcher::vector_length(this),
-                          $tmp1$$FloatRegister, $tmp2$$FloatRegister);
+    __ sve_vmask_fromlong($dst$$FloatRegister, $src$$Register,
+                          $tmp$$FloatRegister, Matcher::vector_length(this));
   %}
   ins_pipe(pipe_slow);
 %}
```
```diff
@@ -1399,137 +1399,125 @@ void C2_MacroAssembler::bytemask_compress(Register dst) {
   andr(dst, dst, 0xff); // dst = 0x8D
 }
 
-// Pack the lowest-numbered bit of each mask element in src into a long value
-// in dst, at most the first 64 lane elements.
-// Clobbers: rscratch1, if UseSVE=1 or the hardware doesn't support FEAT_BITPERM.
-void C2_MacroAssembler::sve_vmask_tolong(Register dst, PRegister src, BasicType bt, int lane_cnt,
-                                         FloatRegister vtmp1, FloatRegister vtmp2) {
-  assert(lane_cnt <= 64 && is_power_of_2(lane_cnt), "Unsupported lane count");
-  assert_different_registers(dst, rscratch1);
-  assert_different_registers(vtmp1, vtmp2);
-  assert(UseSVE > 0, "must be");
-
-  Assembler::SIMD_RegVariant size = elemType_to_regVariant(bt);
-  // Example: src = 0b01100101 10001101, bt = T_BYTE, lane_cnt = 16
-  // Expected: dst = 0x658D
-
-  // Convert the mask into vector with sequential bytes.
-  // vtmp1 = 0x00010100 0x00010001 0x01000000 0x01010001
-  sve_cpy(vtmp1, size, src, 1, false);
-  if (bt != T_BYTE) {
-    sve_vector_narrow(vtmp1, B, vtmp1, size, vtmp2);
-  }
-
-  if (UseSVE > 1 && VM_Version::supports_svebitperm()) {
-    // Given a vector with the value 0x00 or 0x01 in each byte, the basic idea
-    // is to compress each significant bit of the byte in a cross-lane way. Due
-    // to the lack of a cross-lane bit-compress instruction, we use BEXT
-    // (bit-compress in each lane) with the biggest lane size (T = D) then
-    // concatenate the results.
-
-    // The second source input of BEXT, initialized with 0x01 in each byte.
-    // vtmp2 = 0x01010101 0x01010101 0x01010101 0x01010101
-    sve_dup(vtmp2, B, 1);
-
-    // BEXT vtmp1.D, vtmp1.D, vtmp2.D
-    // vtmp1 = 0x0001010000010001 | 0x0100000001010001
-    // vtmp2 = 0x0101010101010101 | 0x0101010101010101
-    // ---------------------------------------
-    // vtmp1 = 0x0000000000000065 | 0x000000000000008D
-    sve_bext(vtmp1, D, vtmp1, vtmp2);
-
-    // Concatenate the lowest significant 8 bits in each 8 bytes, and extract the
-    // result to dst.
-    // vtmp1 = 0x0000000000000000 | 0x000000000000658D
-    // dst = 0x658D
-    if (lane_cnt <= 8) {
-      // No need to concatenate.
-      umov(dst, vtmp1, B, 0);
-    } else if (lane_cnt <= 16) {
-      ins(vtmp1, B, vtmp1, 1, 8);
-      umov(dst, vtmp1, H, 0);
-    } else {
-      // As the lane count is 64 at most, the final expected value must be in
-      // the lowest 64 bits after narrowing vtmp1 from D to B.
-      sve_vector_narrow(vtmp1, B, vtmp1, D, vtmp2);
-      umov(dst, vtmp1, D, 0);
-    }
-  } else if (UseSVE > 0) {
-    // Compress the lowest 8 bytes.
-    fmovd(dst, vtmp1);
-    bytemask_compress(dst);
-    if (lane_cnt <= 8) return;
-
-    // Repeat on higher bytes and join the results.
-    // Compress 8 bytes in each iteration.
-    for (int idx = 1; idx < (lane_cnt / 8); idx++) {
-      sve_extract_integral(rscratch1, T_LONG, vtmp1, idx, vtmp2);
-      bytemask_compress(rscratch1);
-      orr(dst, dst, rscratch1, Assembler::LSL, idx << 3);
-    }
-  } else {
-    assert(false, "unsupported");
-    ShouldNotReachHere();
-  }
-}
-
-// Unpack the mask, a long value in src, into predicate register dst based on the
-// corresponding data type. Note that dst can support at most 64 lanes.
-// Below example gives the expected dst predicate register in different types, with
-// a valid src(0x658D) on a 1024-bit vector size machine.
-//   BYTE:  dst = 0x00 00 00 00 00 00 00 00 00 00 00 00 00 00 65 8D
-//   SHORT: dst = 0x00 00 00 00 00 00 00 00 00 00 00 00 14 11 40 51
-//   INT:   dst = 0x00 00 00 00 00 00 00 00 01 10 01 01 10 00 11 01
-//   LONG:  dst = 0x00 01 01 00 00 01 00 01 01 00 00 00 01 01 00 01
-//
-// The number of significant bits of src must be equal to lane_cnt. E.g., 0xFF658D which
-// has 24 significant bits would be an invalid input if dst predicate register refers to
-// a LONG type 1024-bit vector, which has at most 16 lanes.
-void C2_MacroAssembler::sve_vmask_fromlong(PRegister dst, Register src, BasicType bt, int lane_cnt,
-                                           FloatRegister vtmp1, FloatRegister vtmp2) {
-  assert(UseSVE == 2 && VM_Version::supports_svebitperm() &&
-         lane_cnt <= 64 && is_power_of_2(lane_cnt), "unsupported");
-  Assembler::SIMD_RegVariant size = elemType_to_regVariant(bt);
-  // Example: src = 0x658D, bt = T_BYTE, size = B, lane_cnt = 16
-  // Expected: dst = 0b01101001 10001101
-
-  // Put long value from general purpose register into the first lane of vector.
-  // vtmp1 = 0x0000000000000000 | 0x000000000000658D
-  sve_dup(vtmp1, B, 0);
-  mov(vtmp1, D, 0, src);
-
-  // As sve_cmp generates mask value with the minimum unit in byte, we should
-  // transform the value in the first lane which is mask in bit now to the
-  // mask in byte, which can be done by SVE2's BDEP instruction.
-
-  // The first source input of BDEP instruction. Deposite each byte in every 8 bytes.
-  // vtmp1 = 0x0000000000000065 | 0x000000000000008D
-  if (lane_cnt <= 8) {
-    // Nothing. As only one byte exsits.
-  } else if (lane_cnt <= 16) {
-    ins(vtmp1, B, vtmp1, 8, 1);
-    mov(vtmp1, B, 1, zr);
-  } else {
-    sve_vector_extend(vtmp1, D, vtmp1, B);
-  }
-
-  // The second source input of BDEP instruction, initialized with 0x01 for each byte.
-  // vtmp2 = 0x01010101 0x01010101 0x01010101 0x01010101
-  sve_dup(vtmp2, B, 1);
-
-  // BDEP vtmp1.D, vtmp1.D, vtmp2.D
-  // vtmp1 = 0x0000000000000065 | 0x000000000000008D
-  // vtmp2 = 0x0101010101010101 | 0x0101010101010101
-  // ---------------------------------------
-  // vtmp1 = 0x0001010000010001 | 0x0100000001010001
-  sve_bdep(vtmp1, D, vtmp1, vtmp2);
-
-  if (bt != T_BYTE) {
-    sve_vector_extend(vtmp1, size, vtmp1, B);
-  }
-  // Generate mask according to the given vector, in which the elements have been
-  // extended to expected type.
-  // dst = 0b01101001 10001101
-  sve_cmp(Assembler::NE, dst, size, ptrue, vtmp1, 0);
-}
+// Pack the value of each mask element in "src" into a long value in "dst", at most
+// the first 64 lane elements. The input "src" is a vector of boolean represented as
+// bytes with 0x00/0x01 as element values. Each lane value from "src" is packed into
+// one bit in "dst".
+//
+// Example: src = 0x0001010000010001 0100000001010001, lane_cnt = 16
+// Expected: dst = 0x658D
+//
+// Clobbers: rscratch1
+void C2_MacroAssembler::sve_vmask_tolong(Register dst, FloatRegister src,
+                                         FloatRegister vtmp, int lane_cnt) {
+  assert(lane_cnt <= 64 && is_power_of_2(lane_cnt), "Unsupported lane count");
+  assert_different_registers(dst, rscratch1);
+  assert_different_registers(src, vtmp);
+  assert(UseSVE > 0, "must be");
+
+  // Compress the lowest 8 bytes.
+  fmovd(dst, src);
+  bytemask_compress(dst);
+  if (lane_cnt <= 8) return;
+
+  // Repeat on higher bytes and join the results.
+  // Compress 8 bytes in each iteration.
+  for (int idx = 1; idx < (lane_cnt / 8); idx++) {
+    sve_extract_integral(rscratch1, T_LONG, src, idx, vtmp);
+    bytemask_compress(rscratch1);
+    orr(dst, dst, rscratch1, Assembler::LSL, idx << 3);
+  }
+}
+
+// The function is same as above "sve_vmask_tolong", but it uses SVE2's BEXT
+// instruction which requires the FEAT_BITPERM feature.
+void C2_MacroAssembler::sve2_vmask_tolong(Register dst, FloatRegister src,
+                                          FloatRegister vtmp1, FloatRegister vtmp2,
+                                          int lane_cnt) {
+  assert(lane_cnt <= 64 && is_power_of_2(lane_cnt), "Unsupported lane count");
+  assert_different_registers(src, vtmp1, vtmp2);
+  assert(UseSVE > 1 && VM_Version::supports_svebitperm(), "must be");
+
+  // Given a vector with the value 0x00 or 0x01 in each byte, the basic idea
+  // is to compress each significant bit of the byte in a cross-lane way. Due
+  // to the lack of a cross-lane bit-compress instruction, we use BEXT
+  // (bit-compress in each lane) with the biggest lane size (T = D) then
+  // concatenate the results.
+
+  // The second source input of BEXT, initialized with 0x01 in each byte.
+  // vtmp2 = 0x01010101 0x01010101 0x01010101 0x01010101
+  sve_dup(vtmp2, B, 1);
+
+  // BEXT vtmp1.D, src.D, vtmp2.D
+  // src   = 0x0001010000010001 | 0x0100000001010001
+  // vtmp2 = 0x0101010101010101 | 0x0101010101010101
+  // ---------------------------------------
+  // vtmp1 = 0x0000000000000065 | 0x000000000000008D
+  sve_bext(vtmp1, D, src, vtmp2);
+
+  // Concatenate the lowest significant 8 bits in each 8 bytes, and extract the
+  // result to dst.
+  // vtmp1 = 0x0000000000000000 | 0x000000000000658D
+  // dst = 0x658D
+  if (lane_cnt <= 8) {
+    // No need to concatenate.
+    umov(dst, vtmp1, B, 0);
+  } else if (lane_cnt <= 16) {
+    ins(vtmp1, B, vtmp1, 1, 8);
+    umov(dst, vtmp1, H, 0);
+  } else {
+    // As the lane count is 64 at most, the final expected value must be in
+    // the lowest 64 bits after narrowing vtmp1 from D to B.
+    sve_vector_narrow(vtmp1, B, vtmp1, D, vtmp2);
+    umov(dst, vtmp1, D, 0);
+  }
+}
+
+// Unpack the mask, a long value in "src", into a vector register of boolean
+// represented as bytes with 0x00/0x01 as element values in "dst". Each bit in
+// "src" is unpacked into one byte lane in "dst". Note that "dst" can support at
+// most 64 lanes.
+//
+// Below example gives the expected dst vector register, with a valid src(0x658D)
+// on a 128-bit vector size machine.
+//   dst = 0x00 01 01 00 00 01 00 01 01 00 00 00 01 01 00 01
+void C2_MacroAssembler::sve_vmask_fromlong(FloatRegister dst, Register src,
+                                           FloatRegister vtmp, int lane_cnt) {
+  assert_different_registers(dst, vtmp);
+  assert(UseSVE == 2 && VM_Version::supports_svebitperm() &&
+         lane_cnt <= 64 && is_power_of_2(lane_cnt), "unsupported");
+
+  // Example: src = 0x658D, lane_cnt = 16
+  // Expected: dst = 0x00 01 01 00 00 01 00 01 01 00 00 00 01 01 00 01
+
+  // Put long value from general purpose register into the first lane of vector.
+  // vtmp = 0x0000000000000000 | 0x000000000000658D
+  sve_dup(vtmp, B, 0);
+  mov(vtmp, D, 0, src);
+
+  // Transform the value in the first lane which is mask in bit now to the mask in
+  // byte, which can be done by SVE2's BDEP instruction.
+
+  // The first source input of BDEP instruction. Deposite each byte in every 8 bytes.
+  // vtmp = 0x0000000000000065 | 0x000000000000008D
+  if (lane_cnt <= 8) {
+    // Nothing. As only one byte exsits.
+  } else if (lane_cnt <= 16) {
+    ins(vtmp, B, vtmp, 8, 1);
+  } else {
+    sve_vector_extend(vtmp, D, vtmp, B);
+  }
+
+  // The second source input of BDEP instruction, initialized with 0x01 for each byte.
+  // dst = 0x01010101 0x01010101 0x01010101 0x01010101
+  sve_dup(dst, B, 1);
+
+  // BDEP dst.D, vtmp.D, dst.D
+  // vtmp = 0x0000000000000065 | 0x000000000000008D
+  // dst  = 0x0101010101010101 | 0x0101010101010101
+  // ---------------------------------------
+  // dst  = 0x0001010000010001 | 0x0100000001010001
+  sve_bdep(dst, D, vtmp, dst);
+}
 
 // Clobbers: rflags
```
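A scalar model of the two conversions above may help when reading the SVE code: pack boolean byte lanes into bits, and unpack bits back into byte lanes. This sketch mirrors only the data transformation, not the instruction selection:

```cpp
#include <cstdint>

// Scalar model of sve_vmask_tolong / sve2_vmask_tolong: lane i's boolean
// byte (0x00/0x01) becomes bit i of the result.
uint64_t vmask_tolong(const uint8_t* lanes, int lane_cnt) {
  uint64_t dst = 0;
  for (int i = 0; i < lane_cnt; i++) {
    dst |= (uint64_t)(lanes[i] & 1) << i;
  }
  return dst;  // the comment's 16-lane example yields 0x658D
}

// Scalar model of sve_vmask_fromlong: bit i of src becomes byte lane i.
void vmask_fromlong(uint64_t src, uint8_t* lanes, int lane_cnt) {
  for (int i = 0; i < lane_cnt; i++) {
    lanes[i] = (uint8_t)((src >> i) & 1);
  }
}
```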
```diff
@@ -85,15 +85,19 @@
   // the higher garbage bits.
   void bytemask_compress(Register dst);
 
-  // Pack the lowest-numbered bit of each mask element in src into a long value
-  // in dst, at most the first 64 lane elements.
-  void sve_vmask_tolong(Register dst, PRegister src, BasicType bt, int lane_cnt,
-                        FloatRegister vtmp1, FloatRegister vtmp2);
+  // Pack the value of each mask element in "src" into a long value in "dst", at most the
+  // first 64 lane elements. The input "src" is a vector of boolean represented as bytes
+  // with 0x00/0x01 as element values. Each lane value from "src" is packed into one bit in
+  // "dst".
+  void sve_vmask_tolong(Register dst, FloatRegister src, FloatRegister vtmp, int lane_cnt);
 
-  // Unpack the mask, a long value in src, into predicate register dst based on the
-  // corresponding data type. Note that dst can support at most 64 lanes.
-  void sve_vmask_fromlong(PRegister dst, Register src, BasicType bt, int lane_cnt,
-                          FloatRegister vtmp1, FloatRegister vtmp2);
+  void sve2_vmask_tolong(Register dst, FloatRegister src, FloatRegister vtmp1,
+                         FloatRegister vtmp2, int lane_cnt);
+
+  // Unpack the mask, a long value in "src", into vector register "dst" with boolean type.
+  // Each bit in "src" is unpacked into one byte lane in "dst". Note that "dst" can support
+  // at most 64 lanes.
+  void sve_vmask_fromlong(FloatRegister dst, Register src, FloatRegister vtmp, int lane_cnt);
 
   // SIMD&FP comparison
   void neon_compare(FloatRegister dst, BasicType bt, FloatRegister src1,
```
```diff
@@ -879,7 +879,6 @@ void ZBarrierSetAssembler::patch_barrier_relocation(address addr, int format) {
       ShouldNotReachHere();
   }
 
-  OrderAccess::fence();
   ICache::invalidate_word((address)patch_addr);
 }
 
```
```diff
@@ -1375,7 +1375,6 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
     __ ldr(r10, Address(rmethod, Method::native_function_offset()));
     ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
     __ lea(rscratch2, unsatisfied);
-    __ ldr(rscratch2, rscratch2);
     __ cmp(r10, rscratch2);
     __ br(Assembler::NE, L);
     __ call_VM(noreg,
```
```diff
@@ -378,8 +378,8 @@ void VM_Version::initialize() {
   if (UseSHA && VM_Version::supports_sha3()) {
     // Auto-enable UseSHA3Intrinsics on hardware with performance benefit.
     // Note that the evaluation of UseSHA3Intrinsics shows better performance
-    // on Apple silicon but worse performance on Neoverse V1 and N2.
-    if (_cpu == CPU_APPLE) { // Apple silicon
+    // on Apple and Qualcomm silicon but worse performance on Neoverse V1 and N2.
+    if (_cpu == CPU_APPLE || _cpu == CPU_QUALCOMM) { // Apple or Qualcomm silicon
       if (FLAG_IS_DEFAULT(UseSHA3Intrinsics)) {
         FLAG_SET_DEFAULT(UseSHA3Intrinsics, true);
       }
```
```diff
@@ -106,7 +106,7 @@ public:
     CPU_MOTOROLA = 'M',
     CPU_NVIDIA   = 'N',
     CPU_AMCC     = 'P',
-    CPU_QUALCOM  = 'Q',
+    CPU_QUALCOMM = 'Q',
     CPU_MARVELL  = 'V',
     CPU_INTEL    = 'i',
     CPU_APPLE    = 'a',
```
```diff
@@ -1003,6 +1003,10 @@ bool Matcher::vector_rearrange_requires_load_shuffle(BasicType elem_bt, int vlen
   return false;
 }
 
+bool Matcher::mask_op_prefers_predicate(int opcode, const TypeVect* vt) {
+  return false;
+}
+
 const RegMask* Matcher::predicate_reg_mask(void) {
   return nullptr;
 }
```
```diff
@@ -2292,6 +2292,10 @@ bool Matcher::vector_rearrange_requires_load_shuffle(BasicType elem_bt, int vlen
   return false;
 }
 
+bool Matcher::mask_op_prefers_predicate(int opcode, const TypeVect* vt) {
+  return false;
+}
+
 const RegMask* Matcher::predicate_reg_mask(void) {
   return nullptr;
 }
```
```diff
@@ -6324,36 +6328,8 @@ instruct loadConD_Ex(regD dst, immD src) %{
 // Prefetch instructions.
 // Must be safe to execute with invalid address (cannot fault).
 
-// Special prefetch versions which use the dcbz instruction.
-instruct prefetch_alloc_zero(indirectMemory mem, iRegLsrc src) %{
-  match(PrefetchAllocation (AddP mem src));
-  predicate(AllocatePrefetchStyle == 3);
-  ins_cost(MEMORY_REF_COST);
-
-  format %{ "PREFETCH    $mem, 2, $src \t// Prefetch write-many with zero" %}
-  size(4);
-  ins_encode %{
-    __ dcbz($src$$Register, $mem$$base$$Register);
-  %}
-  ins_pipe(pipe_class_memory);
-%}
-
-instruct prefetch_alloc_zero_no_offset(indirectMemory mem) %{
-  match(PrefetchAllocation mem);
-  predicate(AllocatePrefetchStyle == 3);
-  ins_cost(MEMORY_REF_COST);
-
-  format %{ "PREFETCH    $mem, 2 \t// Prefetch write-many with zero" %}
-  size(4);
-  ins_encode %{
-    __ dcbz($mem$$base$$Register);
-  %}
-  ins_pipe(pipe_class_memory);
-%}
-
 instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{
   match(PrefetchAllocation (AddP mem src));
-  predicate(AllocatePrefetchStyle != 3);
   ins_cost(MEMORY_REF_COST);
 
   format %{ "PREFETCH    $mem, 2, $src \t// Prefetch write-many" %}
```
```diff
@@ -6366,7 +6342,6 @@ instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{
 
 instruct prefetch_alloc_no_offset(indirectMemory mem) %{
   match(PrefetchAllocation mem);
-  predicate(AllocatePrefetchStyle != 3);
   ins_cost(MEMORY_REF_COST);
 
   format %{ "PREFETCH    $mem, 2 \t// Prefetch write-many" %}
```
```diff
@@ -164,6 +164,11 @@ source %{
   bool Matcher::vector_rearrange_requires_load_shuffle(BasicType elem_bt, int vlen) {
     return false;
   }
+
+  bool Matcher::mask_op_prefers_predicate(int opcode, const TypeVect* vt) {
+    // Prefer predicate if the mask type is "TypeVectMask".
+    return vt->isa_vectmask() != nullptr;
+  }
 %}
 
 // All VEC instructions
```
```diff
@@ -1146,9 +1146,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
     Label L;
     __ ld(x28, Address(xmethod, Method::native_function_offset()));
     ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
-    __ la(t, unsatisfied);
-    __ load_long_misaligned(t1, Address(t, 0), t0, 2); // 2 bytes aligned, but not 4 or 8
-
+    __ la(t1, unsatisfied);
     __ bne(x28, t1, L);
     __ call_VM(noreg,
                CAST_FROM_FN_PTR(address,
```
```diff
@@ -1809,6 +1809,10 @@ bool Matcher::vector_rearrange_requires_load_shuffle(BasicType elem_bt, int vlen
   return false;
 }
 
+bool Matcher::mask_op_prefers_predicate(int opcode, const TypeVect* vt) {
+  return false;
+}
+
 const RegMask* Matcher::predicate_reg_mask(void) {
   return nullptr;
 }
```
```diff
@@ -3736,6 +3736,11 @@ bool Matcher::vector_rearrange_requires_load_shuffle(BasicType elem_bt, int vlen
   }
 }
 
+bool Matcher::mask_op_prefers_predicate(int opcode, const TypeVect* vt) {
+  // Prefer predicate if the mask type is "TypeVectMask".
+  return vt->isa_vectmask() != nullptr;
+}
+
 MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* generic_opnd, uint ideal_reg, bool is_temp) {
   assert(Matcher::is_generic_vector(generic_opnd), "not generic");
   bool legacy = (generic_opnd->opcode() == LEGVEC);
```
```diff
@@ -612,7 +612,6 @@ void CgroupSubsystemFactory::cleanup(CgroupInfo* cg_infos) {
 *
 * cpu affinity
 * cgroup cpu quota & cpu period
-* cgroup cpu shares
 *
 * Algorithm:
 *
```
```diff
@@ -623,19 +622,18 @@
 *
 * All results of division are rounded up to the next whole number.
 *
-* If quotas have not been specified, return the
-* number of active processors in the system.
+* If quotas have not been specified, sets the result reference to
+* the number of active processors in the system.
 *
-* If quotas have been specified, the resulting number
-* returned will never exceed the number of active processors.
+* If quotas have been specified, the number set in the result
+* reference will never exceed the number of active processors.
 *
 * return:
-*    number of CPUs
+*    true if there were no errors. false otherwise.
 */
-int CgroupSubsystem::active_processor_count() {
-  int quota_count = 0;
+bool CgroupSubsystem::active_processor_count(int& value) {
   int cpu_count;
-  int result;
+  int result = -1;
 
   // We use a cache with a timeout to avoid performing expensive
   // computations in the event this function is called frequently.
```
```diff
@@ -643,38 +641,50 @@
   CachingCgroupController<CgroupCpuController>* contrl = cpu_controller();
   CachedMetric* cpu_limit = contrl->metrics_cache();
   if (!cpu_limit->should_check_metric()) {
-    int val = (int)cpu_limit->value();
-    log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %d", val);
-    return val;
+    value = (int)cpu_limit->value();
+    log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %d", value);
+    return true;
   }
 
   cpu_count = os::Linux::active_processor_count();
-  result = CgroupUtil::processor_count(contrl->controller(), cpu_count);
+  if (!CgroupUtil::processor_count(contrl->controller(), cpu_count, result)) {
+    return false;
+  }
+  assert(result > 0 && result <= cpu_count, "must be");
   // Update cached metric to avoid re-reading container settings too often
   cpu_limit->set_value(result, OSCONTAINER_CACHE_TIMEOUT);
+  value = result;
 
-  return result;
+  return true;
 }
 
 /* memory_limit_in_bytes
 *
-* Return the limit of available memory for this process.
+* Return the limit of available memory for this process in the provided
+* physical_memory_size_type reference. If there was no limit value set in the underlying
+* interface files 'value_unlimited' is returned.
 *
 * return:
-*    memory limit in bytes or
-*    -1 for unlimited
-*    OSCONTAINER_ERROR for not supported
+*    false if retrieving the value failed
+*    true if retrieving the value was successfull and the value was
+*    set in the 'value' reference.
 */
-jlong CgroupSubsystem::memory_limit_in_bytes(julong upper_bound) {
+bool CgroupSubsystem::memory_limit_in_bytes(physical_memory_size_type upper_bound,
+                                            physical_memory_size_type& value) {
   CachingCgroupController<CgroupMemoryController>* contrl = memory_controller();
   CachedMetric* memory_limit = contrl->metrics_cache();
   if (!memory_limit->should_check_metric()) {
-    return memory_limit->value();
+    value = memory_limit->value();
+    return true;
   }
-  jlong mem_limit = contrl->controller()->read_memory_limit_in_bytes(upper_bound);
+  physical_memory_size_type mem_limit = 0;
+  if (!contrl->controller()->read_memory_limit_in_bytes(upper_bound, mem_limit)) {
+    return false;
+  }
   // Update cached metric to avoid re-reading container settings too often
   memory_limit->set_value(mem_limit, OSCONTAINER_CACHE_TIMEOUT);
-  return mem_limit;
+  value = mem_limit;
+  return true;
 }
 
 bool CgroupController::read_string(const char* filename, char* buf, size_t buf_size) {
```
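The quota arithmetic described in the comment block above (division results rounded up, never exceeding the number of active processors) as a standalone sketch. The function name here is invented; the real computation lives in `CgroupUtil::processor_count`:

```cpp
#include <algorithm>

// ceil(quota / period), capped at the host's active processor count;
// quota <= 0 means "no quota configured", i.e. use all host CPUs.
int quota_based_cpu_count(int quota, int period, int host_cpus) {
  if (quota <= 0 || period <= 0) {
    return host_cpus;
  }
  int quota_count = (quota + period - 1) / period;  // round up
  return std::min(quota_count, host_cpus);
}
// Example: quota = 150000, period = 100000, host_cpus = 8  ->  2
```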
```diff
@@ -719,36 +729,35 @@ bool CgroupController::read_string(const char* filename, char* buf, size_t buf_s
   return true;
 }
 
-bool CgroupController::read_number(const char* filename, julong* result) {
+bool CgroupController::read_number(const char* filename, uint64_t& result) {
   char buf[1024];
   bool is_ok = read_string(filename, buf, 1024);
   if (!is_ok) {
     return false;
   }
-  int matched = sscanf(buf, JULONG_FORMAT, result);
+  int matched = sscanf(buf, UINT64_FORMAT, &result);
   if (matched == 1) {
     return true;
   }
   return false;
 }
 
-bool CgroupController::read_number_handle_max(const char* filename, jlong* result) {
+bool CgroupController::read_number_handle_max(const char* filename, uint64_t& result) {
   char buf[1024];
   bool is_ok = read_string(filename, buf, 1024);
   if (!is_ok) {
     return false;
   }
-  jlong val = limit_from_str(buf);
-  if (val == OSCONTAINER_ERROR) {
+  uint64_t val = 0;
+  if (!limit_from_str(buf, val)) {
    return false;
   }
-  *result = val;
+  result = val;
   return true;
 }
 
-bool CgroupController::read_numerical_key_value(const char* filename, const char* key, julong* result) {
+bool CgroupController::read_numerical_key_value(const char* filename, const char* key, uint64_t& result) {
   assert(key != nullptr, "key must be given");
-  assert(result != nullptr, "result pointer must not be null");
   assert(filename != nullptr, "file to search in must be given");
   const char* s_path = subsystem_path();
   if (s_path == nullptr) {
```
```diff
@@ -786,7 +795,7 @@ bool CgroupController::read_numerical_key_value(const char* filename, const char
         && after_key != '\n') {
       // Skip key, skip space
       const char* value_substr = line + key_len + 1;
-      int matched = sscanf(value_substr, JULONG_FORMAT, result);
+      int matched = sscanf(value_substr, UINT64_FORMAT, &result);
       found_match = matched == 1;
       if (found_match) {
         break;
```
```diff
@@ -797,12 +806,12 @@ bool CgroupController::read_numerical_key_value(const char* filename, const char
   if (found_match) {
     return true;
   }
-  log_debug(os, container)("Type %s (key == %s) not found in file %s", JULONG_FORMAT,
+  log_debug(os, container)("Type %s (key == %s) not found in file %s", UINT64_FORMAT,
                            key, absolute_path);
   return false;
 }
 
-bool CgroupController::read_numerical_tuple_value(const char* filename, bool use_first, jlong* result) {
+bool CgroupController::read_numerical_tuple_value(const char* filename, bool use_first, uint64_t& result) {
   char buf[1024];
   bool is_ok = read_string(filename, buf, 1024);
   if (!is_ok) {
```
```diff
@@ -813,80 +822,90 @@ bool CgroupController::read_numerical_tuple_value(const char* filename, bool use
   if (matched != 1) {
     return false;
   }
-  jlong val = limit_from_str(token);
-  if (val == OSCONTAINER_ERROR) {
+  uint64_t val = 0;
+  if (!limit_from_str(token, val)) {
     return false;
   }
-  *result = val;
+  result = val;
   return true;
 }
 
-jlong CgroupController::limit_from_str(char* limit_str) {
+bool CgroupController::limit_from_str(char* limit_str, uint64_t& value) {
   if (limit_str == nullptr) {
-    return OSCONTAINER_ERROR;
+    return false;
   }
   // Unlimited memory in cgroups is the literal string 'max' for
   // some controllers, for example the pids controller.
   if (strcmp("max", limit_str) == 0) {
-    return (jlong)-1;
+    value = value_unlimited;
+    return true;
   }
-  julong limit;
-  if (sscanf(limit_str, JULONG_FORMAT, &limit) != 1) {
-    return OSCONTAINER_ERROR;
+  uint64_t limit;
+  if (sscanf(limit_str, UINT64_FORMAT, &limit) != 1) {
+    return false;
   }
-  return (jlong)limit;
+  value = limit;
+  return true;
 }
 
 // CgroupSubsystem implementations
 
-jlong CgroupSubsystem::memory_and_swap_limit_in_bytes(julong upper_mem_bound, julong upper_swap_bound) {
-  return memory_controller()->controller()->memory_and_swap_limit_in_bytes(upper_mem_bound, upper_swap_bound);
+bool CgroupSubsystem::memory_and_swap_limit_in_bytes(physical_memory_size_type upper_mem_bound,
+                                                     physical_memory_size_type upper_swap_bound,
+                                                     physical_memory_size_type& value) {
+  return memory_controller()->controller()->memory_and_swap_limit_in_bytes(upper_mem_bound,
+                                                                           upper_swap_bound,
+                                                                           value);
 }
 
-jlong CgroupSubsystem::memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound) {
-  return memory_controller()->controller()->memory_and_swap_usage_in_bytes(upper_mem_bound, upper_swap_bound);
+bool CgroupSubsystem::memory_and_swap_usage_in_bytes(physical_memory_size_type upper_mem_bound,
+                                                     physical_memory_size_type upper_swap_bound,
+                                                     physical_memory_size_type& value) {
+  return memory_controller()->controller()->memory_and_swap_usage_in_bytes(upper_mem_bound,
+                                                                           upper_swap_bound,
+                                                                           value);
 }
 
-jlong CgroupSubsystem::memory_soft_limit_in_bytes(julong upper_bound) {
-  return memory_controller()->controller()->memory_soft_limit_in_bytes(upper_bound);
+bool CgroupSubsystem::memory_soft_limit_in_bytes(physical_memory_size_type upper_bound,
+                                                 physical_memory_size_type& value) {
+  return memory_controller()->controller()->memory_soft_limit_in_bytes(upper_bound, value);
 }
 
-jlong CgroupSubsystem::memory_throttle_limit_in_bytes() {
-  return memory_controller()->controller()->memory_throttle_limit_in_bytes();
+bool CgroupSubsystem::memory_throttle_limit_in_bytes(physical_memory_size_type& value) {
+  return memory_controller()->controller()->memory_throttle_limit_in_bytes(value);
 }
 
-jlong CgroupSubsystem::memory_usage_in_bytes() {
-  return memory_controller()->controller()->memory_usage_in_bytes();
+bool CgroupSubsystem::memory_usage_in_bytes(physical_memory_size_type& value) {
+  return memory_controller()->controller()->memory_usage_in_bytes(value);
 }
 
-jlong CgroupSubsystem::memory_max_usage_in_bytes() {
-  return memory_controller()->controller()->memory_max_usage_in_bytes();
+bool CgroupSubsystem::memory_max_usage_in_bytes(physical_memory_size_type& value) {
+  return memory_controller()->controller()->memory_max_usage_in_bytes(value);
 }
 
-jlong CgroupSubsystem::rss_usage_in_bytes() {
-  return memory_controller()->controller()->rss_usage_in_bytes();
+bool CgroupSubsystem::rss_usage_in_bytes(physical_memory_size_type& value) {
+  return memory_controller()->controller()->rss_usage_in_bytes(value);
 }
 
-jlong CgroupSubsystem::cache_usage_in_bytes() {
-  return memory_controller()->controller()->cache_usage_in_bytes();
+bool CgroupSubsystem::cache_usage_in_bytes(physical_memory_size_type& value) {
+  return memory_controller()->controller()->cache_usage_in_bytes(value);
 }
 
-int CgroupSubsystem::cpu_quota() {
-  return cpu_controller()->controller()->cpu_quota();
+bool CgroupSubsystem::cpu_quota(int& value) {
+  return cpu_controller()->controller()->cpu_quota(value);
 }
 
-int CgroupSubsystem::cpu_period() {
-  return cpu_controller()->controller()->cpu_period();
+bool CgroupSubsystem::cpu_period(int& value) {
+  return cpu_controller()->controller()->cpu_period(value);
 }
 
-int CgroupSubsystem::cpu_shares() {
-  return cpu_controller()->controller()->cpu_shares();
+bool CgroupSubsystem::cpu_shares(int& value) {
+  return cpu_controller()->controller()->cpu_shares(value);
 }
 
-jlong CgroupSubsystem::cpu_usage_in_micros() {
-  return cpuacct_controller()->cpu_usage_in_micros();
+bool CgroupSubsystem::cpu_usage_in_micros(uint64_t& value) {
+  return cpuacct_controller()->cpu_usage_in_micros(value);
 }
 
-void CgroupSubsystem::print_version_specific_info(outputStream* st, julong upper_mem_bound) {
+void CgroupSubsystem::print_version_specific_info(outputStream* st, physical_memory_size_type upper_mem_bound) {
   memory_controller()->controller()->print_version_specific_info(st, upper_mem_bound);
 }
```
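The convention this whole change adopts, returning bool for success and passing the value out through a reference, is easiest to see in `limit_from_str`. A self-contained sketch, under the assumption that `value_unlimited` is a large sentinel such as `UINT64_MAX` (its actual definition is not shown in this excerpt):

```cpp
#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <cstring>

static const uint64_t value_unlimited = UINT64_MAX;  // assumed sentinel

// "max" in a cgroup interface file means unlimited; otherwise parse a
// uint64_t. The bool return carries success/failure, the reference the value.
bool limit_from_str(const char* limit_str, uint64_t& value) {
  if (limit_str == nullptr) {
    return false;
  }
  if (strcmp("max", limit_str) == 0) {
    value = value_unlimited;
    return true;
  }
  uint64_t limit;
  if (sscanf(limit_str, "%" SCNu64, &limit) != 1) {
    return false;
  }
  value = limit;
  return true;
}
```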
```diff
@@ -72,23 +72,29 @@
 #define CONTAINER_READ_NUMBER_CHECKED(controller, filename, log_string, retval)      \
 {                                                                                    \
   bool is_ok;                                                                        \
-  is_ok = controller->read_number(filename, &retval);                                \
+  is_ok = controller->read_number(filename, retval);                                 \
   if (!is_ok) {                                                                      \
-    log_trace(os, container)(log_string " failed: %d", OSCONTAINER_ERROR);           \
-    return OSCONTAINER_ERROR;                                                        \
+    log_trace(os, container)(log_string " failed");                                  \
+    return false;                                                                    \
   }                                                                                  \
-  log_trace(os, container)(log_string " is: " JULONG_FORMAT, retval);                \
+  log_trace(os, container)(log_string " is: " UINT64_FORMAT, retval);                \
+  return true;                                                                       \
 }
 
 #define CONTAINER_READ_NUMBER_CHECKED_MAX(controller, filename, log_string, retval)  \
 {                                                                                    \
   bool is_ok;                                                                        \
-  is_ok = controller->read_number_handle_max(filename, &retval);                     \
+  is_ok = controller->read_number_handle_max(filename, retval);                      \
   if (!is_ok) {                                                                      \
-    log_trace(os, container)(log_string " failed: %d", OSCONTAINER_ERROR);           \
-    return OSCONTAINER_ERROR;                                                        \
+    log_trace(os, container)(log_string " failed");                                  \
+    return false;                                                                    \
   }                                                                                  \
-  log_trace(os, container)(log_string " is: " JLONG_FORMAT, retval);                 \
+  if (retval == value_unlimited) {                                                   \
+    log_trace(os, container)(log_string " is: unlimited");                           \
+  } else {                                                                           \
+    log_trace(os, container)(log_string " is: " UINT64_FORMAT, retval);              \
+  }                                                                                  \
+  return true;                                                                       \
 }
 
 #define CONTAINER_READ_STRING_CHECKED(controller, filename, log_string, retval, buf_size) \
```
```diff
@@ -96,7 +102,7 @@
   bool is_ok;                                                                        \
   is_ok = controller->read_string(filename, retval, buf_size);                       \
   if (!is_ok) {                                                                      \
-    log_trace(os, container)(log_string " failed: %d", OSCONTAINER_ERROR);           \
+    log_trace(os, container)(log_string " failed");                                  \
     return nullptr;                                                                  \
   }                                                                                  \
   log_trace(os, container)(log_string " is: %s", retval);                            \
```
```diff
@@ -105,12 +111,13 @@
 #define CONTAINER_READ_NUMERICAL_KEY_VALUE_CHECKED(controller, filename, key, log_string, retval) \
 {                                                                                    \
   bool is_ok;                                                                        \
-  is_ok = controller->read_numerical_key_value(filename, key, &retval);              \
+  is_ok = controller->read_numerical_key_value(filename, key, retval);               \
   if (!is_ok) {                                                                      \
-    log_trace(os, container)(log_string " failed: %d", OSCONTAINER_ERROR);           \
-    return OSCONTAINER_ERROR;                                                        \
+    log_trace(os, container)(log_string " failed");                                  \
+    return false;                                                                    \
   }                                                                                  \
-  log_trace(os, container)(log_string " is: " JULONG_FORMAT, retval);                \
+  log_trace(os, container)(log_string " is: " UINT64_FORMAT, retval);                \
+  return true;                                                                       \
 }
 
 class CgroupController: public CHeapObj<mtInternal> {
```
@@ -124,21 +131,22 @@ class CgroupController: public CHeapObj<mtInternal> {
|
||||
const char* mount_point() { return _mount_point; }
|
||||
virtual bool needs_hierarchy_adjustment() { return false; }
|
||||
|
||||
/* Read a numerical value as unsigned long
|
||||
/* Read a numerical value as uint64_t
|
||||
*
|
||||
* returns: false if any error occurred. true otherwise and
|
||||
* the parsed value is set in the provided julong pointer.
|
||||
* the parsed value is set in the provided result reference.
|
||||
*/
|
||||
bool read_number(const char* filename, julong* result);
|
||||
bool read_number(const char* filename, uint64_t& result);
|
||||
|
||||
/* Convenience method to deal with numbers as well as the string 'max'
|
||||
* in interface files. Otherwise same as read_number().
|
||||
*
|
||||
* returns: false if any error occurred. true otherwise and
|
||||
* the parsed value (which might be negative) is being set in
|
||||
* the provided jlong pointer.
|
||||
* the parsed value will be set in the provided result reference.
|
||||
* When the value was the string 'max' then 'value_unlimited' is
|
||||
* being set as the value.
|
||||
*/
|
||||
bool read_number_handle_max(const char* filename, jlong* result);
|
||||
bool read_number_handle_max(const char* filename, uint64_t& result);
|
||||
|
||||
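(Sketch of the documented 'max' handling, with a hypothetical 'ctrl'; per the comment
above, the literal string "max" in an interface file is reported as 'value_unlimited':)

uint64_t limit = 0;
if (ctrl->read_number_handle_max("/pids.max", limit)) {
  if (limit == value_unlimited) {
    // the file contained "max": no limit is configured
  } else {
    // the file contained a plain number, now held in 'limit'
  }
}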
    /* Read a string of at most buf_size - 1 characters from the interface file.
     * The provided buffer must be at least buf_size in size so as to account
@@ -156,37 +164,37 @@ class CgroupController: public CHeapObj<mtInternal> {
     * parsing interface files like cpu.max which contain such tuples.
     *
     * returns: false if any error occurred. true otherwise and the parsed
     * value of the appropriate tuple entry set in the provided jlong pointer.
     * value of the appropriate tuple entry set in the provided result reference.
     */
    bool read_numerical_tuple_value(const char* filename, bool use_first, jlong* result);
    bool read_numerical_tuple_value(const char* filename, bool use_first, uint64_t& result);

    /* Read a numerical value from a multi-line interface file. The matched line is
     * determined by the provided 'key'. The associated numerical value is being set
     * via the passed in julong pointer. Example interface file 'memory.stat'
     * via the passed in result reference. Example interface file 'memory.stat'
     *
     * returns: false if any error occurred. true otherwise and the parsed value is
     * being set in the provided julong pointer.
     * being set in the provided result reference.
     */
    bool read_numerical_key_value(const char* filename, const char* key, julong* result);
    bool read_numerical_key_value(const char* filename, const char* key, uint64_t& result);

  private:
    static jlong limit_from_str(char* limit_str);
    static bool limit_from_str(char* limit_str, physical_memory_size_type& value);
};

class CachedMetric : public CHeapObj<mtInternal>{
  private:
    volatile jlong _metric;
    volatile physical_memory_size_type _metric;
    volatile jlong _next_check_counter;
  public:
    CachedMetric() {
      _metric = -1;
      _metric = value_unlimited;
      _next_check_counter = min_jlong;
    }
    bool should_check_metric() {
      return os::elapsed_counter() > _next_check_counter;
    }
    jlong value() { return _metric; }
    void set_value(jlong value, jlong timeout) {
    physical_memory_size_type value() { return _metric; }
    void set_value(physical_memory_size_type value, jlong timeout) {
      _metric = value;
      // Metric is unlikely to change, but we want to remain
      // responsive to configuration changes. A very short grace time
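(Illustrative sketch of the caching pattern around CachedMetric, not part of the diff;
'read_limit_from_controller' and 'cache_timeout' are hypothetical stand-ins:)

physical_memory_size_type cached_limit(CachedMetric* metric) {
  if (metric->should_check_metric()) {
    // grace period expired: re-read the interface file and re-arm the timer
    metric->set_value(read_limit_from_controller(), cache_timeout);
  }
  // otherwise serve the cached value without touching the filesystem
  return metric->value();
}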
@@ -216,9 +224,9 @@ class CachingCgroupController : public CHeapObj<mtInternal> {
// Pure virtual class representing version agnostic CPU controllers
class CgroupCpuController: public CHeapObj<mtInternal> {
  public:
    virtual int cpu_quota() = 0;
    virtual int cpu_period() = 0;
    virtual int cpu_shares() = 0;
    virtual bool cpu_quota(int& value) = 0;
    virtual bool cpu_period(int& value) = 0;
    virtual bool cpu_shares(int& value) = 0;
    virtual bool needs_hierarchy_adjustment() = 0;
    virtual bool is_read_only() = 0;
    virtual const char* subsystem_path() = 0;
@@ -230,7 +238,7 @@ class CgroupCpuController: public CHeapObj<mtInternal> {
// Pure virtual class representing version agnostic CPU accounting controllers
class CgroupCpuacctController: public CHeapObj<mtInternal> {
  public:
    virtual jlong cpu_usage_in_micros() = 0;
    virtual bool cpu_usage_in_micros(uint64_t& value) = 0;
    virtual bool needs_hierarchy_adjustment() = 0;
    virtual bool is_read_only() = 0;
    virtual const char* subsystem_path() = 0;
@@ -242,16 +250,22 @@ class CgroupCpuacctController: public CHeapObj<mtInternal> {
// Pure virtual class representing version agnostic memory controllers
class CgroupMemoryController: public CHeapObj<mtInternal> {
  public:
    virtual jlong read_memory_limit_in_bytes(julong upper_bound) = 0;
    virtual jlong memory_usage_in_bytes() = 0;
    virtual jlong memory_and_swap_limit_in_bytes(julong upper_mem_bound, julong upper_swap_bound) = 0;
    virtual jlong memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound) = 0;
    virtual jlong memory_soft_limit_in_bytes(julong upper_bound) = 0;
    virtual jlong memory_throttle_limit_in_bytes() = 0;
    virtual jlong memory_max_usage_in_bytes() = 0;
    virtual jlong rss_usage_in_bytes() = 0;
    virtual jlong cache_usage_in_bytes() = 0;
    virtual void print_version_specific_info(outputStream* st, julong upper_mem_bound) = 0;
    virtual bool read_memory_limit_in_bytes(physical_memory_size_type upper_bound,
                                            physical_memory_size_type& value) = 0;
    virtual bool memory_usage_in_bytes(physical_memory_size_type& value) = 0;
    virtual bool memory_and_swap_limit_in_bytes(physical_memory_size_type upper_mem_bound,
                                                physical_memory_size_type upper_swap_bound,
                                                physical_memory_size_type& value) = 0;
    virtual bool memory_and_swap_usage_in_bytes(physical_memory_size_type upper_mem_bound,
                                                physical_memory_size_type upper_swap_bound,
                                                physical_memory_size_type& value) = 0;
    virtual bool memory_soft_limit_in_bytes(physical_memory_size_type upper_bound,
                                            physical_memory_size_type& value) = 0;
    virtual bool memory_throttle_limit_in_bytes(physical_memory_size_type& value) = 0;
    virtual bool memory_max_usage_in_bytes(physical_memory_size_type& value) = 0;
    virtual bool rss_usage_in_bytes(physical_memory_size_type& value) = 0;
    virtual bool cache_usage_in_bytes(physical_memory_size_type& value) = 0;
    virtual void print_version_specific_info(outputStream* st, physical_memory_size_type upper_mem_bound) = 0;
    virtual bool needs_hierarchy_adjustment() = 0;
    virtual bool is_read_only() = 0;
    virtual const char* subsystem_path() = 0;
@@ -262,11 +276,11 @@ class CgroupMemoryController: public CHeapObj<mtInternal> {

class CgroupSubsystem: public CHeapObj<mtInternal> {
  public:
    jlong memory_limit_in_bytes(julong upper_bound);
    int active_processor_count();
    bool memory_limit_in_bytes(physical_memory_size_type upper_bound, physical_memory_size_type& value);
    bool active_processor_count(int& value);

    virtual jlong pids_max() = 0;
    virtual jlong pids_current() = 0;
    virtual bool pids_max(uint64_t& value) = 0;
    virtual bool pids_current(uint64_t& value) = 0;
    virtual bool is_containerized() = 0;

    virtual char * cpu_cpuset_cpus() = 0;
@@ -276,21 +290,26 @@ class CgroupSubsystem: public CHeapObj<mtInternal> {
    virtual CachingCgroupController<CgroupCpuController>* cpu_controller() = 0;
    virtual CgroupCpuacctController* cpuacct_controller() = 0;

    int cpu_quota();
    int cpu_period();
    int cpu_shares();
    bool cpu_quota(int& value);
    bool cpu_period(int& value);
    bool cpu_shares(int& value);

    jlong cpu_usage_in_micros();
    bool cpu_usage_in_micros(uint64_t& value);

    jlong memory_usage_in_bytes();
    jlong memory_and_swap_limit_in_bytes(julong upper_mem_bound, julong upper_swap_bound);
    jlong memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound);
    jlong memory_soft_limit_in_bytes(julong upper_bound);
    jlong memory_throttle_limit_in_bytes();
    jlong memory_max_usage_in_bytes();
    jlong rss_usage_in_bytes();
    jlong cache_usage_in_bytes();
    void print_version_specific_info(outputStream* st, julong upper_mem_bound);
    bool memory_usage_in_bytes(physical_memory_size_type& value);
    bool memory_and_swap_limit_in_bytes(physical_memory_size_type upper_mem_bound,
                                        physical_memory_size_type upper_swap_bound,
                                        physical_memory_size_type& value);
    bool memory_and_swap_usage_in_bytes(physical_memory_size_type upper_mem_bound,
                                        physical_memory_size_type upper_swap_bound,
                                        physical_memory_size_type& value);
    bool memory_soft_limit_in_bytes(physical_memory_size_type upper_bound,
                                    physical_memory_size_type& value);
    bool memory_throttle_limit_in_bytes(physical_memory_size_type& value);
    bool memory_max_usage_in_bytes(physical_memory_size_type& value);
    bool rss_usage_in_bytes(physical_memory_size_type& value);
    bool cache_usage_in_bytes(physical_memory_size_type& value);
    void print_version_specific_info(outputStream* st, physical_memory_size_type upper_mem_bound);
};

// Utility class for storing info retrieved from /proc/cgroups,

@@ -25,13 +25,19 @@
#include "cgroupUtil_linux.hpp"
#include "os_linux.hpp"

int CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int host_cpus) {
  assert(host_cpus > 0, "physical host cpus must be positive");
  int limit_count = host_cpus;
  int quota = cpu_ctrl->cpu_quota();
  int period = cpu_ctrl->cpu_period();
bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound, int& value) {
  assert(upper_bound > 0, "upper bound of cpus must be positive");
  int limit_count = upper_bound;
  int quota = -1;
  int period = -1;
  if (!cpu_ctrl->cpu_quota(quota)) {
    return false;
  }
  if (!cpu_ctrl->cpu_period(period)) {
    return false;
  }
  int quota_count = 0;
  int result = 0;
  int result = upper_bound;

  if (quota > -1 && period > 0) {
    quota_count = ceilf((float)quota / (float)period);
@@ -43,16 +49,50 @@ int CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int host_cpus) {
    limit_count = quota_count;
  }

  result = MIN2(host_cpus, limit_count);
  result = MIN2(upper_bound, limit_count);
  log_trace(os, container)("OSContainer::active_processor_count: %d", result);
  return result;
  value = result;
  return true;
}
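(Worked example for the quota/period math above: with cpu.cfs_quota_us = 150000 and
cpu.cfs_period_us = 100000, quota_count = ceilf(150000.0f / 100000.0f) = 2, so on an
8-cpu host processor_count() stores MIN2(8, 2) = 2 into 'value'; an unlimited quota
of -1 leaves limit_count at the upper bound instead.)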

// Get an updated memory limit. The return value is strictly less than or equal to the
// passed in 'lowest' value.
physical_memory_size_type CgroupUtil::get_updated_mem_limit(CgroupMemoryController* mem,
                                                            physical_memory_size_type lowest,
                                                            physical_memory_size_type upper_bound) {
  assert(lowest <= upper_bound, "invariant");
  physical_memory_size_type current_limit = value_unlimited;
  if (mem->read_memory_limit_in_bytes(upper_bound, current_limit) && current_limit != value_unlimited) {
    assert(current_limit <= upper_bound, "invariant");
    if (lowest > current_limit) {
      return current_limit;
    }
  }
  return lowest;
}

// Get an updated cpu limit. The return value is strictly less than or equal to the
// passed in 'lowest' value.
int CgroupUtil::get_updated_cpu_limit(CgroupCpuController* cpu,
                                      int lowest,
                                      int upper_bound) {
  assert(lowest > 0 && lowest <= upper_bound, "invariant");
  int cpu_limit_val = -1;
  if (CgroupUtil::processor_count(cpu, upper_bound, cpu_limit_val) && cpu_limit_val != upper_bound) {
    assert(cpu_limit_val <= upper_bound, "invariant");
    if (lowest > cpu_limit_val) {
      return cpu_limit_val;
    }
  }
  return lowest;
}
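(Both helpers above share a "monotone tightening" contract: the returned limit never
exceeds the passed-in 'lowest', so repeated calls while walking the hierarchy can only
keep or lower the bound, e.g. lowest = get_updated_mem_limit(mem, lowest, phys_mem)
is safe to apply at every level without re-checking.)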

void CgroupUtil::adjust_controller(CgroupMemoryController* mem) {
  assert(mem->cgroup_path() != nullptr, "invariant");
  if (strstr(mem->cgroup_path(), "../") != nullptr) {
    log_warning(os, container)("Cgroup memory controller path at '%s' seems to have moved to '%s', detected limits won't be accurate",
                               mem->mount_point(), mem->cgroup_path());
    log_warning(os, container)("Cgroup memory controller path at '%s' seems to have moved "
                               "to '%s'. Detected limits won't be accurate",
                               mem->mount_point(), mem->cgroup_path());
    mem->set_subsystem_path("/");
    return;
  }
@@ -65,17 +105,18 @@ void CgroupUtil::adjust_controller(CgroupMemoryController* mem) {
  char* cg_path = os::strdup(orig);
  char* last_slash;
  assert(cg_path[0] == '/', "cgroup path must start with '/'");
  julong phys_mem = static_cast<julong>(os::Linux::physical_memory());
  physical_memory_size_type phys_mem = os::Linux::physical_memory();
  char* limit_cg_path = nullptr;
  jlong limit = mem->read_memory_limit_in_bytes(phys_mem);
  jlong lowest_limit = limit < 0 ? phys_mem : limit;
  julong orig_limit = ((julong)lowest_limit) != phys_mem ? lowest_limit : phys_mem;
  physical_memory_size_type limit = value_unlimited;
  physical_memory_size_type lowest_limit = phys_mem;
  lowest_limit = get_updated_mem_limit(mem, lowest_limit, phys_mem);
  physical_memory_size_type orig_limit = lowest_limit != phys_mem ? lowest_limit : phys_mem;
  while ((last_slash = strrchr(cg_path, '/')) != cg_path) {
    *last_slash = '\0'; // strip path
    // update to shortened path and try again
    mem->set_subsystem_path(cg_path);
    limit = mem->read_memory_limit_in_bytes(phys_mem);
    if (limit >= 0 && limit < lowest_limit) {
    limit = get_updated_mem_limit(mem, lowest_limit, phys_mem);
    if (limit < lowest_limit) {
      lowest_limit = limit;
      os::free(limit_cg_path); // handles nullptr
      limit_cg_path = os::strdup(cg_path);
@@ -83,24 +124,24 @@
  }
  // need to check limit at mount point
  mem->set_subsystem_path("/");
  limit = mem->read_memory_limit_in_bytes(phys_mem);
  if (limit >= 0 && limit < lowest_limit) {
  limit = get_updated_mem_limit(mem, lowest_limit, phys_mem);
  if (limit < lowest_limit) {
    lowest_limit = limit;
    os::free(limit_cg_path); // handles nullptr
    limit_cg_path = os::strdup("/");
  }
  assert(lowest_limit >= 0, "limit must be positive");
  if ((julong)lowest_limit != orig_limit) {
  assert(lowest_limit <= phys_mem, "limit must not exceed host memory");
  if (lowest_limit != orig_limit) {
    // we've found a lower limit anywhere in the hierarchy,
    // set the path to the limit path
    assert(limit_cg_path != nullptr, "limit path must be set");
    mem->set_subsystem_path(limit_cg_path);
    log_trace(os, container)("Adjusted controller path for memory to: %s. "
                             "Lowest limit was: " JLONG_FORMAT,
                             "Lowest limit was: " PHYS_MEM_TYPE_FORMAT,
                             mem->subsystem_path(),
                             lowest_limit);
  } else {
    log_trace(os, container)("Lowest limit was: " JLONG_FORMAT, lowest_limit);
    log_trace(os, container)("Lowest limit was: " PHYS_MEM_TYPE_FORMAT, lowest_limit);
    log_trace(os, container)("No lower limit found for memory in hierarchy %s, "
                             "adjusting to original path %s",
                             mem->mount_point(), orig);
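(Illustrative trace of the path-shortening loop above: starting from cg_path =
"/a/b/c", each strrchr() iteration trims one component, so the limit is probed at
"/a/b/c", "/a/b", "/a" and finally at the mount point "/"; the subsystem path ends up
pointing at whichever level reported the lowest limit.)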
@@ -114,8 +155,9 @@ void CgroupUtil::adjust_controller(CgroupMemoryController* mem) {
void CgroupUtil::adjust_controller(CgroupCpuController* cpu) {
  assert(cpu->cgroup_path() != nullptr, "invariant");
  if (strstr(cpu->cgroup_path(), "../") != nullptr) {
    log_warning(os, container)("Cgroup cpu controller path at '%s' seems to have moved to '%s', detected limits won't be accurate",
                               cpu->mount_point(), cpu->cgroup_path());
    log_warning(os, container)("Cgroup cpu controller path at '%s' seems to have moved "
                               "to '%s'. Detected limits won't be accurate",
                               cpu->mount_point(), cpu->cgroup_path());
    cpu->set_subsystem_path("/");
    return;
  }
@@ -129,15 +171,15 @@ void CgroupUtil::adjust_controller(CgroupCpuController* cpu) {
  char* last_slash;
  assert(cg_path[0] == '/', "cgroup path must start with '/'");
  int host_cpus = os::Linux::active_processor_count();
  int cpus = CgroupUtil::processor_count(cpu, host_cpus);
  int lowest_limit = cpus < host_cpus ? cpus : host_cpus;
  int lowest_limit = host_cpus;
  int cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
  int orig_limit = lowest_limit != host_cpus ? lowest_limit : host_cpus;
  char* limit_cg_path = nullptr;
  while ((last_slash = strrchr(cg_path, '/')) != cg_path) {
    *last_slash = '\0'; // strip path
    // update to shortened path and try again
    cpu->set_subsystem_path(cg_path);
    cpus = CgroupUtil::processor_count(cpu, host_cpus);
    cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
    if (cpus != host_cpus && cpus < lowest_limit) {
      lowest_limit = cpus;
      os::free(limit_cg_path); // handles nullptr
@@ -146,7 +188,7 @@ void CgroupUtil::adjust_controller(CgroupCpuController* cpu) {
  }
  // need to check limit at mount point
  cpu->set_subsystem_path("/");
  cpus = CgroupUtil::processor_count(cpu, host_cpus);
  cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
  if (cpus != host_cpus && cpus < lowest_limit) {
    lowest_limit = cpus;
    os::free(limit_cg_path); // handles nullptr
@@ -160,8 +202,7 @@ void CgroupUtil::adjust_controller(CgroupCpuController* cpu) {
    cpu->set_subsystem_path(limit_cg_path);
    log_trace(os, container)("Adjusted controller path for cpu to: %s. "
                             "Lowest limit was: %d",
                             cpu->subsystem_path(),
                             lowest_limit);
    cpu->subsystem_path(), lowest_limit);
  } else {
    log_trace(os, container)("Lowest limit was: %d", lowest_limit);
    log_trace(os, container)("No lower limit found for cpu in hierarchy %s, "

@@ -31,13 +31,20 @@
class CgroupUtil: AllStatic {

 public:
  static int processor_count(CgroupCpuController* cpu, int host_cpus);
  static bool processor_count(CgroupCpuController* cpu, int upper_bound, int& value);
  // Given a memory controller, adjust its path to a point in the hierarchy
  // that represents the closest memory limit.
  static void adjust_controller(CgroupMemoryController* m);
  // Given a cpu controller, adjust its path to a point in the hierarchy
  // that represents the closest cpu limit.
  static void adjust_controller(CgroupCpuController* c);
 private:
  static physical_memory_size_type get_updated_mem_limit(CgroupMemoryController* m,
                                                         physical_memory_size_type lowest,
                                                         physical_memory_size_type upper_bound);
  static int get_updated_cpu_limit(CgroupCpuController* c,
                                   int lowest,
                                   int upper_bound);
};

#endif // CGROUP_UTIL_LINUX_HPP

@@ -124,10 +124,13 @@ void CgroupV1Controller::set_subsystem_path(const char* cgroup_path) {
  }
}

jlong CgroupV1MemoryController::uses_mem_hierarchy() {
  julong use_hierarchy;
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.use_hierarchy", "Use Hierarchy", use_hierarchy);
  return (jlong)use_hierarchy;
bool CgroupV1MemoryController::read_use_hierarchy_val(physical_memory_size_type& result) {
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.use_hierarchy", "Use Hierarchy", result);
}

bool CgroupV1MemoryController::uses_mem_hierarchy() {
  physical_memory_size_type use_hierarchy = 0;
  return read_use_hierarchy_val(use_hierarchy) && use_hierarchy > 0;
}

/*
@@ -141,125 +144,177 @@ bool CgroupV1Controller::needs_hierarchy_adjustment() {
  return strcmp(_root, _cgroup_path) != 0;
}

static inline
void verbose_log(julong read_mem_limit, julong upper_mem_bound) {
  if (log_is_enabled(Debug, os, container)) {
    jlong mem_limit = (jlong)read_mem_limit; // account for negative values
    if (mem_limit < 0 || read_mem_limit >= upper_mem_bound) {
      const char *reason;
      if (mem_limit == OSCONTAINER_ERROR) {
        reason = "failed";
      } else if (mem_limit == -1) {
        reason = "unlimited";
      } else {
        assert(read_mem_limit >= upper_mem_bound, "Expected read value exceeding upper memory bound");
        // Exceeding physical memory is treated as unlimited. This implementation
        // caps it at host_mem since Cg v1 has no value to represent 'max'.
        reason = "ignored";
      }
      log_debug(os, container)("container memory limit %s: " JLONG_FORMAT ", upper bound is " JLONG_FORMAT,
                               reason, mem_limit, upper_mem_bound);
bool CgroupV1MemoryController::read_memory_limit_val(physical_memory_size_type& result) {
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.limit_in_bytes", "Memory Limit", result);
}

bool CgroupV1MemoryController::read_hierarchical_memory_limit_val(physical_memory_size_type& result) {
  CONTAINER_READ_NUMERICAL_KEY_VALUE_CHECKED(reader(), "/memory.stat",
                                             "hierarchical_memory_limit", "Hierarchical Memory Limit",
                                             result);
}

bool CgroupV1MemoryController::read_memory_limit_in_bytes(physical_memory_size_type upper_bound,
                                                          physical_memory_size_type& result) {
  physical_memory_size_type memlimit = 0;
  if (!read_memory_limit_val(memlimit)) {
    log_trace(os, container)("container memory limit failed, upper bound is " PHYS_MEM_TYPE_FORMAT, upper_bound);
    return false;
  }
  if (memlimit >= upper_bound) {
    physical_memory_size_type hierlimit = 0;
    if (uses_mem_hierarchy() && read_hierarchical_memory_limit_val(hierlimit) &&
        hierlimit < upper_bound) {
      log_trace(os, container)("Memory Limit is: " PHYS_MEM_TYPE_FORMAT, hierlimit);
      result = hierlimit;
    } else {
      // Exceeding physical memory is treated as unlimited. This implementation
      // caps it at host_mem since Cg v1 has no value to represent 'max'.
      log_trace(os, container)("container memory limit ignored: " PHYS_MEM_TYPE_FORMAT
                               ", upper bound is " PHYS_MEM_TYPE_FORMAT, memlimit, upper_bound);
      result = value_unlimited;
    }
  } else {
    result = memlimit;
  }
  return true;
}
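(Worked example for the fallback above, assuming the usual cgroup v1 behavior where an
unset memory.limit_in_bytes reads back as a huge page-rounded value such as
9223372036854771712: with upper_bound = 8 GiB that reading satisfies
memlimit >= upper_bound, so hierarchical_memory_limit from memory.stat is consulted;
a hierarchical value of, say, 2 GiB becomes the result, otherwise the limit is
reported as value_unlimited.)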

jlong CgroupV1MemoryController::read_memory_limit_in_bytes(julong upper_bound) {
  julong memlimit;
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.limit_in_bytes", "Memory Limit", memlimit);
  if (memlimit >= upper_bound && uses_mem_hierarchy()) {
    CONTAINER_READ_NUMERICAL_KEY_VALUE_CHECKED(reader(), "/memory.stat",
                                               "hierarchical_memory_limit", "Hierarchical Memory Limit",
                                               memlimit);
  }
  verbose_log(memlimit, upper_bound);
  return (jlong)((memlimit < upper_bound) ? memlimit : -1);
bool CgroupV1MemoryController::read_mem_swap(physical_memory_size_type& result) {
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.memsw.limit_in_bytes", "Memory and Swap Limit", result);
}

/* read_mem_swap
bool CgroupV1MemoryController::read_hierarchical_mem_swap_val(physical_memory_size_type& result) {
  CONTAINER_READ_NUMERICAL_KEY_VALUE_CHECKED(reader(), "/memory.stat",
                                             "hierarchical_memsw_limit", "Hierarchical Memory and Swap Limit",
                                             result);
}

/* memory_and_swap_limit_in_bytes
 *
 * Determine the memory and swap limit metric. Returns a positive limit value strictly
 * lower than the physical memory and swap limit iff there is a limit. Otherwise a
 * negative value is returned indicating the determined status.
 * Determine the memory and swap limit metric. Sets the 'result' reference to a positive limit value or
 * 'value_unlimited' (for unlimited).
 *
 * returns:
 *    * A number > 0 if the limit is available and lower than a physical upper bound.
 *    * OSCONTAINER_ERROR if the limit cannot be retrieved (i.e. not supported) or
 *    * -1 if there isn't any limit in place (note: includes values which exceed a physical
 *      upper bound)
 *    * false if an error occurred. 'result' reference remains unchanged.
 *    * true if the limit value has been set in the 'result' reference
 */
jlong CgroupV1MemoryController::read_mem_swap(julong upper_memsw_bound) {
  julong memswlimit;
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.memsw.limit_in_bytes", "Memory and Swap Limit", memswlimit);
  if (memswlimit >= upper_memsw_bound && uses_mem_hierarchy()) {
    CONTAINER_READ_NUMERICAL_KEY_VALUE_CHECKED(reader(), "/memory.stat",
                                               "hierarchical_memsw_limit", "Hierarchical Memory and Swap Limit",
                                               memswlimit);
bool CgroupV1MemoryController::memory_and_swap_limit_in_bytes(physical_memory_size_type upper_mem_bound,
                                                              physical_memory_size_type upper_swap_bound,
                                                              physical_memory_size_type& result) {
  physical_memory_size_type total_mem_swap = upper_mem_bound + upper_swap_bound;
  physical_memory_size_type memory_swap = 0;
  bool mem_swap_read_failed = false;
  if (!read_mem_swap(memory_swap)) {
    mem_swap_read_failed = true;
  }
  if (memory_swap >= total_mem_swap) {
    physical_memory_size_type hiermswlimit = 0;
    if (uses_mem_hierarchy() && read_hierarchical_mem_swap_val(hiermswlimit) &&
        hiermswlimit < total_mem_swap) {
      log_trace(os, container)("Memory and Swap Limit is: " PHYS_MEM_TYPE_FORMAT, hiermswlimit);
      memory_swap = hiermswlimit;
    } else {
      memory_swap = value_unlimited;
    }
  }
  if (memory_swap == value_unlimited) {
    log_trace(os, container)("Memory and Swap Limit is: Unlimited");
    result = value_unlimited;
    return true;
  }
  verbose_log(memswlimit, upper_memsw_bound);
  return (jlong)((memswlimit < upper_memsw_bound) ? memswlimit : -1);
}

jlong CgroupV1MemoryController::memory_and_swap_limit_in_bytes(julong upper_mem_bound, julong upper_swap_bound) {
  jlong memory_swap = read_mem_swap(upper_mem_bound + upper_swap_bound);
  if (memory_swap == -1) {
    return memory_swap;
  }
  // If there is a swap limit, but swappiness == 0, reset the limit
  // to the memory limit. Do the same for cases where swap isn't
  // supported.
  jlong swappiness = read_mem_swappiness();
  if (swappiness == 0 || memory_swap == OSCONTAINER_ERROR) {
    jlong memlimit = read_memory_limit_in_bytes(upper_mem_bound);
    if (memory_swap == OSCONTAINER_ERROR) {
      log_trace(os, container)("Memory and Swap Limit has been reset to " JLONG_FORMAT " because swap is not supported", memlimit);
    } else {
      log_trace(os, container)("Memory and Swap Limit has been reset to " JLONG_FORMAT " because swappiness is 0", memlimit);
    }
    return memlimit;
  physical_memory_size_type swappiness = 0;
  if (!read_mem_swappiness(swappiness)) {
    // assume no swap
    mem_swap_read_failed = true;
  }
  return memory_swap;
  if (swappiness == 0 || mem_swap_read_failed) {
    physical_memory_size_type memlimit = value_unlimited;
    if (!read_memory_limit_in_bytes(upper_mem_bound, memlimit)) {
      return false;
    }
    if (memlimit == value_unlimited) {
      result = value_unlimited; // No memory limit, thus no swap limit
      return true;
    }
    if (mem_swap_read_failed) {
      log_trace(os, container)("Memory and Swap Limit has been reset to " PHYS_MEM_TYPE_FORMAT
                               " because swap is not supported", memlimit);
    } else {
      log_trace(os, container)("Memory and Swap Limit has been reset to " PHYS_MEM_TYPE_FORMAT
                               " because swappiness is 0", memlimit);
    }
    result = memlimit;
    return true;
  }
  result = memory_swap;
  return true;
}
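(Example of the swappiness handling above: when memory.memsw.limit_in_bytes cannot be
read (e.g. a kernel without swap accounting) or memory.swappiness is 0, the effective
memory-and-swap limit collapses to the plain memory limit, since the container cannot
actually use swap in either case.)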

static inline
jlong memory_swap_usage_impl(CgroupController* ctrl) {
  julong memory_swap_usage;
  CONTAINER_READ_NUMBER_CHECKED(ctrl, "/memory.memsw.usage_in_bytes", "mem swap usage", memory_swap_usage);
  return (jlong)memory_swap_usage;
bool memory_swap_usage_impl(CgroupController* ctrl, physical_memory_size_type& result) {
  CONTAINER_READ_NUMBER_CHECKED(ctrl, "/memory.memsw.usage_in_bytes", "mem swap usage", result);
}

jlong CgroupV1MemoryController::memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound) {
  jlong memory_sw_limit = memory_and_swap_limit_in_bytes(upper_mem_bound, upper_swap_bound);
  jlong memory_limit = read_memory_limit_in_bytes(upper_mem_bound);
  if (memory_sw_limit > 0 && memory_limit > 0) {
    jlong delta_swap = memory_sw_limit - memory_limit;
    if (delta_swap > 0) {
      return memory_swap_usage_impl(reader());
bool CgroupV1MemoryController::memory_and_swap_usage_in_bytes(physical_memory_size_type upper_mem_bound,
                                                              physical_memory_size_type upper_swap_bound,
                                                              physical_memory_size_type& result) {
  physical_memory_size_type memory_sw_limit = value_unlimited;
  if (!memory_and_swap_limit_in_bytes(upper_mem_bound, upper_swap_bound, memory_sw_limit)) {
    return false;
  }
  physical_memory_size_type mem_limit_val = value_unlimited;
  physical_memory_size_type memory_limit = value_unlimited;
  if (read_memory_limit_in_bytes(upper_mem_bound, mem_limit_val)) {
    if (mem_limit_val != value_unlimited) {
      memory_limit = mem_limit_val;
    }
  }
  return memory_usage_in_bytes();
}

jlong CgroupV1MemoryController::read_mem_swappiness() {
  julong swappiness;
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.swappiness", "Swappiness", swappiness);
  return (jlong)swappiness;
}

jlong CgroupV1MemoryController::memory_soft_limit_in_bytes(julong upper_bound) {
  julong memsoftlimit;
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.soft_limit_in_bytes", "Memory Soft Limit", memsoftlimit);
  if (memsoftlimit >= upper_bound) {
    log_trace(os, container)("Memory Soft Limit is: Unlimited");
    return (jlong)-1;
  } else {
    return (jlong)memsoftlimit;
  if (memory_sw_limit != value_unlimited && memory_limit != value_unlimited) {
    if (memory_limit < memory_sw_limit) {
      // swap allowed and > 0
      physical_memory_size_type swap_usage = 0;
      if (!memory_swap_usage_impl(reader(), swap_usage)) {
        return false;
      }
      result = swap_usage;
      return true;
    }
  }
  return memory_usage_in_bytes(result);
}

jlong CgroupV1MemoryController::memory_throttle_limit_in_bytes() {
bool CgroupV1MemoryController::read_mem_swappiness(physical_memory_size_type& result) {
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.swappiness", "Swappiness", result);
}

bool CgroupV1MemoryController::memory_soft_limit_val(physical_memory_size_type& result) {
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.soft_limit_in_bytes", "Memory Soft Limit", result);
}

bool CgroupV1MemoryController::memory_soft_limit_in_bytes(physical_memory_size_type upper_bound,
                                                          physical_memory_size_type& result) {
  physical_memory_size_type mem_soft_limit = 0;
  if (!memory_soft_limit_val(mem_soft_limit)) {
    return false;
  }
  if (mem_soft_limit >= upper_bound) {
    log_trace(os, container)("Memory Soft Limit is: Unlimited");
    result = value_unlimited;
  } else {
    result = mem_soft_limit;
  }
  return true;
}

bool CgroupV1MemoryController::memory_throttle_limit_in_bytes(physical_memory_size_type& result) {
  // Log this string at trace level so as to make tests happy.
  log_trace(os, container)("Memory Throttle Limit is not supported.");
  return OSCONTAINER_ERROR; // not supported
  return false;
}

// Constructor
@@ -288,80 +343,129 @@ bool CgroupV1Subsystem::is_containerized() {
         _cpuset->is_read_only();
}

/* memory_usage_in_bytes
bool CgroupV1MemoryController::memory_usage_in_bytes(physical_memory_size_type& result) {
  physical_memory_size_type memory_usage = 0;
  if (!memory_usage_val(memory_usage)) {
    return false;
  }
  result = memory_usage;
  return true;
}

/* memory_usage_val
 *
 * Return the amount of used memory for this process.
 * Read the amount of used memory for this process into the passed in reference 'result'
 *
 * return:
 *    memory usage in bytes or
 *    -1 for unlimited
 *    OSCONTAINER_ERROR for not supported
 *    true when reading of the file was successful and 'result' was set appropriately
 *    false when reading of the file failed
 */
jlong CgroupV1MemoryController::memory_usage_in_bytes() {
  julong memusage;
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.usage_in_bytes", "Memory Usage", memusage);
  return (jlong)memusage;
bool CgroupV1MemoryController::memory_usage_val(physical_memory_size_type& result) {
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.usage_in_bytes", "Memory Usage", result);
}

bool CgroupV1MemoryController::memory_max_usage_val(physical_memory_size_type& result) {
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.max_usage_in_bytes", "Maximum Memory Usage", result);
}

/* memory_max_usage_in_bytes
 *
 * Return the maximum amount of used memory for this process.
 * Return the maximum amount of used memory for this process in the
 * result reference.
 *
 * return:
 *    max memory usage in bytes or
 *    OSCONTAINER_ERROR for not supported
 *    true if the result reference has been set
 *    false otherwise (e.g. on error)
 */
jlong CgroupV1MemoryController::memory_max_usage_in_bytes() {
  julong memmaxusage;
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.max_usage_in_bytes", "Maximum Memory Usage", memmaxusage);
  return (jlong)memmaxusage;
}

jlong CgroupV1MemoryController::rss_usage_in_bytes() {
  julong rss;
  bool is_ok = reader()->read_numerical_key_value("/memory.stat", "rss", &rss);
  if (!is_ok) {
    return OSCONTAINER_ERROR;
bool CgroupV1MemoryController::memory_max_usage_in_bytes(physical_memory_size_type& result) {
  physical_memory_size_type memory_max_usage = 0;
  if (!memory_max_usage_val(memory_max_usage)) {
    return false;
  }
  log_trace(os, container)("RSS usage is: " JULONG_FORMAT, rss);
  return (jlong)rss;
  result = memory_max_usage;
  return true;
}

jlong CgroupV1MemoryController::cache_usage_in_bytes() {
  julong cache;
  bool is_ok = reader()->read_numerical_key_value("/memory.stat", "cache", &cache);
  if (!is_ok) {
    return OSCONTAINER_ERROR;
bool CgroupV1MemoryController::rss_usage_in_bytes(physical_memory_size_type& result) {
  physical_memory_size_type rss = 0;

  if (!reader()->read_numerical_key_value("/memory.stat", "rss", rss)) {
    return false;
  }
  log_trace(os, container)("Cache usage is: " JULONG_FORMAT, cache);
  return cache;
  log_trace(os, container)("RSS usage is: " PHYS_MEM_TYPE_FORMAT, rss);
  result = rss;
  return true;
}

jlong CgroupV1MemoryController::kernel_memory_usage_in_bytes() {
  julong kmem_usage;
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.kmem.usage_in_bytes", "Kernel Memory Usage", kmem_usage);
  return (jlong)kmem_usage;
bool CgroupV1MemoryController::cache_usage_in_bytes(physical_memory_size_type& result) {
  physical_memory_size_type cache = 0;
  if (!reader()->read_numerical_key_value("/memory.stat", "cache", cache)) {
    return false;
  }
  log_trace(os, container)("Cache usage is: " PHYS_MEM_TYPE_FORMAT, cache);
  result = cache;
  return true;
}

jlong CgroupV1MemoryController::kernel_memory_limit_in_bytes(julong upper_bound) {
  julong kmem_limit;
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.kmem.limit_in_bytes", "Kernel Memory Limit", kmem_limit);
bool CgroupV1MemoryController::kernel_memory_usage_val(physical_memory_size_type& result) {
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.kmem.usage_in_bytes", "Kernel Memory Usage", result);
}

bool CgroupV1MemoryController::kernel_memory_usage_in_bytes(physical_memory_size_type& result) {
  physical_memory_size_type kmem_usage = 0;
  if (!kernel_memory_usage_val(kmem_usage)) {
    return false;
  }
  result = kmem_usage;
  return true;
}

bool CgroupV1MemoryController::kernel_memory_limit_val(physical_memory_size_type& result) {
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.kmem.limit_in_bytes", "Kernel Memory Limit", result);
}

bool CgroupV1MemoryController::kernel_memory_limit_in_bytes(physical_memory_size_type upper_bound,
                                                            physical_memory_size_type& result) {
  physical_memory_size_type kmem_limit = 0;
  if (!kernel_memory_limit_val(kmem_limit)) {
    return false;
  }
  if (kmem_limit >= upper_bound) {
    return (jlong)-1;
    kmem_limit = value_unlimited;
  }
  return (jlong)kmem_limit;
  result = kmem_limit;
  return true;
}

jlong CgroupV1MemoryController::kernel_memory_max_usage_in_bytes() {
  julong kmem_max_usage;
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.kmem.max_usage_in_bytes", "Maximum Kernel Memory Usage", kmem_max_usage);
  return (jlong)kmem_max_usage;
bool CgroupV1MemoryController::kernel_memory_max_usage_val(physical_memory_size_type& result) {
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.kmem.max_usage_in_bytes", "Maximum Kernel Memory Usage", result);
}

void CgroupV1MemoryController::print_version_specific_info(outputStream* st, julong mem_bound) {
  jlong kmem_usage = kernel_memory_usage_in_bytes();
  jlong kmem_limit = kernel_memory_limit_in_bytes(mem_bound);
  jlong kmem_max_usage = kernel_memory_max_usage_in_bytes();
bool CgroupV1MemoryController::kernel_memory_max_usage_in_bytes(physical_memory_size_type& result) {
  physical_memory_size_type kmem_max_usage = 0;
  if (!kernel_memory_max_usage_val(kmem_max_usage)) {
    return false;
  }
  result = kmem_max_usage;
  return true;
}

void CgroupV1MemoryController::print_version_specific_info(outputStream* st, physical_memory_size_type mem_bound) {
  MetricResult kmem_usage;
  physical_memory_size_type temp = 0;
  if (kernel_memory_usage_in_bytes(temp)) {
    kmem_usage.set_value(temp);
  }
  MetricResult kmem_limit;
  temp = value_unlimited;
  if (kernel_memory_limit_in_bytes(mem_bound, temp)) {
    kmem_limit.set_value(temp);
  }
  MetricResult kmem_max_usage;
  temp = 0;
  if (kernel_memory_max_usage_in_bytes(temp)) {
    kmem_max_usage.set_value(temp);
  }

  OSContainer::print_container_helper(st, kmem_limit, "kernel_memory_limit_in_bytes");
  OSContainer::print_container_helper(st, kmem_usage, "kernel_memory_usage_in_bytes");
@@ -383,74 +487,114 @@ char* CgroupV1Subsystem::cpu_cpuset_memory_nodes() {
/* cpu_quota
 *
 * Return the number of microseconds per period
 * process is guaranteed to run.
 * a process is guaranteed to run in the provided
 * result reference.
 *
 * return:
 *    quota time in microseconds
 *    -1 for no quota
 *    OSCONTAINER_ERROR for not supported
 *    true if the value was set in the result reference
 *    false on failure to read the number from the file
 *    and the result reference has not been touched.
 */
int CgroupV1CpuController::cpu_quota() {
  julong quota;
  bool is_ok = reader()->read_number("/cpu.cfs_quota_us", &quota);
  if (!is_ok) {
    log_trace(os, container)("CPU Quota failed: %d", OSCONTAINER_ERROR);
    return OSCONTAINER_ERROR;
bool CgroupV1CpuController::cpu_quota(int& result) {
  uint64_t quota = 0;

  // intentionally not using the macro so as to not log a
  // negative value as a large unsigned int
  if (!reader()->read_number("/cpu.cfs_quota_us", quota)) {
    log_trace(os, container)("CPU Quota failed");
    return false;
  }
  // cast to int since the read value might be negative
  // and we want to avoid logging -1 as a large unsigned value.
  int quota_int = (int)quota;
  int quota_int = static_cast<int>(quota);
  log_trace(os, container)("CPU Quota is: %d", quota_int);
  return quota_int;
  result = quota_int;
  return true;
}

int CgroupV1CpuController::cpu_period() {
  julong period;
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/cpu.cfs_period_us", "CPU Period", period);
  return (int)period;
bool CgroupV1CpuController::cpu_period_val(uint64_t& result) {
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/cpu.cfs_period_us", "CPU Period", result);
}

bool CgroupV1CpuController::cpu_period(int& result) {
  uint64_t period = value_unlimited;
  if (!cpu_period_val(period)) {
    return false;
  }
  result = static_cast<int>(period);
  return true;
}

bool CgroupV1CpuController::cpu_shares_val(uint64_t& result) {
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/cpu.shares", "CPU Shares", result);
}

/* cpu_shares
 *
 * Return the amount of cpu shares available to the process
 *    - Share number (typically a number relative to 1024)
 *    - (2048 typically expresses 2 CPUs worth of processing)
 *
 * return:
 *    Share number (typically a number relative to 1024)
 *    (2048 typically expresses 2 CPUs worth of processing)
 *    -1 for no share setup
 *    OSCONTAINER_ERROR for not supported
 *    false on error
 *    true if the result has been set in the result reference
 */
int CgroupV1CpuController::cpu_shares() {
  julong shares;
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/cpu.shares", "CPU Shares", shares);
  int shares_int = (int)shares;
  // Convert 1024 to no shares setup
  if (shares_int == 1024) return -1;
bool CgroupV1CpuController::cpu_shares(int& result) {
  uint64_t shares = 0;
  if (!cpu_shares_val(shares)) {
    return false;
  }
  int shares_int = static_cast<int>(shares);
  // Convert 1024 to no shares setup (-1)
  if (shares_int == 1024) {
    shares_int = -1;
  }

  return shares_int;
  result = shares_int;
  return true;
}

jlong CgroupV1CpuacctController::cpu_usage_in_micros() {
  julong cpu_usage;
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/cpuacct.usage", "CPU Usage", cpu_usage);
bool CgroupV1CpuacctController::cpu_usage_in_micros_val(uint64_t& result) {
  CONTAINER_READ_NUMBER_CHECKED(reader(), "/cpuacct.usage", "CPU Usage", result);
}

bool CgroupV1CpuacctController::cpu_usage_in_micros(uint64_t& result) {
  uint64_t cpu_usage = 0;
  if (!cpu_usage_in_micros_val(cpu_usage)) {
    return false;
  }
  // Output is in nanoseconds, convert to microseconds.
  return (jlong)cpu_usage / 1000;
  result = static_cast<uint64_t>(cpu_usage / 1000);
  return true;
}
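(Unit note for the conversion above: cgroup v1's cpuacct.usage reports cumulative CPU
time in nanoseconds, so a raw reading of 1500000000 becomes 1500000 after the divide
by 1000, i.e. 1.5 seconds expressed in microseconds, matching the _in_micros name.)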

static
bool pids_max_val(CgroupController* ctrl, uint64_t& result) {
  CONTAINER_READ_NUMBER_CHECKED_MAX(ctrl, "/pids.max", "Maximum number of tasks", result);
}

/* pids_max
 *
 * Return the maximum number of tasks available to the process
 * in the passed result reference (might be value_unlimited).
 *
 * return:
 *    maximum number of tasks
 *    -1 for unlimited
 *    OSCONTAINER_ERROR for not supported
 *    false on error
 *    true when the result reference has been appropriately set
 */
jlong CgroupV1Subsystem::pids_max() {
  if (_pids == nullptr) return OSCONTAINER_ERROR;
  jlong pids_max;
  CONTAINER_READ_NUMBER_CHECKED_MAX(_pids, "/pids.max", "Maximum number of tasks", pids_max);
  return pids_max;
bool CgroupV1Subsystem::pids_max(uint64_t& result) {
  if (_pids == nullptr) return false;
  uint64_t pids_val = 0;
  if (!pids_max_val(_pids, pids_val)) {
    return false;
  }
  result = pids_val;
  return true;
}

static
bool pids_current_val(CgroupController* ctrl, uint64_t& result) {
  CONTAINER_READ_NUMBER_CHECKED(ctrl, "/pids.current", "Current number of tasks", result);
}

/* pids_current
@@ -458,12 +602,15 @@ jlong CgroupV1Subsystem::pids_max() {
 * The number of tasks currently in the cgroup (and its descendants) of the process
 *
 * return:
 *    current number of tasks
 *    OSCONTAINER_ERROR for not supported
 *    true if the current number of tasks has been set in the result reference
 *    false if an error occurred
 */
jlong CgroupV1Subsystem::pids_current() {
  if (_pids == nullptr) return OSCONTAINER_ERROR;
  julong pids_current;
  CONTAINER_READ_NUMBER_CHECKED(_pids, "/pids.current", "Current number of tasks", pids_current);
  return (jlong)pids_current;
bool CgroupV1Subsystem::pids_current(uint64_t& result) {
  if (_pids == nullptr) return false;
  uint64_t pids_current = 0;
  if (!pids_current_val(_pids, pids_current)) {
    return false;
  }
  result = pids_current;
  return true;
}

@@ -73,23 +73,44 @@ class CgroupV1MemoryController final : public CgroupMemoryController {
  private:
    CgroupV1Controller _reader;
    CgroupV1Controller* reader() { return &_reader; }
    bool read_memory_limit_val(physical_memory_size_type& result);
    bool read_hierarchical_memory_limit_val(physical_memory_size_type& result);
    bool read_hierarchical_mem_swap_val(physical_memory_size_type& result);
    bool read_use_hierarchy_val(physical_memory_size_type& result);
    bool memory_usage_val(physical_memory_size_type& result);
    bool read_mem_swappiness(physical_memory_size_type& result);
    bool read_mem_swap(physical_memory_size_type& result);
    bool memory_soft_limit_val(physical_memory_size_type& result);
    bool memory_max_usage_val(physical_memory_size_type& result);
    bool kernel_memory_usage_val(physical_memory_size_type& result);
    bool kernel_memory_limit_val(physical_memory_size_type& result);
    bool kernel_memory_max_usage_val(physical_memory_size_type& result);
    bool uses_mem_hierarchy();

  public:
    void set_subsystem_path(const char *cgroup_path) override {
      reader()->set_subsystem_path(cgroup_path);
    }
    jlong read_memory_limit_in_bytes(julong upper_bound) override;
    jlong memory_usage_in_bytes() override;
    jlong memory_and_swap_limit_in_bytes(julong upper_mem_bound, julong upper_swap_bound) override;
    jlong memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound) override;
    jlong memory_soft_limit_in_bytes(julong upper_bound) override;
    jlong memory_throttle_limit_in_bytes() override;
    jlong memory_max_usage_in_bytes() override;
    jlong rss_usage_in_bytes() override;
    jlong cache_usage_in_bytes() override;
    jlong kernel_memory_usage_in_bytes();
    jlong kernel_memory_limit_in_bytes(julong upper_bound);
    jlong kernel_memory_max_usage_in_bytes();
    void print_version_specific_info(outputStream* st, julong upper_mem_bound) override;
    bool read_memory_limit_in_bytes(physical_memory_size_type upper_bound,
                                    physical_memory_size_type& value) override;
    bool memory_usage_in_bytes(physical_memory_size_type& result) override;
    bool memory_and_swap_limit_in_bytes(physical_memory_size_type upper_mem_bound,
                                        physical_memory_size_type upper_swap_bound,
                                        physical_memory_size_type& result) override;
    bool memory_and_swap_usage_in_bytes(physical_memory_size_type upper_mem_bound,
                                        physical_memory_size_type upper_swap_bound,
                                        physical_memory_size_type& result) override;
    bool memory_soft_limit_in_bytes(physical_memory_size_type upper_bound,
                                    physical_memory_size_type& result) override;
    bool memory_throttle_limit_in_bytes(physical_memory_size_type& result) override;
    bool memory_max_usage_in_bytes(physical_memory_size_type& result) override;
    bool rss_usage_in_bytes(physical_memory_size_type& result) override;
    bool cache_usage_in_bytes(physical_memory_size_type& result) override;
    bool kernel_memory_usage_in_bytes(physical_memory_size_type& result);
    bool kernel_memory_limit_in_bytes(physical_memory_size_type upper_bound,
                                      physical_memory_size_type& result);
    bool kernel_memory_max_usage_in_bytes(physical_memory_size_type& result);
    void print_version_specific_info(outputStream* st, physical_memory_size_type upper_mem_bound) override;
    bool needs_hierarchy_adjustment() override {
      return reader()->needs_hierarchy_adjustment();
    }
@@ -99,10 +120,6 @@ class CgroupV1MemoryController final : public CgroupMemoryController {
    const char* subsystem_path() override { return reader()->subsystem_path(); }
    const char* mount_point() override { return reader()->mount_point(); }
    const char* cgroup_path() override { return reader()->cgroup_path(); }
  private:
    jlong uses_mem_hierarchy();
    jlong read_mem_swappiness();
    jlong read_mem_swap(julong upper_memsw_bound);

  public:
    CgroupV1MemoryController(const CgroupV1Controller& reader)
@@ -116,10 +133,12 @@ class CgroupV1CpuController final : public CgroupCpuController {
  private:
    CgroupV1Controller _reader;
    CgroupV1Controller* reader() { return &_reader; }
    bool cpu_period_val(uint64_t& result);
    bool cpu_shares_val(uint64_t& result);
  public:
    int cpu_quota() override;
    int cpu_period() override;
    int cpu_shares() override;
    bool cpu_quota(int& result) override;
    bool cpu_period(int& result) override;
    bool cpu_shares(int& result) override;
    void set_subsystem_path(const char *cgroup_path) override {
      reader()->set_subsystem_path(cgroup_path);
    }
@@ -147,8 +166,9 @@ class CgroupV1CpuacctController final : public CgroupCpuacctController {
  private:
    CgroupV1Controller _reader;
    CgroupV1Controller* reader() { return &_reader; }
    bool cpu_usage_in_micros_val(uint64_t& result);
  public:
    jlong cpu_usage_in_micros() override;
    bool cpu_usage_in_micros(uint64_t& result) override;
    void set_subsystem_path(const char *cgroup_path) override {
      reader()->set_subsystem_path(cgroup_path);
    }
@@ -180,15 +200,15 @@ class CgroupV1Subsystem: public CgroupSubsystem {
                      CgroupV1Controller* pids,
                      CgroupV1MemoryController* memory);

    jlong kernel_memory_usage_in_bytes();
    jlong kernel_memory_limit_in_bytes();
    jlong kernel_memory_max_usage_in_bytes();
    bool kernel_memory_usage_in_bytes(physical_memory_size_type& result);
    bool kernel_memory_limit_in_bytes(physical_memory_size_type& result);
    bool kernel_memory_max_usage_in_bytes(physical_memory_size_type& result);

    char * cpu_cpuset_cpus();
    char * cpu_cpuset_memory_nodes();
    char * cpu_cpuset_cpus() override;
    char * cpu_cpuset_memory_nodes() override;

    jlong pids_max();
    jlong pids_current();
    bool pids_max(uint64_t& result) override;
    bool pids_current(uint64_t& result) override;
    bool is_containerized();

    const char * container_type() {

@@ -26,6 +26,8 @@
|
||||
#include "cgroupUtil_linux.hpp"
|
||||
#include "cgroupV2Subsystem_linux.hpp"
|
||||
|
||||
#include <math.h>
|
||||
|
||||
// Constructor
|
||||
CgroupV2Controller::CgroupV2Controller(char* mount_path,
|
||||
char *cgroup_path,
|
||||
@@ -42,43 +44,72 @@ CgroupV2Controller::CgroupV2Controller(const CgroupV2Controller& o) :
_mount_point = o._mount_point;
}

static
bool read_cpu_shares_value(CgroupV2Controller* ctrl, uint64_t& value) {
CONTAINER_READ_NUMBER_CHECKED(ctrl, "/cpu.weight", "Raw value for CPU Shares", value);
}

/* cpu_shares
*
* Return the amount of cpu shares available to the process
* Return the amount of cpu shares available to the process in the
* 'result' reference.
*
* return:
* Share number (typically a number relative to 1024)
* (2048 typically expresses 2 CPUs worth of processing)
* -1 for no share setup
* OSCONTAINER_ERROR for not supported
*
* return:
* true if the result reference got updated
* false if there was an error
*/
int CgroupV2CpuController::cpu_shares() {
julong shares;
CONTAINER_READ_NUMBER_CHECKED(reader(), "/cpu.weight", "Raw value for CPU Shares", shares);
int shares_int = (int)shares;
bool CgroupV2CpuController::cpu_shares(int& result) {
uint64_t shares = 0;
bool is_ok = read_cpu_shares_value(reader(), shares);
if (!is_ok) {
return false;
}
int shares_int = static_cast<int>(shares);
// Convert default value of 100 to no shares setup
if (shares_int == 100) {
log_debug(os, container)("CPU Shares is: %d", -1);
return -1;
log_debug(os, container)("CPU Shares is: unlimited");
result = -1;
return true;
}
// cg v2 values must be in range [1-10000]
assert(shares_int >= 1 && shares_int <= 10000, "invariant");

// CPU shares (OCI) value needs to get translated into
// a proper Cgroups v2 value. See:
// https://github.com/containers/crun/blob/master/crun.1.md#cpu-controller
// https://github.com/containers/crun/blob/1.24/crun.1.md#cpu-controller
//
// Use the inverse of (x == OCI value, y == cgroupsv2 value):
// ((262142 * y - 1)/9999) + 2 = x
// y = 10^(log2(x)^2/612 + 125/612 * log2(x) - 7.0/34.0)
//
int x = 262142 * shares_int - 1;
double frac = x/9999.0;
x = ((int)frac) + 2;
// By re-arranging it to the standard quadratic form:
// log2(x)^2 + 125 * log2(x) - (126 + 612 * log_10(y)) = 0
//
// Therefore, log2(x) = (-125 + sqrt( 125^2 - 4 * (-(126 + 612 * log_10(y)))))/2
//
// As a result we have the inverse (we can discount subtraction of the
// square root value since those values result in very small numbers and the
// cpu shares values - OCI - are in range [2,262144]):
//
// x = 2^((-125 + sqrt(16129 + 2448* log10(y)))/2)
//
double log_multiplicand = log10(shares_int);
double discriminant = 16129 + 2448 * log_multiplicand;
double square_root = sqrt(discriminant);
double exponent = (-125 + square_root)/2;
double scaled_val = pow(2, exponent);
int x = (int) scaled_val;
log_trace(os, container)("Scaled CPU shares value is: %d", x);
// Since the scaled value is not precise, return the closest
// multiple of PER_CPU_SHARES for a more conservative mapping
if ( x <= PER_CPU_SHARES ) {
// will always map to 1 CPU
// Don't do the multiples of PER_CPU_SHARES mapping since we
// have a value <= PER_CPU_SHARES
log_debug(os, container)("CPU Shares is: %d", x);
return x;
result = x;
return true;
}
int f = x/PER_CPU_SHARES;
int lower_multiple = f * PER_CPU_SHARES;
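The quadratic inversion in the comments above is easier to verify outside of HotSpot. Below is a minimal standalone C++ sketch of the cpu.weight-to-shares mapping — illustrative only, not the JDK code. It omits the special-casing of the default weight 100 shown above, and it assumes PER_CPU_SHARES is 1024, per the OCI convention of 1024 shares per CPU.

// Standalone sketch of the cpu.weight -> OCI cpu-shares inversion described above.
// Assumption: weight is the raw value read from /sys/fs/cgroup/cpu.weight, in [1, 10000].
#include <cmath>
#include <cstdio>
#include <initializer_list>

static const int PER_CPU_SHARES = 1024; // assumed; matches the OCI 1024-per-CPU convention

// x = 2^((-125 + sqrt(16129 + 2448 * log10(y))) / 2), then snapped to the
// closest multiple of PER_CPU_SHARES once the value exceeds one CPU's worth.
static int weight_to_shares(int weight) {
  double exponent = (-125.0 + std::sqrt(16129.0 + 2448.0 * std::log10((double)weight))) / 2.0;
  int x = (int)std::pow(2.0, exponent);
  if (x <= PER_CPU_SHARES) {
    return x; // maps to at most 1 CPU; no snapping needed
  }
  int lower = (x / PER_CPU_SHARES) * PER_CPU_SHARES;
  int upper = lower + PER_CPU_SHARES;
  return (x - lower <= upper - x) ? lower : upper;
}

int main() {
  for (int w : {1, 100, 173, 10000}) {
    std::printf("cpu.weight=%d -> shares=%d\n", w, weight_to_shares(w));
  }
  return 0;
}

Running it, weight 1 maps to the OCI minimum of 2 shares, the default 100 maps to 1024 (one CPU's worth, which the HotSpot code above reports as "no shares setup"), 173 maps to 2048, and the maximum 10000 maps to 262144.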
@@ -88,28 +119,33 @@ int CgroupV2CpuController::cpu_shares() {
x = distance_lower <= distance_upper ? lower_multiple : upper_multiple;
log_trace(os, container)("Closest multiple of %d of the CPU Shares value is: %d", PER_CPU_SHARES, x);
log_debug(os, container)("CPU Shares is: %d", x);
return x;
result = x;
return true;
}

/* cpu_quota
*
* Return the number of microseconds per period
* process is guaranteed to run.
* process is guaranteed to run in the passed in 'result' reference.
*
* return:
* quota time in microseconds
* -1 for no quota
* OSCONTAINER_ERROR for not supported
* true if the result reference has been set
* false on error
*/
int CgroupV2CpuController::cpu_quota() {
jlong quota_val;
bool is_ok = reader()->read_numerical_tuple_value("/cpu.max", true /* use_first */, &quota_val);
if (!is_ok) {
return OSCONTAINER_ERROR;
bool CgroupV2CpuController::cpu_quota(int& result) {
uint64_t quota_val = 0;
if (!reader()->read_numerical_tuple_value("/cpu.max", true /* use_first */, quota_val)) {
return false;
}
int limit = -1;
// The read first tuple value might be 'max' which maps
// to value_unlimited. Keep that at -1;
if (quota_val != value_unlimited) {
limit = static_cast<int>(quota_val);
}
int limit = (int)quota_val;
log_trace(os, container)("CPU Quota is: %d", limit);
return limit;
result = limit;
return true;
}

// Constructor
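Both cpu_quota() and cpu_period() parse the same two-token cgroup v2 interface file, cpu.max, whose content is "$MAX $PERIOD" with $MAX either a number of microseconds or the literal "max" for no limit. A standalone sketch of that parse (illustrative only; the file path and the -1 convention here mirror the document, not the HotSpot reader API):

// Sketch of parsing the cgroup v2 "cpu.max" interface file.
#include <cstdio>
#include <cstdlib>
#include <cstring>

// Returns true on success; quota is -1 when the first token is "max".
static bool read_cpu_max(const char* path, long& quota, long& period) {
  FILE* f = std::fopen(path, "r");
  if (f == nullptr) return false;
  char first[32];
  bool ok = std::fscanf(f, "%31s %ld", first, &period) == 2;
  std::fclose(f);
  if (!ok) return false;
  quota = (std::strcmp(first, "max") == 0) ? -1 : std::strtol(first, nullptr, 10);
  return true;
}

int main() {
  long quota = 0, period = 0;
  if (read_cpu_max("/sys/fs/cgroup/cpu.max", quota, period)) {
    std::printf("quota=%ld period=%ld\n", quota, period);
  }
  return 0;
}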
@@ -143,80 +179,67 @@ char* CgroupV2Subsystem::cpu_cpuset_memory_nodes() {
return os::strdup(mems);
}

int CgroupV2CpuController::cpu_period() {
jlong period_val;
bool is_ok = reader()->read_numerical_tuple_value("/cpu.max", false /* use_first */, &period_val);
if (!is_ok) {
log_trace(os, container)("CPU Period failed: %d", OSCONTAINER_ERROR);
return OSCONTAINER_ERROR;
bool CgroupV2CpuController::cpu_period(int& result) {
uint64_t cpu_period = 0;
if (!reader()->read_numerical_tuple_value("/cpu.max", false /* use_first */, cpu_period)) {
log_trace(os, container)("CPU Period failed");
return false;
}
int period = (int)period_val;
log_trace(os, container)("CPU Period is: %d", period);
return period;
int period_int = static_cast<int>(cpu_period);
log_trace(os, container)("CPU Period is: %d", period_int);
result = period_int;
return true;
}

jlong CgroupV2CpuController::cpu_usage_in_micros() {
julong cpu_usage;
bool is_ok = reader()->read_numerical_key_value("/cpu.stat", "usage_usec", &cpu_usage);
bool CgroupV2CpuController::cpu_usage_in_micros(uint64_t& value) {
bool is_ok = reader()->read_numerical_key_value("/cpu.stat", "usage_usec", value);
if (!is_ok) {
log_trace(os, container)("CPU Usage failed: %d", OSCONTAINER_ERROR);
return OSCONTAINER_ERROR;
log_trace(os, container)("CPU Usage failed");
return false;
}
log_trace(os, container)("CPU Usage is: " JULONG_FORMAT, cpu_usage);
return (jlong)cpu_usage;
log_trace(os, container)("CPU Usage is: " UINT64_FORMAT, value);
return true;
}

/* memory_usage_in_bytes
*
* Return the amount of memory used by this cgroup and descendants
* read the amount of memory used by this cgroup and descendants
* into the passed in 'value' reference.
*
* return:
* memory usage in bytes or
* -1 for unlimited
* OSCONTAINER_ERROR for not supported
* false on failure, true otherwise.
*/
jlong CgroupV2MemoryController::memory_usage_in_bytes() {
julong memusage;
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.current", "Memory Usage", memusage);
return (jlong)memusage;
bool CgroupV2MemoryController::memory_usage_in_bytes(physical_memory_size_type& value) {
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.current", "Memory Usage", value);
}

jlong CgroupV2MemoryController::memory_soft_limit_in_bytes(julong upper_bound) {
jlong mem_soft_limit;
CONTAINER_READ_NUMBER_CHECKED_MAX(reader(), "/memory.low", "Memory Soft Limit", mem_soft_limit);
return mem_soft_limit;
bool CgroupV2MemoryController::memory_soft_limit_in_bytes(physical_memory_size_type upper_bound,
physical_memory_size_type& value) {
CONTAINER_READ_NUMBER_CHECKED_MAX(reader(), "/memory.low", "Memory Soft Limit", value);
}

jlong CgroupV2MemoryController::memory_throttle_limit_in_bytes() {
jlong mem_throttle_limit;
CONTAINER_READ_NUMBER_CHECKED_MAX(reader(), "/memory.high", "Memory Throttle Limit", mem_throttle_limit);
return mem_throttle_limit;
bool CgroupV2MemoryController::memory_throttle_limit_in_bytes(physical_memory_size_type& value) {
CONTAINER_READ_NUMBER_CHECKED_MAX(reader(), "/memory.high", "Memory Throttle Limit", value);
}

jlong CgroupV2MemoryController::memory_max_usage_in_bytes() {
julong mem_max_usage;
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.peak", "Maximum Memory Usage", mem_max_usage);
return mem_max_usage;
bool CgroupV2MemoryController::memory_max_usage_in_bytes(physical_memory_size_type& value) {
CONTAINER_READ_NUMBER_CHECKED(reader(), "/memory.peak", "Maximum Memory Usage", value);
}

jlong CgroupV2MemoryController::rss_usage_in_bytes() {
julong rss;
bool is_ok = reader()->read_numerical_key_value("/memory.stat", "anon", &rss);
if (!is_ok) {
return OSCONTAINER_ERROR;
bool CgroupV2MemoryController::rss_usage_in_bytes(physical_memory_size_type& value) {
if (!reader()->read_numerical_key_value("/memory.stat", "anon", value)) {
return false;
}
log_trace(os, container)("RSS usage is: " JULONG_FORMAT, rss);
return (jlong)rss;
log_trace(os, container)("RSS usage is: " PHYS_MEM_TYPE_FORMAT, value);
return true;
}

jlong CgroupV2MemoryController::cache_usage_in_bytes() {
julong cache;
bool is_ok = reader()->read_numerical_key_value("/memory.stat", "file", &cache);
if (!is_ok) {
return OSCONTAINER_ERROR;
bool CgroupV2MemoryController::cache_usage_in_bytes(physical_memory_size_type& value) {
if (!reader()->read_numerical_key_value("/memory.stat", "file", value)) {
return false;
}
log_trace(os, container)("Cache usage is: " JULONG_FORMAT, cache);
return (jlong)cache;
log_trace(os, container)("Cache usage is: " PHYS_MEM_TYPE_FORMAT, value);
return true;
}

// Note that for cgroups v2 the actual limits set for swap and
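The common thread of this hunk is the new calling convention: every metric now returns a bool for success and writes its value through a reference, instead of overloading one jlong return with the -1 and OSCONTAINER_ERROR sentinels. A minimal caller-side sketch of that pattern (standalone code; read_metric() is a stand-in, not a HotSpot function):

// Sketch of the bool-plus-out-reference convention this change introduces.
#include <cstdint>
#include <cstdio>
#include <limits>

const uint64_t value_unlimited = std::numeric_limits<uint64_t>::max();

static bool read_metric(uint64_t& value) {
  value = 42ull * 1024 * 1024; // pretend we parsed a cgroup interface file
  return true;
}

int main() {
  uint64_t usage = 0;
  if (!read_metric(usage)) {
    std::puts("memory usage: unavailable");   // the error path is now explicit
  } else if (usage == value_unlimited) {
    std::puts("memory usage: unlimited");     // 'max' in the interface file
  } else {
    std::printf("memory usage: %llu bytes\n", (unsigned long long)usage);
  }
  return 0;
}

The benefit is that "error", "unlimited", and large legitimate values can no longer collide in a single signed return.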
@@ -224,91 +247,108 @@ jlong CgroupV2MemoryController::cache_usage_in_bytes() {
// respectively. In order to properly report a cgroup v1 like
// compound value we need to sum the two values. Setting a swap limit
// without also setting a memory limit is not allowed.
jlong CgroupV2MemoryController::memory_and_swap_limit_in_bytes(julong upper_mem_bound,
julong upper_swap_bound /* unused in cg v2 */) {
jlong swap_limit;
bool is_ok = reader()->read_number_handle_max("/memory.swap.max", &swap_limit);
if (!is_ok) {
bool CgroupV2MemoryController::memory_and_swap_limit_in_bytes(physical_memory_size_type upper_mem_bound,
physical_memory_size_type upper_swap_bound, /* unused in cg v2 */
physical_memory_size_type& result) {
physical_memory_size_type swap_limit_val = 0;
if (!reader()->read_number_handle_max("/memory.swap.max", swap_limit_val)) {
// Some container tests rely on this trace logging to happen.
log_trace(os, container)("Swap Limit failed: %d", OSCONTAINER_ERROR);
log_trace(os, container)("Swap Limit failed");
// swap disabled at kernel level, treat it as no swap
return read_memory_limit_in_bytes(upper_mem_bound);
physical_memory_size_type mem_limit = value_unlimited;
if (!read_memory_limit_in_bytes(upper_mem_bound, mem_limit)) {
return false;
}
result = mem_limit;
return true;
}
log_trace(os, container)("Swap Limit is: " JLONG_FORMAT, swap_limit);
if (swap_limit >= 0) {
jlong memory_limit = read_memory_limit_in_bytes(upper_mem_bound);
assert(memory_limit >= 0, "swap limit without memory limit?");
return memory_limit + swap_limit;
if (swap_limit_val == value_unlimited) {
log_trace(os, container)("Memory and Swap Limit is: Unlimited");
result = swap_limit_val;
return true;
}
log_trace(os, container)("Swap Limit is: " PHYS_MEM_TYPE_FORMAT, swap_limit_val);
physical_memory_size_type memory_limit = 0;
if (read_memory_limit_in_bytes(upper_mem_bound, memory_limit)) {
assert(memory_limit != value_unlimited, "swap limit without memory limit?");
result = memory_limit + swap_limit_val;
log_trace(os, container)("Memory and Swap Limit is: " PHYS_MEM_TYPE_FORMAT, result);
return true;
} else {
return false;
}
log_trace(os, container)("Memory and Swap Limit is: " JLONG_FORMAT, swap_limit);
return swap_limit;
}

// memory.swap.current : total amount of swap currently used by the cgroup and its descendants
static
jlong memory_swap_current_value(CgroupV2Controller* ctrl) {
julong swap_current;
CONTAINER_READ_NUMBER_CHECKED(ctrl, "/memory.swap.current", "Swap currently used", swap_current);
return (jlong)swap_current;
bool memory_swap_current_value(CgroupV2Controller* ctrl, physical_memory_size_type& result) {
CONTAINER_READ_NUMBER_CHECKED(ctrl, "/memory.swap.current", "Swap currently used", result);
}

jlong CgroupV2MemoryController::memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound) {
jlong memory_usage = memory_usage_in_bytes();
if (memory_usage >= 0) {
jlong swap_current = memory_swap_current_value(reader());
return memory_usage + (swap_current >= 0 ? swap_current : 0);
bool CgroupV2MemoryController::memory_and_swap_usage_in_bytes(physical_memory_size_type upper_mem_bound,
physical_memory_size_type upper_swap_bound,
physical_memory_size_type& result) {
physical_memory_size_type memory_usage = 0;
if (!memory_usage_in_bytes(memory_usage)) {
return false;
}
return memory_usage; // not supported or unlimited case
physical_memory_size_type swap_current = 0;
if (!memory_swap_current_value(reader(), swap_current)) {
result = memory_usage; // treat as no swap usage
return true;
}
result = memory_usage + swap_current;
return true;
}

static
jlong memory_limit_value(CgroupV2Controller* ctrl) {
jlong memory_limit;
CONTAINER_READ_NUMBER_CHECKED_MAX(ctrl, "/memory.max", "Memory Limit", memory_limit);
return memory_limit;
bool memory_limit_value(CgroupV2Controller* ctrl, physical_memory_size_type& result) {
CONTAINER_READ_NUMBER_CHECKED_MAX(ctrl, "/memory.max", "Memory Limit", result);
}

/* read_memory_limit_in_bytes
*
* Return the limit of available memory for this process.
* Calculate the limit of available memory for this process. The result will be
* set in the 'result' variable if the function returns true.
*
* return:
* memory limit in bytes or
* -1 for unlimited, OSCONTAINER_ERROR for an error
* true when the limit could be read correctly.
* false in case of any error.
*/
jlong CgroupV2MemoryController::read_memory_limit_in_bytes(julong upper_bound) {
jlong limit = memory_limit_value(reader());
bool CgroupV2MemoryController::read_memory_limit_in_bytes(physical_memory_size_type upper_bound,
physical_memory_size_type& result) {
physical_memory_size_type limit = 0; // default unlimited
if (!memory_limit_value(reader(), limit)) {
log_trace(os, container)("container memory limit failed, using host value " PHYS_MEM_TYPE_FORMAT,
upper_bound);
return false;
}
bool is_unlimited = limit == value_unlimited;
bool exceeds_physical_mem = false;
if (!is_unlimited && limit >= upper_bound) {
exceeds_physical_mem = true;
}
if (log_is_enabled(Trace, os, container)) {
if (limit == -1) {
log_trace(os, container)("Memory Limit is: Unlimited");
} else {
log_trace(os, container)("Memory Limit is: " JLONG_FORMAT, limit);
if (!is_unlimited) {
log_trace(os, container)("Memory Limit is: " PHYS_MEM_TYPE_FORMAT, limit);
}
}
if (log_is_enabled(Debug, os, container)) {
julong read_limit = (julong)limit; // avoid signed/unsigned compare
if (limit < 0 || read_limit >= upper_bound) {
const char* reason;
if (limit == -1) {
reason = "unlimited";
} else if (limit == OSCONTAINER_ERROR) {
reason = "failed";
if (is_unlimited || exceeds_physical_mem) {
if (is_unlimited) {
log_trace(os, container)("Memory Limit is: Unlimited");
log_trace(os, container)("container memory limit unlimited, using upper bound value " PHYS_MEM_TYPE_FORMAT, upper_bound);
} else {
assert(read_limit >= upper_bound, "Expected mem limit to exceed upper memory bound");
reason = "ignored";
log_trace(os, container)("container memory limit ignored: " PHYS_MEM_TYPE_FORMAT ", upper bound is " PHYS_MEM_TYPE_FORMAT,
limit, upper_bound);
}
log_debug(os, container)("container memory limit %s: " JLONG_FORMAT ", upper bound is " JLONG_FORMAT,
reason, limit, upper_bound);
}
}
return limit;
result = limit;
return true;
}

static
jlong memory_swap_limit_value(CgroupV2Controller* ctrl) {
jlong swap_limit;
CONTAINER_READ_NUMBER_CHECKED_MAX(ctrl, "/memory.swap.max", "Swap Limit", swap_limit);
return swap_limit;
bool memory_swap_limit_value(CgroupV2Controller* ctrl, physical_memory_size_type& value) {
CONTAINER_READ_NUMBER_CHECKED_MAX(ctrl, "/memory.swap.max", "Swap Limit", value);
}

void CgroupV2Controller::set_subsystem_path(const char* cgroup_path) {
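As the comment at the top of this hunk explains, cgroup v2 keeps the memory limit (memory.max) and the swap limit (memory.swap.max) separate, while cgroup v1 exposed one compound memory+swap value; the v1-style number is therefore the sum of the two. A standalone sketch of that reporting rule (illustrative only; the unlimited sentinel mirrors the document's value_unlimited):

// Sketch of deriving a cgroup v1 style memory+swap limit from the two
// separate cgroup v2 limits.
#include <cstdint>
#include <cstdio>
#include <limits>

const uint64_t value_unlimited = std::numeric_limits<uint64_t>::max();

// mem_max: /sys/fs/cgroup/memory.max; swap_max: /sys/fs/cgroup/memory.swap.max
static uint64_t compound_memsw_limit(uint64_t mem_max, uint64_t swap_max) {
  if (swap_max == value_unlimited) {
    return value_unlimited;      // unlimited swap => unlimited compound value
  }
  // Setting memory.swap.max without memory.max is not allowed, so mem_max is
  // a real number here; the v1-style value is simply the sum of the two.
  return mem_max + swap_max;
}

int main() {
  uint64_t limit = compound_memsw_limit(512ull * 1024 * 1024, 256ull * 1024 * 1024);
  std::printf("memsw limit: %llu bytes\n", (unsigned long long)limit);
  return 0;
}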
@@ -327,10 +367,17 @@ bool CgroupV2Controller::needs_hierarchy_adjustment() {
return strcmp(_cgroup_path, "/") != 0;
}

void CgroupV2MemoryController::print_version_specific_info(outputStream* st, julong upper_mem_bound) {
jlong swap_current = memory_swap_current_value(reader());
jlong swap_limit = memory_swap_limit_value(reader());

void CgroupV2MemoryController::print_version_specific_info(outputStream* st, physical_memory_size_type upper_mem_bound) {
MetricResult swap_current;
physical_memory_size_type swap_current_val = 0;
if (memory_swap_current_value(reader(), swap_current_val)) {
swap_current.set_value(swap_current_val);
}
MetricResult swap_limit;
physical_memory_size_type swap_limit_val = 0;
if (memory_swap_limit_value(reader(), swap_limit_val)) {
swap_limit.set_value(swap_limit_val);
}
OSContainer::print_container_helper(st, swap_current, "memory_swap_current_in_bytes");
OSContainer::print_container_helper(st, swap_limit, "memory_swap_max_limit_in_bytes");
}
@@ -346,29 +393,27 @@ char* CgroupV2Controller::construct_path(char* mount_path, const char* cgroup_pa

/* pids_max
*
* Return the maximum number of tasks available to the process
* Calculate the maximum number of tasks available to the process. Set the
* value in the passed in 'value' reference. The value might be 'value_unlimited' when
* there is no limit.
*
* return:
* maximum number of tasks
* -1 for unlimited
* OSCONTAINER_ERROR for not supported
* true if the value has been set appropriately
* false if there was an error
*/
jlong CgroupV2Subsystem::pids_max() {
jlong pids_max;
CONTAINER_READ_NUMBER_CHECKED_MAX(unified(), "/pids.max", "Maximum number of tasks", pids_max);
return pids_max;
bool CgroupV2Subsystem::pids_max(uint64_t& value) {
CONTAINER_READ_NUMBER_CHECKED_MAX(unified(), "/pids.max", "Maximum number of tasks", value);
}

/* pids_current
*
* The number of tasks currently in the cgroup (and its descendants) of the process
* The number of tasks currently in the cgroup (and its descendants) of the process. Set
* in the passed in 'value' reference.
*
* return:
* current number of tasks
* OSCONTAINER_ERROR for not supported
* true on success
* false when there was an error
*/
jlong CgroupV2Subsystem::pids_current() {
julong pids_current;
CONTAINER_READ_NUMBER_CHECKED(unified(), "/pids.current", "Current number of tasks", pids_current);
return pids_current;
bool CgroupV2Subsystem::pids_current(uint64_t& value) {
CONTAINER_READ_NUMBER_CHECKED(unified(), "/pids.current", "Current number of tasks", value);
}

@@ -59,10 +59,10 @@ class CgroupV2CpuController: public CgroupCpuController {
public:
CgroupV2CpuController(const CgroupV2Controller& reader) : _reader(reader) {
}
int cpu_quota() override;
int cpu_period() override;
int cpu_shares() override;
jlong cpu_usage_in_micros();
bool cpu_quota(int& value) override;
bool cpu_period(int& value) override;
bool cpu_shares(int& value) override;
bool cpu_usage_in_micros(uint64_t& value);
bool is_read_only() override {
return reader()->is_read_only();
}
@@ -87,8 +87,8 @@ class CgroupV2CpuacctController: public CgroupCpuacctController {
CgroupV2CpuacctController(CgroupV2CpuController* reader) : _reader(reader) {
}
// In cgroup v2, cpu usage is a part of the cpu controller.
jlong cpu_usage_in_micros() override {
return reader()->cpu_usage_in_micros();
bool cpu_usage_in_micros(uint64_t& result) override {
return reader()->cpu_usage_in_micros(result);
}
bool is_read_only() override {
return reader()->is_read_only();
@@ -110,20 +110,27 @@ class CgroupV2MemoryController final: public CgroupMemoryController {
private:
CgroupV2Controller _reader;
CgroupV2Controller* reader() { return &_reader; }

public:
CgroupV2MemoryController(const CgroupV2Controller& reader) : _reader(reader) {
}

jlong read_memory_limit_in_bytes(julong upper_bound) override;
jlong memory_and_swap_limit_in_bytes(julong upper_mem_bound, julong upper_swap_bound) override;
jlong memory_and_swap_usage_in_bytes(julong upper_mem_bound, julong upper_swap_bound) override;
jlong memory_soft_limit_in_bytes(julong upper_bound) override;
jlong memory_throttle_limit_in_bytes() override;
jlong memory_usage_in_bytes() override;
jlong memory_max_usage_in_bytes() override;
jlong rss_usage_in_bytes() override;
jlong cache_usage_in_bytes() override;
void print_version_specific_info(outputStream* st, julong upper_mem_bound) override;
bool read_memory_limit_in_bytes(physical_memory_size_type upper_bound,
physical_memory_size_type& result) override;
bool memory_and_swap_limit_in_bytes(physical_memory_size_type upper_mem_bound,
physical_memory_size_type upper_swap_bound,
physical_memory_size_type& result) override;
bool memory_and_swap_usage_in_bytes(physical_memory_size_type upper_mem_bound,
physical_memory_size_type upper_swap_bound,
physical_memory_size_type& result) override;
bool memory_soft_limit_in_bytes(physical_memory_size_type upper_bound,
physical_memory_size_type& result) override;
bool memory_throttle_limit_in_bytes(physical_memory_size_type& result) override;
bool memory_usage_in_bytes(physical_memory_size_type& result) override;
bool memory_max_usage_in_bytes(physical_memory_size_type& result) override;
bool rss_usage_in_bytes(physical_memory_size_type& result) override;
bool cache_usage_in_bytes(physical_memory_size_type& result) override;
void print_version_specific_info(outputStream* st, physical_memory_size_type upper_mem_bound) override;
bool is_read_only() override {
return reader()->is_read_only();
}
@@ -160,8 +167,8 @@ class CgroupV2Subsystem: public CgroupSubsystem {

char * cpu_cpuset_cpus() override;
char * cpu_cpuset_memory_nodes() override;
jlong pids_max() override;
jlong pids_current() override;
bool pids_max(uint64_t& result) override;
bool pids_current(uint64_t& result) override;

bool is_containerized() override;

@@ -84,8 +84,12 @@ void OSContainer::init() {
// We can be in one of two cases:
// 1.) On a physical Linux system without any limit
// 2.) On a physical Linux system with a limit enforced by other means (like systemd slice)
any_mem_cpu_limit_present = memory_limit_in_bytes() > 0 ||
os::Linux::active_processor_count() != active_processor_count();
physical_memory_size_type mem_limit_val = value_unlimited;
(void)memory_limit_in_bytes(mem_limit_val); // discard error and use default
int host_cpus = os::Linux::active_processor_count();
int cpus = host_cpus;
(void)active_processor_count(cpus); // discard error and use default
any_mem_cpu_limit_present = mem_limit_val != value_unlimited || host_cpus != cpus;
if (any_mem_cpu_limit_present) {
reason = " because either a cpu or a memory limit is present";
} else {
@@ -103,77 +107,138 @@ const char * OSContainer::container_type() {
return cgroup_subsystem->container_type();
}

bool OSContainer::available_memory_in_container(julong& value) {
jlong mem_limit = memory_limit_in_bytes();
jlong mem_usage = memory_usage_in_bytes();
bool OSContainer::memory_limit_in_bytes(physical_memory_size_type& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
physical_memory_size_type phys_mem = os::Linux::physical_memory();
return cgroup_subsystem->memory_limit_in_bytes(phys_mem, value);
}

if (mem_limit > 0 && mem_usage <= 0) {
log_debug(os, container)("container memory usage failed: " JLONG_FORMAT, mem_usage);
bool OSContainer::available_memory_in_bytes(physical_memory_size_type& value) {
physical_memory_size_type mem_limit = value_unlimited;
physical_memory_size_type mem_usage = 0;
if (memory_limit_in_bytes(mem_limit) && memory_usage_in_bytes(mem_usage)) {
assert(mem_usage != value_unlimited, "invariant");
if (mem_limit != value_unlimited) {
value = (mem_limit > mem_usage) ? mem_limit - mem_usage : 0;
return true;
}
}
log_trace(os, container)("calculating available memory in container failed");
return false;
}

if (mem_limit <= 0 || mem_usage <= 0) {
bool OSContainer::available_swap_in_bytes(physical_memory_size_type host_free_swap,
physical_memory_size_type& value) {
physical_memory_size_type mem_limit = 0;
physical_memory_size_type mem_swap_limit = 0;
if (memory_limit_in_bytes(mem_limit) &&
memory_and_swap_limit_in_bytes(mem_swap_limit) &&
mem_limit != value_unlimited &&
mem_swap_limit != value_unlimited) {
if (mem_limit >= mem_swap_limit) {
value = 0; // no swap, thus no free swap
return true;
}
physical_memory_size_type swap_limit = mem_swap_limit - mem_limit;
physical_memory_size_type mem_swap_usage = 0;
physical_memory_size_type mem_usage = 0;
if (memory_and_swap_usage_in_bytes(mem_swap_usage) &&
memory_usage_in_bytes(mem_usage)) {
physical_memory_size_type swap_usage = value_unlimited;
if (mem_usage > mem_swap_usage) {
swap_usage = 0; // delta usage must not be negative
} else {
swap_usage = mem_swap_usage - mem_usage;
}
// free swap is based on swap limit (upper bound) and swap usage
if (swap_usage >= swap_limit) {
value = 0; // free swap must not be negative
return true;
}
value = swap_limit - swap_usage;
return true;
}
}
// unlimited or not supported. Leave an appropriate trace message
if (log_is_enabled(Trace, os, container)) {
char mem_swap_buf[25]; // uint64_t => 20 + 1, 'unlimited' => 9 + 1; 10 < 21 < 25
char mem_limit_buf[25];
int num = 0;
if (mem_swap_limit == value_unlimited) {
num = os::snprintf(mem_swap_buf, sizeof(mem_swap_buf), "%s", "unlimited");
} else {
num = os::snprintf(mem_swap_buf, sizeof(mem_swap_buf), PHYS_MEM_TYPE_FORMAT, mem_swap_limit);
}
assert(num < 25, "buffer too small");
mem_swap_buf[num] = '\0';
if (mem_limit == value_unlimited) {
num = os::snprintf(mem_limit_buf, sizeof(mem_limit_buf), "%s", "unlimited");
} else {
num = os::snprintf(mem_limit_buf, sizeof(mem_limit_buf), PHYS_MEM_TYPE_FORMAT, mem_limit);
}
assert(num < 25, "buffer too small");
mem_limit_buf[num] = '\0';
log_trace(os,container)("OSContainer::available_swap_in_bytes: container_swap_limit=%s"
" container_mem_limit=%s, host_free_swap: " PHYS_MEM_TYPE_FORMAT,
mem_swap_buf, mem_limit_buf, host_free_swap);
}
return false;
}

bool OSContainer::memory_and_swap_limit_in_bytes(physical_memory_size_type& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
physical_memory_size_type phys_mem = os::Linux::physical_memory();
physical_memory_size_type host_swap = 0;
if (!os::Linux::host_swap(host_swap)) {
return false;
}

value = mem_limit > mem_usage ? static_cast<julong>(mem_limit - mem_usage) : 0;

return true;
return cgroup_subsystem->memory_and_swap_limit_in_bytes(phys_mem, host_swap, value);
}

jlong OSContainer::memory_limit_in_bytes() {
bool OSContainer::memory_and_swap_usage_in_bytes(physical_memory_size_type& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
julong phys_mem = static_cast<julong>(os::Linux::physical_memory());
return cgroup_subsystem->memory_limit_in_bytes(phys_mem);
physical_memory_size_type phys_mem = os::Linux::physical_memory();
physical_memory_size_type host_swap = 0;
if (!os::Linux::host_swap(host_swap)) {
return false;
}
return cgroup_subsystem->memory_and_swap_usage_in_bytes(phys_mem, host_swap, value);
}

jlong OSContainer::memory_and_swap_limit_in_bytes() {
bool OSContainer::memory_soft_limit_in_bytes(physical_memory_size_type& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
julong phys_mem = static_cast<julong>(os::Linux::physical_memory());
julong host_swap = os::Linux::host_swap();
return cgroup_subsystem->memory_and_swap_limit_in_bytes(phys_mem, host_swap);
physical_memory_size_type phys_mem = os::Linux::physical_memory();
return cgroup_subsystem->memory_soft_limit_in_bytes(phys_mem, value);
}

jlong OSContainer::memory_and_swap_usage_in_bytes() {
bool OSContainer::memory_throttle_limit_in_bytes(physical_memory_size_type& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
julong phys_mem = static_cast<julong>(os::Linux::physical_memory());
julong host_swap = os::Linux::host_swap();
return cgroup_subsystem->memory_and_swap_usage_in_bytes(phys_mem, host_swap);
return cgroup_subsystem->memory_throttle_limit_in_bytes(value);
}

jlong OSContainer::memory_soft_limit_in_bytes() {
bool OSContainer::memory_usage_in_bytes(physical_memory_size_type& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
julong phys_mem = static_cast<julong>(os::Linux::physical_memory());
return cgroup_subsystem->memory_soft_limit_in_bytes(phys_mem);
return cgroup_subsystem->memory_usage_in_bytes(value);
}

jlong OSContainer::memory_throttle_limit_in_bytes() {
bool OSContainer::memory_max_usage_in_bytes(physical_memory_size_type& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
return cgroup_subsystem->memory_throttle_limit_in_bytes();
return cgroup_subsystem->memory_max_usage_in_bytes(value);
}

jlong OSContainer::memory_usage_in_bytes() {
bool OSContainer::rss_usage_in_bytes(physical_memory_size_type& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
return cgroup_subsystem->memory_usage_in_bytes();
return cgroup_subsystem->rss_usage_in_bytes(value);
}

jlong OSContainer::memory_max_usage_in_bytes() {
bool OSContainer::cache_usage_in_bytes(physical_memory_size_type& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
return cgroup_subsystem->memory_max_usage_in_bytes();
}

jlong OSContainer::rss_usage_in_bytes() {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
return cgroup_subsystem->rss_usage_in_bytes();
}

jlong OSContainer::cache_usage_in_bytes() {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
return cgroup_subsystem->cache_usage_in_bytes();
return cgroup_subsystem->cache_usage_in_bytes(value);
}

void OSContainer::print_version_specific_info(outputStream* st) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
julong phys_mem = static_cast<julong>(os::Linux::physical_memory());
physical_memory_size_type phys_mem = os::Linux::physical_memory();
cgroup_subsystem->print_version_specific_info(st, phys_mem);
}

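The arithmetic in available_swap_in_bytes() reduces to three clamped subtractions: the swap limit is the memory+swap limit minus the memory limit, the swap usage is the memory+swap usage minus the memory usage, and free swap is limit minus usage, with each step floored at zero because the quantities are unsigned. A standalone sketch of that computation (illustrative only, not the HotSpot code):

// Sketch of the free-swap computation performed by available_swap_in_bytes().
#include <cstdint>
#include <cstdio>

static uint64_t clamped_sub(uint64_t a, uint64_t b) {
  return a > b ? a - b : 0; // unsigned-safe "max(a - b, 0)"
}

static uint64_t free_swap(uint64_t mem_limit, uint64_t memsw_limit,
                          uint64_t mem_usage, uint64_t memsw_usage) {
  uint64_t swap_limit = clamped_sub(memsw_limit, mem_limit);
  uint64_t swap_usage = clamped_sub(memsw_usage, mem_usage);
  return clamped_sub(swap_limit, swap_usage);
}

int main() {
  // 512M memory / 768M memory+swap limits with 400M / 500M usage => 156M free swap
  const uint64_t M = 1024 * 1024;
  std::printf("free swap: %llu MB\n",
              (unsigned long long)(free_swap(512 * M, 768 * M, 400 * M, 500 * M) / M));
  return 0;
}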
@@ -187,50 +252,55 @@ char * OSContainer::cpu_cpuset_memory_nodes() {
return cgroup_subsystem->cpu_cpuset_memory_nodes();
}

int OSContainer::active_processor_count() {
bool OSContainer::active_processor_count(int& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
return cgroup_subsystem->active_processor_count();
return cgroup_subsystem->active_processor_count(value);
}

int OSContainer::cpu_quota() {
bool OSContainer::cpu_quota(int& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
return cgroup_subsystem->cpu_quota();
return cgroup_subsystem->cpu_quota(value);
}

int OSContainer::cpu_period() {
bool OSContainer::cpu_period(int& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
return cgroup_subsystem->cpu_period();
return cgroup_subsystem->cpu_period(value);
}

int OSContainer::cpu_shares() {
bool OSContainer::cpu_shares(int& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
return cgroup_subsystem->cpu_shares();
return cgroup_subsystem->cpu_shares(value);
}

jlong OSContainer::cpu_usage_in_micros() {
bool OSContainer::cpu_usage_in_micros(uint64_t& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
return cgroup_subsystem->cpu_usage_in_micros();
return cgroup_subsystem->cpu_usage_in_micros(value);
}

jlong OSContainer::pids_max() {
bool OSContainer::pids_max(uint64_t& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
return cgroup_subsystem->pids_max();
return cgroup_subsystem->pids_max(value);
}

jlong OSContainer::pids_current() {
bool OSContainer::pids_current(uint64_t& value) {
assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
return cgroup_subsystem->pids_current();
return cgroup_subsystem->pids_current(value);
}

void OSContainer::print_container_helper(outputStream* st, jlong j, const char* metrics) {
void OSContainer::print_container_helper(outputStream* st, MetricResult& res, const char* metrics) {
st->print("%s: ", metrics);
if (j >= 0) {
if (j >= 1024) {
st->print_cr(UINT64_FORMAT " k", uint64_t(j) / K);
if (res.success()) {
if (res.value() != value_unlimited) {
if (res.value() >= 1024) {
st->print_cr(PHYS_MEM_TYPE_FORMAT " k", (physical_memory_size_type)(res.value() / K));
} else {
st->print_cr(PHYS_MEM_TYPE_FORMAT, res.value());
}
} else {
st->print_cr(UINT64_FORMAT, uint64_t(j));
st->print_cr("%s", "unlimited");
}
} else {
st->print_cr("%s", j == OSCONTAINER_ERROR ? "not supported" : "unlimited");
// Not supported
st->print_cr("%s", "unavailable");
}
}

@@ -30,11 +30,30 @@
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"

#define OSCONTAINER_ERROR (-2)
// Some cgroup interface files define the value 'max' for unlimited.
// Define this constant value to indicate this value.
const uint64_t value_unlimited = std::numeric_limits<uint64_t>::max();

// 20ms timeout between re-reads of memory limit and _active_processor_count.
#define OSCONTAINER_CACHE_TIMEOUT (NANOSECS_PER_SEC/50)

// Carrier object for print_container_helper()
class MetricResult: public StackObj {
private:
static const uint64_t value_unused = 0;
bool _success = false;
physical_memory_size_type _value = value_unused;
public:
void set_value(physical_memory_size_type val) {
// having a value means success
_success = true;
_value = val;
}

bool success() { return _success; }
physical_memory_size_type value() { return _value; }
};

class OSContainer: AllStatic {

private:
@@ -45,36 +64,38 @@ class OSContainer: AllStatic {
public:
static void init();
static void print_version_specific_info(outputStream* st);
static void print_container_helper(outputStream* st, jlong j, const char* metrics);
static void print_container_helper(outputStream* st, MetricResult& res, const char* metrics);

static inline bool is_containerized();
static const char * container_type();

static bool available_memory_in_container(julong& value);
static jlong memory_limit_in_bytes();
static jlong memory_and_swap_limit_in_bytes();
static jlong memory_and_swap_usage_in_bytes();
static jlong memory_soft_limit_in_bytes();
static jlong memory_throttle_limit_in_bytes();
static jlong memory_usage_in_bytes();
static jlong memory_max_usage_in_bytes();
static jlong rss_usage_in_bytes();
static jlong cache_usage_in_bytes();
static bool available_memory_in_bytes(physical_memory_size_type& value);
static bool available_swap_in_bytes(physical_memory_size_type host_free_swap,
physical_memory_size_type& value);
static bool memory_limit_in_bytes(physical_memory_size_type& value);
static bool memory_and_swap_limit_in_bytes(physical_memory_size_type& value);
static bool memory_and_swap_usage_in_bytes(physical_memory_size_type& value);
static bool memory_soft_limit_in_bytes(physical_memory_size_type& value);
static bool memory_throttle_limit_in_bytes(physical_memory_size_type& value);
static bool memory_usage_in_bytes(physical_memory_size_type& value);
static bool memory_max_usage_in_bytes(physical_memory_size_type& value);
static bool rss_usage_in_bytes(physical_memory_size_type& value);
static bool cache_usage_in_bytes(physical_memory_size_type& value);

static int active_processor_count();
static bool active_processor_count(int& value);

static char * cpu_cpuset_cpus();
static char * cpu_cpuset_memory_nodes();

static int cpu_quota();
static int cpu_period();
static bool cpu_quota(int& value);
static bool cpu_period(int& value);

static int cpu_shares();
static bool cpu_shares(int& value);

static jlong cpu_usage_in_micros();
static bool cpu_usage_in_micros(uint64_t& value);

static jlong pids_max();
static jlong pids_current();
static bool pids_max(uint64_t& value);
static bool pids_current(uint64_t& value);
};

inline bool OSContainer::is_containerized() {

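MetricResult, introduced above, separates "the read failed" from "the read succeeded and the value happens to be unlimited": a metric either stays in its default unavailable state or carries a value, which may still equal value_unlimited. A standalone approximation of the class and the three-way reporting it enables (the typedef and printing are sketch-local, not HotSpot declarations):

// Standalone sketch of the MetricResult carrier and its use in reporting.
#include <cstdint>
#include <cstdio>
#include <initializer_list>
#include <limits>

typedef uint64_t physical_memory_size_type;
const uint64_t value_unlimited = std::numeric_limits<uint64_t>::max();

class MetricResult {
 private:
  bool _success = false;
  physical_memory_size_type _value = 0;
 public:
  void set_value(physical_memory_size_type val) { _success = true; _value = val; }
  bool success() const { return _success; }
  physical_memory_size_type value() const { return _value; }
};

int main() {
  MetricResult limit;                   // never set => reported as unavailable
  MetricResult usage;
  usage.set_value(128ull * 1024 * 1024); // a successful read
  for (const MetricResult* m : {&limit, &usage}) {
    if (!m->success())                       std::puts("unavailable");
    else if (m->value() == value_unlimited)  std::puts("unlimited");
    else std::printf("%llu bytes\n", (unsigned long long)m->value());
  }
  return 0;
}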
@@ -214,10 +214,8 @@ static bool suppress_primordial_thread_resolution = false;
// utility functions

bool os::available_memory(physical_memory_size_type& value) {
julong avail_mem = 0;
if (OSContainer::is_containerized() && OSContainer::available_memory_in_container(avail_mem)) {
log_trace(os)("available container memory: " JULONG_FORMAT, avail_mem);
value = static_cast<physical_memory_size_type>(avail_mem);
if (OSContainer::is_containerized() && OSContainer::available_memory_in_bytes(value)) {
log_trace(os)("available container memory: " PHYS_MEM_TYPE_FORMAT, value);
return true;
}

@@ -225,36 +223,38 @@ bool os::available_memory(physical_memory_size_type& value) {
}

bool os::Linux::available_memory(physical_memory_size_type& value) {
julong avail_mem = static_cast<julong>(-1L);
physical_memory_size_type avail_mem = 0;

bool found_available_mem = false;
FILE *fp = os::fopen("/proc/meminfo", "r");
if (fp != nullptr) {
char buf[80];
do {
if (fscanf(fp, "MemAvailable: " JULONG_FORMAT " kB", &avail_mem) == 1) {
if (fscanf(fp, "MemAvailable: " PHYS_MEM_TYPE_FORMAT " kB", &avail_mem) == 1) {
avail_mem *= K;
found_available_mem = true;
break;
}
} while (fgets(buf, sizeof(buf), fp) != nullptr);
fclose(fp);
}
if (avail_mem == static_cast<julong>(-1L)) {
// Only enter the free memory block if we
// haven't found the available memory
if (!found_available_mem) {
physical_memory_size_type free_mem = 0;
if (!free_memory(free_mem)) {
return false;
}
avail_mem = static_cast<julong>(free_mem);
avail_mem = free_mem;
}
log_trace(os)("available memory: " JULONG_FORMAT, avail_mem);
value = static_cast<physical_memory_size_type>(avail_mem);
log_trace(os)("available memory: " PHYS_MEM_TYPE_FORMAT, avail_mem);
value = avail_mem;
return true;
}

bool os::free_memory(physical_memory_size_type& value) {
julong free_mem = 0;
if (OSContainer::is_containerized() && OSContainer::available_memory_in_container(free_mem)) {
log_trace(os)("free container memory: " JULONG_FORMAT, free_mem);
value = static_cast<physical_memory_size_type>(free_mem);
if (OSContainer::is_containerized() && OSContainer::available_memory_in_bytes(value)) {
log_trace(os)("free container memory: " PHYS_MEM_TYPE_FORMAT, value);
return true;
}

@@ -269,29 +269,26 @@ bool os::Linux::free_memory(physical_memory_size_type& value) {
if (ret != 0) {
return false;
}
julong free_mem = (julong)si.freeram * si.mem_unit;
log_trace(os)("free memory: " JULONG_FORMAT, free_mem);
value = static_cast<physical_memory_size_type>(free_mem);
physical_memory_size_type free_mem = (physical_memory_size_type)si.freeram * si.mem_unit;
log_trace(os)("free memory: " PHYS_MEM_TYPE_FORMAT, free_mem);
value = free_mem;
return true;
}

bool os::total_swap_space(physical_memory_size_type& value) {
if (OSContainer::is_containerized()) {
jlong memory_and_swap_limit_in_bytes = OSContainer::memory_and_swap_limit_in_bytes();
jlong memory_limit_in_bytes = OSContainer::memory_limit_in_bytes();
if (memory_limit_in_bytes > 0 && memory_and_swap_limit_in_bytes > 0) {
value = static_cast<physical_memory_size_type>(memory_and_swap_limit_in_bytes - memory_limit_in_bytes);
return true;
physical_memory_size_type mem_swap_limit = value_unlimited;
physical_memory_size_type memory_limit = value_unlimited;
if (OSContainer::memory_and_swap_limit_in_bytes(mem_swap_limit) &&
OSContainer::memory_limit_in_bytes(memory_limit)) {
if (memory_limit != value_unlimited && mem_swap_limit != value_unlimited &&
mem_swap_limit >= memory_limit /* ensure swap is >= 0 */) {
value = mem_swap_limit - memory_limit;
return true;
}
}
} // fallback to the host swap space if the container did return the unbound value of -1
struct sysinfo si;
int ret = sysinfo(&si);
if (ret != 0) {
assert(false, "sysinfo failed in total_swap_space(): %s", os::strerror(errno));
return false;
}
value = static_cast<physical_memory_size_type>(si.totalswap) * si.mem_unit;
return true;
} // fallback to the host swap space if the container returned unlimited
return Linux::host_swap(value);
}

static bool host_free_swap_f(physical_memory_size_type& value) {
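The /proc/meminfo scan above looks for the kernel's "MemAvailable:" line (reported in kB) and falls back to free memory when it is absent. A standalone sketch of the same scan (illustrative only; it uses fgets plus sscanf rather than HotSpot's fscanf-at-position loop):

// Sketch of reading MemAvailable from /proc/meminfo, in bytes.
#include <cstdint>
#include <cstdio>

static bool read_mem_available(uint64_t& bytes) {
  FILE* fp = std::fopen("/proc/meminfo", "r");
  if (fp == nullptr) return false;
  char line[128];
  unsigned long long kb = 0;
  bool found = false;
  while (std::fgets(line, sizeof(line), fp) != nullptr) {
    if (std::sscanf(line, "MemAvailable: %llu kB", &kb) == 1) {
      found = true;
      break;
    }
  }
  std::fclose(fp);
  if (found) bytes = (uint64_t)kb * 1024;
  return found;
}

int main() {
  uint64_t avail = 0;
  if (read_mem_available(avail)) {
    std::printf("MemAvailable: %llu bytes\n", (unsigned long long)avail);
  } else {
    std::puts("MemAvailable not reported; would fall back to free memory");
  }
  return 0;
}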
@@ -315,29 +312,12 @@ bool os::free_swap_space(physical_memory_size_type& value) {
}
physical_memory_size_type host_free_swap_val = MIN2(total_swap_space, host_free_swap);
if (OSContainer::is_containerized()) {
jlong mem_swap_limit = OSContainer::memory_and_swap_limit_in_bytes();
jlong mem_limit = OSContainer::memory_limit_in_bytes();
if (mem_swap_limit >= 0 && mem_limit >= 0) {
jlong delta_limit = mem_swap_limit - mem_limit;
if (delta_limit <= 0) {
value = 0;
return true;
}
jlong mem_swap_usage = OSContainer::memory_and_swap_usage_in_bytes();
jlong mem_usage = OSContainer::memory_usage_in_bytes();
if (mem_swap_usage > 0 && mem_usage > 0) {
jlong delta_usage = mem_swap_usage - mem_usage;
if (delta_usage >= 0) {
jlong free_swap = delta_limit - delta_usage;
value = free_swap >= 0 ? static_cast<physical_memory_size_type>(free_swap) : 0;
return true;
}
}
if (OSContainer::available_swap_in_bytes(host_free_swap_val, value)) {
return true;
}
// unlimited or not supported. Fall through to return host value
log_trace(os,container)("os::free_swap_space: container_swap_limit=" JLONG_FORMAT
" container_mem_limit=" JLONG_FORMAT " returning host value: " PHYS_MEM_TYPE_FORMAT,
mem_swap_limit, mem_limit, host_free_swap_val);
// Fall through to use host value
log_trace(os,container)("os::free_swap_space: containerized value unavailable"
" returning host value: " PHYS_MEM_TYPE_FORMAT, host_free_swap_val);
}
value = host_free_swap_val;
return true;
@@ -345,10 +325,10 @@ bool os::free_swap_space(physical_memory_size_type& value) {

physical_memory_size_type os::physical_memory() {
if (OSContainer::is_containerized()) {
jlong mem_limit;
if ((mem_limit = OSContainer::memory_limit_in_bytes()) > 0) {
log_trace(os)("total container memory: " JLONG_FORMAT, mem_limit);
return static_cast<physical_memory_size_type>(mem_limit);
physical_memory_size_type mem_limit = value_unlimited;
if (OSContainer::memory_limit_in_bytes(mem_limit) && mem_limit != value_unlimited) {
log_trace(os)("total container memory: " PHYS_MEM_TYPE_FORMAT, mem_limit);
return mem_limit;
}
}

@@ -508,10 +488,15 @@ pid_t os::Linux::gettid() {

// Returns the amount of swap currently configured, in bytes.
// This can change at any time.
julong os::Linux::host_swap() {
bool os::Linux::host_swap(physical_memory_size_type& value) {
struct sysinfo si;
sysinfo(&si);
return (julong)(si.totalswap * si.mem_unit);
int ret = sysinfo(&si);
if (ret != 0) {
assert(false, "sysinfo failed in host_swap(): %s", os::strerror(errno));
return false;
}
value = static_cast<physical_memory_size_type>(si.totalswap) * si.mem_unit;
return true;
}

// Most versions of linux have a bug where the number of processors are
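The reworked host_swap() now checks the sysinfo(2) return code instead of silently using an uninitialized struct on failure. A standalone Linux-only sketch of the same query (illustrative only, not the HotSpot function):

// Sketch of querying the configured host swap via sysinfo(2), with an
// explicit error check; mem_unit converts the totalswap count to bytes.
#include <sys/sysinfo.h>
#include <cstdint>
#include <cstdio>

static bool host_swap_bytes(uint64_t& value) {
  struct sysinfo si;
  if (sysinfo(&si) != 0) {
    return false; // a failed syscall is now reported instead of ignored
  }
  value = (uint64_t)si.totalswap * si.mem_unit;
  return true;
}

int main() {
  uint64_t swap = 0;
  if (host_swap_bytes(swap)) {
    std::printf("configured swap: %llu bytes\n", (unsigned long long)swap);
  }
  return 0;
}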
@@ -2469,9 +2454,11 @@ bool os::Linux::print_container_info(outputStream* st) {
st->print_cr("cpu_memory_nodes: %s", p != nullptr ? p : "not supported");
free(p);

int i = OSContainer::active_processor_count();
int i = -1;
bool supported = OSContainer::active_processor_count(i);
st->print("active_processor_count: ");
if (i > 0) {
if (supported) {
assert(i > 0, "must be");
if (ActiveProcessorCount > 0) {
st->print_cr("%d, but overridden by -XX:ActiveProcessorCount %d", i, ActiveProcessorCount);
} else {
@@ -2481,65 +2468,105 @@
st->print_cr("not supported");
}

i = OSContainer::cpu_quota();

supported = OSContainer::cpu_quota(i);
st->print("cpu_quota: ");
if (i > 0) {
if (supported && i > 0) {
st->print_cr("%d", i);
} else {
st->print_cr("%s", i == OSCONTAINER_ERROR ? "not supported" : "no quota");
st->print_cr("%s", !supported ? "not supported" : "no quota");
}

i = OSContainer::cpu_period();
supported = OSContainer::cpu_period(i);
st->print("cpu_period: ");
if (i > 0) {
if (supported && i > 0) {
st->print_cr("%d", i);
} else {
st->print_cr("%s", i == OSCONTAINER_ERROR ? "not supported" : "no period");
st->print_cr("%s", !supported ? "not supported" : "no period");
}

i = OSContainer::cpu_shares();
supported = OSContainer::cpu_shares(i);
st->print("cpu_shares: ");
if (i > 0) {
if (supported && i > 0) {
st->print_cr("%d", i);
} else {
st->print_cr("%s", i == OSCONTAINER_ERROR ? "not supported" : "no shares");
st->print_cr("%s", !supported ? "not supported" : "no shares");
}

jlong j = OSContainer::cpu_usage_in_micros();
uint64_t j = 0;
supported = OSContainer::cpu_usage_in_micros(j);
st->print("cpu_usage_in_micros: ");
if (j >= 0) {
st->print_cr(JLONG_FORMAT, j);
if (supported && j > 0) {
st->print_cr(UINT64_FORMAT, j);
} else {
st->print_cr("%s", j == OSCONTAINER_ERROR ? "not supported" : "no usage");
st->print_cr("%s", !supported ? "not supported" : "no usage");
}

OSContainer::print_container_helper(st, OSContainer::memory_limit_in_bytes(), "memory_limit_in_bytes");
OSContainer::print_container_helper(st, OSContainer::memory_and_swap_limit_in_bytes(), "memory_and_swap_limit_in_bytes");
OSContainer::print_container_helper(st, OSContainer::memory_soft_limit_in_bytes(), "memory_soft_limit_in_bytes");
OSContainer::print_container_helper(st, OSContainer::memory_throttle_limit_in_bytes(), "memory_throttle_limit_in_bytes");
OSContainer::print_container_helper(st, OSContainer::memory_usage_in_bytes(), "memory_usage_in_bytes");
OSContainer::print_container_helper(st, OSContainer::memory_max_usage_in_bytes(), "memory_max_usage_in_bytes");
OSContainer::print_container_helper(st, OSContainer::rss_usage_in_bytes(), "rss_usage_in_bytes");
OSContainer::print_container_helper(st, OSContainer::cache_usage_in_bytes(), "cache_usage_in_bytes");
MetricResult memory_limit;
physical_memory_size_type val = value_unlimited;
if (OSContainer::memory_limit_in_bytes(val)) {
memory_limit.set_value(val);
}
MetricResult mem_swap_limit;
val = value_unlimited;
if (OSContainer::memory_and_swap_limit_in_bytes(val)) {
mem_swap_limit.set_value(val);
}
MetricResult mem_soft_limit;
val = value_unlimited;
if (OSContainer::memory_soft_limit_in_bytes(val)) {
mem_soft_limit.set_value(val);
}
MetricResult mem_throttle_limit;
val = value_unlimited;
if (OSContainer::memory_throttle_limit_in_bytes(val)) {
mem_throttle_limit.set_value(val);
}
MetricResult mem_usage;
val = 0;
if (OSContainer::memory_usage_in_bytes(val)) {
mem_usage.set_value(val);
}
MetricResult mem_max_usage;
val = 0;
if (OSContainer::memory_max_usage_in_bytes(val)) {
mem_max_usage.set_value(val);
}
MetricResult rss_usage;
val = 0;
if (OSContainer::rss_usage_in_bytes(val)) {
rss_usage.set_value(val);
}
MetricResult cache_usage;
val = 0;
if (OSContainer::cache_usage_in_bytes(val)) {
cache_usage.set_value(val);
}
OSContainer::print_container_helper(st, memory_limit, "memory_limit_in_bytes");
OSContainer::print_container_helper(st, mem_swap_limit, "memory_and_swap_limit_in_bytes");
OSContainer::print_container_helper(st, mem_soft_limit, "memory_soft_limit_in_bytes");
OSContainer::print_container_helper(st, mem_throttle_limit, "memory_throttle_limit_in_bytes");
OSContainer::print_container_helper(st, mem_usage, "memory_usage_in_bytes");
OSContainer::print_container_helper(st, mem_max_usage, "memory_max_usage_in_bytes");
OSContainer::print_container_helper(st, rss_usage, "rss_usage_in_bytes");
OSContainer::print_container_helper(st, cache_usage, "cache_usage_in_bytes");

OSContainer::print_version_specific_info(st);

j = OSContainer::pids_max();
supported = OSContainer::pids_max(j);
st->print("maximum number of tasks: ");
if (j > 0) {
st->print_cr(JLONG_FORMAT, j);
if (supported && j != value_unlimited) {
st->print_cr(UINT64_FORMAT, j);
} else {
st->print_cr("%s", j == OSCONTAINER_ERROR ? "not supported" : "unlimited");
st->print_cr("%s", !supported ? "not supported" : "unlimited");
}

j = OSContainer::pids_current();
supported = OSContainer::pids_current(j);
st->print("current number of tasks: ");
if (j > 0) {
st->print_cr(JLONG_FORMAT, j);
if (supported && j > 0) {
st->print_cr(UINT64_FORMAT, j);
} else {
if (j == OSCONTAINER_ERROR) {
st->print_cr("not supported");
}
st->print_cr("%s", !supported ? "not supported" : "no current tasks");
}

return true;
@@ -4643,7 +4670,7 @@ int os::Linux::active_processor_count() {
//
// 1. User option -XX:ActiveProcessorCount
// 2. kernel os calls (sched_getaffinity or sysconf(_SC_NPROCESSORS_ONLN))
// 3. extracted from cgroup cpu subsystem (shares and quotas)
// 3. extracted from cgroup cpu subsystem (quotas)
//
// Option 1, if specified, will always override.
// If the cgroup subsystem is active and configured, we
@@ -4660,9 +4687,8 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}

int active_cpus;
if (OSContainer::is_containerized()) {
active_cpus = OSContainer::active_processor_count();
int active_cpus = -1;
if (OSContainer::is_containerized() && OSContainer::active_processor_count(active_cpus)) {
log_trace(os)("active_processor_count: determined by OSContainer: %d",
active_cpus);
} else {

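The comment above spells out a three-step precedence: the explicit -XX:ActiveProcessorCount flag wins, then the cgroup-derived count, then the host's kernel-reported count. A standalone sketch of that precedence (all names here are stand-ins, not HotSpot APIs):

// Sketch of the processor-count precedence described above.
#include <cstdio>
#include <thread>

static int flag_active_processor_count = -1; // -XX:ActiveProcessorCount stand-in

static bool container_processor_count(int& value) {
  (void)value;
  return false; // pretend we are not containerized / no cgroup limit applies
}

static int active_processor_count() {
  if (flag_active_processor_count > 0) {
    return flag_active_processor_count;            // 1. explicit user override
  }
  int cpus = 0;
  if (container_processor_count(cpus)) {
    return cpus;                                   // 2. cgroup-derived count
  }
  return (int)std::thread::hardware_concurrency(); // 3. host CPU count
}

int main() {
  std::printf("active processors: %d\n", active_processor_count());
  return 0;
}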
@@ -45,8 +45,6 @@ class os::Linux {
static GrowableArray<int>* _cpu_to_node;
static GrowableArray<int>* _nindex_to_node;

static julong available_memory_in_container();

protected:

static physical_memory_size_type _physical_memory;
@@ -117,7 +115,7 @@ class os::Linux {
static uintptr_t initial_thread_stack_size(void) { return _initial_thread_stack_size; }

static physical_memory_size_type physical_memory() { return _physical_memory; }
static julong host_swap();
static bool host_swap(physical_memory_size_type& value);

static intptr_t* ucontext_get_sp(const ucontext_t* uc);
static intptr_t* ucontext_get_fp(const ucontext_t* uc);

@@ -84,6 +84,8 @@ void VM_Version::get_os_cpu_info() {
_cpu = CPU_AMCC;
} else if (buf && strstr(buf, "Cavium Inc.") != nullptr) {
_cpu = CPU_CAVIUM;
} else if (buf && strstr(buf, "Qualcomm Technologies Inc") != nullptr) {
_cpu = CPU_QUALCOMM;
} else {
log_info(os)("VM_Version: unknown CPU model");
}

@@ -39,7 +39,7 @@ class TypeArrayKlass;
// It also decides what Klasses must be cached in aot-initialized state.
//
// ArchiveBuilder uses [1] as roots to scan for all MetaspaceObjs that need to be cached.
// ArchiveHeapWriter uses [2] to create an image of the archived heap.
// HeapShared uses [2] to create an image of the archived heap.
//
// [1] is stored in _all_cached_classes in aotArtifactFinder.cpp.
// [2] is stored in HeapShared::archived_object_cache().

@@ -796,7 +796,7 @@ void AOTMapLogger::dumptime_log_mapped_heap_region(ArchiveMappedHeapInfo* heap_i
address buffer_start = address(r.start()); // start of the current oop inside the buffer
address buffer_end = address(r.end());

address requested_base = UseCompressedOops ? (address)CompressedOops::base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
address requested_base = UseCompressedOops ? AOTMappedHeapWriter::narrow_oop_base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
address requested_start = UseCompressedOops ? AOTMappedHeapWriter::buffered_addr_to_requested_addr(buffer_start) : requested_base;

log_region_range("heap", buffer_start, buffer_end, requested_start);

@@ -55,7 +55,7 @@

GrowableArrayCHeap<u1, mtClassShared>* AOTMappedHeapWriter::_buffer = nullptr;

// The following are offsets from buffer_bottom()
bool AOTMappedHeapWriter::_is_writing_deterministic_heap = false;
size_t AOTMappedHeapWriter::_buffer_used;

// Heap root segments
@@ -74,7 +74,7 @@ AOTMappedHeapWriter::_buffer_offset_to_source_obj_table = nullptr;
DumpedInternedStrings *AOTMappedHeapWriter::_dumped_interned_strings = nullptr;

typedef HashTable<
size_t, // offset of a filler from ArchiveHeapWriter::buffer_bottom()
size_t, // offset of a filler from AOTMappedHeapWriter::buffer_bottom()
size_t, // size of this filler (in bytes)
127, // prime number
AnyObj::C_HEAP,
@@ -96,6 +96,45 @@ void AOTMappedHeapWriter::init() {
_source_objs = new GrowableArrayCHeap<oop, mtClassShared>(10000);

guarantee(MIN_GC_REGION_ALIGNMENT <= G1HeapRegion::min_region_size_in_words() * HeapWordSize, "must be");

if (CDSConfig::old_cds_flags_used()) {
// With the old CDS workflow, we can guarantee deterministic output: given
// the same classlist file, we can generate the same static CDS archive.
// To ensure determinism, we always use the same compressed oop encoding
// (zero-based, no shift). See set_requested_address_range().
_is_writing_deterministic_heap = true;
} else {
// Deterministic output is not supported by the new AOT workflow, so
// we don't force the (zero-based, no shift) encoding. This way, it is more
// likely that we can avoid oop relocation in the production run.
_is_writing_deterministic_heap = false;
}
}
}

// For AOTMappedHeapWriter::narrow_oop_{mode, base, shift}(), see comments
// in AOTMappedHeapWriter::set_requested_address_range().
CompressedOops::Mode AOTMappedHeapWriter::narrow_oop_mode() {
if (is_writing_deterministic_heap()) {
return CompressedOops::UnscaledNarrowOop;
} else {
return CompressedOops::mode();
}
}

address AOTMappedHeapWriter::narrow_oop_base() {
if (is_writing_deterministic_heap()) {
return (address)0;
} else {
return CompressedOops::base();
}
}

int AOTMappedHeapWriter::narrow_oop_shift() {
if (is_writing_deterministic_heap()) {
return 0;
} else {
return CompressedOops::shift();
}
}
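For concreteness, here is a minimal standalone sketch (not part of the patch; encode_requested is a hypothetical helper) of what the two encodings above mean for a requested address:

    // Sketch: compress a requested address under the writer's oop encoding.
    // Deterministic mode is zero-base/zero-shift, so the narrowOop is simply
    // the low 32 bits of the address; otherwise the assembly-phase
    // CompressedOops parameters apply.
    static uint32_t encode_requested(uintptr_t requested_addr) {
      if (AOTMappedHeapWriter::is_writing_deterministic_heap()) {
        return (uint32_t)requested_addr;                     // base == 0, shift == 0
      } else {
        uintptr_t base = (uintptr_t)CompressedOops::base();
        int shift = CompressedOops::shift();
        return (uint32_t)((requested_addr - base) >> shift); // standard COOPS encoding
      }
    }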
@@ -116,7 +155,7 @@ void AOTMappedHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
assert(CDSConfig::is_dumping_heap(), "sanity");
allocate_buffer();
copy_source_objs_to_buffer(roots);
set_requested_address(heap_info);
set_requested_address_range(heap_info);
relocate_embedded_oops(roots, heap_info);
}

@@ -536,14 +575,55 @@ size_t AOTMappedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
return buffered_obj_offset;
}

void AOTMappedHeapWriter::set_requested_address(ArchiveMappedHeapInfo* info) {
// Set the range [_requested_bottom, _requested_top), the requested address range of all
// the archived heap objects in the production run.
//
// (1) UseCompressedOops == true && !is_writing_deterministic_heap()
//
// The archived objects are stored using the COOPS encoding of the assembly phase.
// We pick a range within the heap used by the assembly phase.
//
// In the production run, if different COOPS encodings are used:
// - The heap contents need to be relocated.
//
// (2) UseCompressedOops == true && is_writing_deterministic_heap()
//
// We always use zero-based, zero-shift encoding. _requested_top is aligned to 0x10000000.
//
// (3) UseCompressedOops == false:
//
// In the production run, the heap range is usually picked (randomly) by the OS, so we
// will almost always need to perform relocation, regardless of how we pick the requested
// address range.
//
// So we just hard code it to NOCOOPS_REQUESTED_BASE.
//
void AOTMappedHeapWriter::set_requested_address_range(ArchiveMappedHeapInfo* info) {
assert(!info->is_used(), "only set once");

size_t heap_region_byte_size = _buffer_used;
assert(heap_region_byte_size > 0, "must have archived at least one object!");

if (UseCompressedOops) {
if (UseG1GC) {
if (is_writing_deterministic_heap()) {
// Pick a heap range so that requested addresses can be encoded with zero-base/no shift.
// We align the requested bottom to at least 1 MB: if the production run uses G1 with a small
// heap (e.g., -Xmx256m), it's likely that we can map the archived objects at the
// requested location to avoid relocation.
//
// For other collectors or larger heaps, relocation is unavoidable, but is usually
// quite cheap. If you really want to avoid relocation, use the AOT workflow instead.
address heap_end = (address)0x100000000;
size_t alignment = MAX2(MIN_GC_REGION_ALIGNMENT, 1024 * 1024);
if (align_up(heap_region_byte_size, alignment) >= (size_t)heap_end) {
log_error(aot, heap)("cached heap space is too large: %zu bytes", heap_region_byte_size);
AOTMetaspace::unrecoverable_writing_error();
}
_requested_bottom = align_down(heap_end - heap_region_byte_size, alignment);
} else if (UseG1GC) {
// For G1, pick the range at the top of the current heap. If the exact same heap sizes
// are used in the production run, it's likely that we can map the archived objects
// at the requested location to avoid relocation.
address heap_end = (address)G1CollectedHeap::heap()->reserved().end();
log_info(aot, heap)("Heap end = %p", heap_end);
_requested_bottom = align_down(heap_end - heap_region_byte_size, G1HeapRegion::GrainBytes);
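The deterministic branch's arithmetic can be checked by hand; a worked sketch using the constants above (the sizes are illustrative):

    // Sketch: heap_end = 0x100000000 (4 GB), archived heap = 3 MB,
    // alignment = MAX2(256 KB, 1 MB) = 1 MB:
    //   _requested_bottom = align_down(0x100000000 - 0x300000, 0x100000)
    //                     = 0xFFD00000
    // The whole requested range then lies below 4 GB, so every requested
    // address fits in 32 bits under the zero-base/zero-shift encoding.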
@@ -612,7 +692,14 @@ oop AOTMappedHeapWriter::load_oop_from_buffer(narrowOop* buffered_addr) {

template <typename T> void AOTMappedHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer, oop source_referent, CHeapBitMap* oopmap) {
oop request_referent = source_obj_to_requested_obj(source_referent);
store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
if (UseCompressedOops && is_writing_deterministic_heap()) {
// We use zero-based, 0-shift encoding, so the narrowOop is just the lower
// 32 bits of request_referent
intptr_t addr = cast_from_oop<intptr_t>(request_referent);
*((narrowOop*)field_addr_in_buffer) = checked_cast<narrowOop>(addr);
} else {
store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
}
if (request_referent != nullptr) {
mark_oop_pointer<T>(field_addr_in_buffer, oopmap);
}
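Since the deterministic encoding has zero base and zero shift, the store above is a plain 64-to-32-bit truncation that must not discard set bits; a minimal sketch of the invariant:

    // Sketch: checked_cast<narrowOop>(addr) asserts that addr < 2^32, which
    // holds because set_requested_address_range() keeps the entire requested
    // range below 4 GB in deterministic mode.
    intptr_t addr = cast_from_oop<intptr_t>(request_referent);
    narrowOop n = checked_cast<narrowOop>(addr); // would assert if high bits were set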
@@ -918,9 +1005,9 @@ AOTMapLogger::OopDataIterator* AOTMappedHeapWriter::oop_iterator(ArchiveMappedHe
address buffer_start = address(r.start());
address buffer_end = address(r.end());

address requested_base = UseCompressedOops ? (address)CompressedOops::base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
address requested_start = UseCompressedOops ? buffered_addr_to_requested_addr(buffer_start) : requested_base;
int requested_shift = CompressedOops::shift();
address requested_base = UseCompressedOops ? AOTMappedHeapWriter::narrow_oop_base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
address requested_start = UseCompressedOops ? AOTMappedHeapWriter::buffered_addr_to_requested_addr(buffer_start) : requested_base;
int requested_shift = AOTMappedHeapWriter::narrow_oop_shift();
intptr_t buffer_to_requested_delta = requested_start - buffer_start;
uint64_t buffer_start_narrow_oop = 0xdeadbeed;
if (UseCompressedOops) {

@@ -29,6 +29,7 @@
#include "cds/heapShared.hpp"
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
#include "oops/compressedOops.hpp"
#include "oops/oopHandle.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/exceptions.hpp"

@@ -71,7 +72,7 @@ class AOTMappedHeapWriter : AllStatic {
// These are entered into HeapShared::archived_object_cache().
//
// - "buffered objects" are copies of the "source objects", and are stored into
// ArchiveHeapWriter::_buffer, which is a GrowableArray that sits outside of
// AOTMappedHeapWriter::_buffer, which is a GrowableArray that sits outside of
// the valid heap range. Therefore we avoid using the addresses of these copies
// as oops. They are usually called "buffered_addr" in the code (of the type "address").
//

@@ -81,26 +82,11 @@ class AOTMappedHeapWriter : AllStatic {
// - Each archived object has a "requested address" -- at run time, if the object
// can be mapped at this address, we can avoid relocation.
//
// The requested address is implemented differently depending on UseCompressedOops:
// The requested address of an archived object is essentially its buffered_addr + delta,
// where delta is (_requested_bottom - buffer_bottom());
//
// UseCompressedOops == true:
// The archived objects are stored assuming that the runtime COOPS compression
// scheme is exactly the same as in dump time (or else a more expensive runtime relocation
// would be needed.)
//
// At dump time, we assume that the runtime heap range is exactly the same as
// in dump time. The requested addresses of the archived objects are chosen such that
// they would occupy the top end of a G1 heap (TBD when dumping is supported by other
// collectors. See JDK-8298614).
//
// UseCompressedOops == false:
// At runtime, the heap range is usually picked (randomly) by the OS, so we will almost always
// need to perform relocation. Hence, the goal of the "requested address" is to ensure that
// the contents of the archived objects are deterministic. I.e., the oop fields of archived
// objects will always point to deterministic addresses.
//
// For G1, the archived heap is written such that the lowest archived object is placed
// at NOCOOPS_REQUESTED_BASE. (TBD after JDK-8298614).
// The requested addresses of all archived objects are within [_requested_bottom, _requested_top).
// See AOTMappedHeapWriter::set_requested_address_range() for more info.
// ----------------------------------------------------------------------

public:
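A sketch of the delta mapping described in the comment above (the body is illustrative; the writer's buffered_addr_to_requested_addr follows this shape):

    // Sketch: every buffered object maps to its requested address by one
    // constant delta, so runtime relocation, when needed, is a uniform
    // pointer adjustment over the whole region.
    address buffered_addr_to_requested_addr(address buffered_addr) {
      intptr_t delta = _requested_bottom - buffer_bottom();
      return buffered_addr + delta;
    }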
@@ -111,6 +97,15 @@ public:
// Shenandoah heap region size can never be smaller than 256K.
static constexpr int MIN_GC_REGION_ALIGNMENT = 256 * K;

// The heap contents are required to be deterministic when dumping "old" CDS archives, in order
// to support reproducible lib/server/classes*.jsa when building the JDK.
static bool is_writing_deterministic_heap() { return _is_writing_deterministic_heap; }

// The oop encoding used by the archived heap objects.
static CompressedOops::Mode narrow_oop_mode();
static address narrow_oop_base();
static int narrow_oop_shift();

static const int INITIAL_TABLE_SIZE = 15889; // prime number
static const int MAX_TABLE_SIZE = 1000000;

@@ -121,6 +116,7 @@ private:
int _field_offset;
};

static bool _is_writing_deterministic_heap;
static GrowableArrayCHeap<u1, mtClassShared>* _buffer;

// The number of bytes that have been written into _buffer (may be smaller than _buffer->length()).

@@ -130,15 +126,15 @@ private:
static HeapRootSegments _heap_root_segments;

// The address range of the requested location of the archived heap objects.
static address _requested_bottom;
static address _requested_top;
static address _requested_bottom; // The requested address of the lowest archived heap object
static address _requested_top; // The exclusive end of the highest archived heap object

static GrowableArrayCHeap<NativePointerInfo, mtClassShared>* _native_pointers;
static GrowableArrayCHeap<oop, mtClassShared>* _source_objs;
static DumpedInternedStrings *_dumped_interned_strings;

// We sort _source_objs_order to minimize the number of bits in ptrmap and oopmap.
// See comments near the body of ArchiveHeapWriter::compare_objs_by_oop_fields().
// See comments near the body of AOTMappedHeapWriter::compare_objs_by_oop_fields().
// The objects will be written in the order of:
//_source_objs->at(_source_objs_order->at(0)._index)
// source_objs->at(_source_objs_order->at(1)._index)

@@ -200,7 +196,7 @@ private:
static int filler_array_length(size_t fill_bytes);
static HeapWord* init_filler_array_at_buffer_top(int array_length, size_t fill_bytes);

static void set_requested_address(ArchiveMappedHeapInfo* info);
static void set_requested_address_range(ArchiveMappedHeapInfo* info);
static void mark_native_pointers(oop orig_obj);
static void relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots, ArchiveMappedHeapInfo* info);
static void compute_ptrmap(ArchiveMappedHeapInfo *info);

@@ -114,6 +114,7 @@ intx AOTMetaspace::_relocation_delta;
char* AOTMetaspace::_requested_base_address;
Array<Method*>* AOTMetaspace::_archived_method_handle_intrinsics = nullptr;
bool AOTMetaspace::_use_optimized_module_handling = true;
FileMapInfo* AOTMetaspace::_output_mapinfo = nullptr;

// The CDS archive is divided into the following regions:
// rw - read-write metadata

@@ -322,6 +323,24 @@ void AOTMetaspace::initialize_for_static_dump() {
AOTMetaspace::unrecoverable_writing_error();
}
_symbol_region.init(&_symbol_rs, &_symbol_vs);
if (CDSConfig::is_dumping_preimage_static_archive()) {
// We are in the AOT training run. User code is executed.
//
// On Windows, if the user code closes System.out and we open the AOT config file for output
// only at VM exit, we might get back the same file HANDLE as stdout, and the AOT config
// file may get corrupted by UL logs. By opening early, we ensure that the output
// HANDLE is different from stdout so we can avoid such corruption.
open_output_mapinfo();
} else {
// No need for the above as we won't execute any user code.
}
}

void AOTMetaspace::open_output_mapinfo() {
const char* static_archive = CDSConfig::output_archive_path();
assert(static_archive != nullptr, "sanity");
_output_mapinfo = new FileMapInfo(static_archive, true);
_output_mapinfo->open_as_output();
}

// Called by universe_post_init()

@@ -655,15 +674,14 @@ private:

public:

VM_PopulateDumpSharedSpace(StaticArchiveBuilder& b) :
VM_Operation(), _mapped_heap_info(), _streamed_heap_info(), _map_info(nullptr), _builder(b) {}
VM_PopulateDumpSharedSpace(StaticArchiveBuilder& b, FileMapInfo* map_info) :
VM_Operation(), _mapped_heap_info(), _streamed_heap_info(), _map_info(map_info), _builder(b) {}

bool skip_operation() const { return false; }

VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
ArchiveMappedHeapInfo* mapped_heap_info() { return &_mapped_heap_info; }
ArchiveStreamedHeapInfo* streamed_heap_info() { return &_streamed_heap_info; }
FileMapInfo* map_info() const { return _map_info; }
void doit(); // outline because gdb sucks
bool allow_nested_vm_operations() const { return true; }
}; // class VM_PopulateDumpSharedSpace

@@ -795,12 +813,6 @@ void VM_PopulateDumpSharedSpace::doit() {
CppVtables::zero_archived_vtables();

// Write the archive file
if (CDSConfig::is_dumping_final_static_archive()) {
FileMapInfo::free_current_info(); // FIXME: should not free current info
}
const char* static_archive = CDSConfig::output_archive_path();
assert(static_archive != nullptr, "sanity");
_map_info = new FileMapInfo(static_archive, true);
_map_info->populate_header(AOTMetaspace::core_region_alignment());
_map_info->set_early_serialized_data(early_serialized_data);
_map_info->set_serialized_data(serialized_data);

@@ -1138,7 +1150,14 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
}
#endif

VM_PopulateDumpSharedSpace op(builder);
if (!CDSConfig::is_dumping_preimage_static_archive()) {
if (CDSConfig::is_dumping_final_static_archive()) {
FileMapInfo::free_current_info(); // FIXME: should not free current info
}
open_output_mapinfo();
}

VM_PopulateDumpSharedSpace op(builder, _output_mapinfo);
VMThread::execute(&op);

if (AOTCodeCache::is_on_for_dump() && CDSConfig::is_dumping_final_static_archive()) {

@@ -1152,7 +1171,9 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
CDSConfig::disable_dumping_aot_code();
}

bool status = write_static_archive(&builder, op.map_info(), op.mapped_heap_info(), op.streamed_heap_info());
bool status = write_static_archive(&builder, _output_mapinfo, op.mapped_heap_info(), op.streamed_heap_info());
assert(!_output_mapinfo->is_open(), "Must be closed already");
_output_mapinfo = nullptr;
if (status && CDSConfig::is_dumping_preimage_static_archive()) {
tty->print_cr("%s AOTConfiguration recorded: %s",
CDSConfig::has_temp_aot_config_file() ? "Temporary" : "", AOTConfiguration);

@@ -1173,11 +1194,10 @@ bool AOTMetaspace::write_static_archive(ArchiveBuilder* builder,
// relocate the data so that it can be mapped to AOTMetaspace::requested_base_address()
// without runtime relocation.
builder->relocate_to_requested();

map_info->open_as_output();
if (!map_info->is_open()) {
return false;
}
map_info->prepare_for_writing();
builder->write_archive(map_info, mapped_heap_info, streamed_heap_info);
return true;
}

@@ -60,6 +60,7 @@ class AOTMetaspace : AllStatic {
static char* _requested_base_address;
static bool _use_optimized_module_handling;
static Array<Method*>* _archived_method_handle_intrinsics;
static FileMapInfo* _output_mapinfo;

public:
enum {

@@ -185,6 +186,7 @@ public:
private:
static void read_extra_data(JavaThread* current, const char* filename) NOT_CDS_RETURN;
static void fork_and_dump_final_static_archive(TRAPS);
static void open_output_mapinfo();
static bool write_static_archive(ArchiveBuilder* builder,
FileMapInfo* map_info,
ArchiveMappedHeapInfo* mapped_heap_info,

@@ -526,7 +526,7 @@ void CDSConfig::check_aotmode_record() {
bool has_output = !FLAG_IS_DEFAULT(AOTCacheOutput);

if (!has_output && !has_config) {
vm_exit_during_initialization("At least one of AOTCacheOutput and AOTConfiguration must be specified when using -XX:AOTMode=record");
}

if (has_output) {

@@ -353,6 +353,7 @@ void DynamicArchiveBuilder::write_archive(char* serialized_data, AOTClassLocatio
assert(dynamic_info != nullptr, "Sanity");

dynamic_info->open_as_output();
dynamic_info->prepare_for_writing();
ArchiveBuilder::write_archive(dynamic_info, nullptr, nullptr);

address base = _requested_dynamic_archive_bottom;

@@ -216,12 +216,14 @@ void FileMapHeader::populate(FileMapInfo *info, size_t core_region_alignment,
_obj_alignment = ObjectAlignmentInBytes;
_compact_strings = CompactStrings;
_compact_headers = UseCompactObjectHeaders;
#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_dumping_heap()) {
_object_streaming_mode = HeapShared::is_writing_streaming_mode();
_narrow_oop_mode = CompressedOops::mode();
_narrow_oop_base = CompressedOops::base();
_narrow_oop_shift = CompressedOops::shift();
_narrow_oop_mode = AOTMappedHeapWriter::narrow_oop_mode();
_narrow_oop_base = AOTMappedHeapWriter::narrow_oop_base();
_narrow_oop_shift = AOTMappedHeapWriter::narrow_oop_shift();
}
#endif
_compressed_oops = UseCompressedOops;
_compressed_class_ptrs = UseCompressedClassPointers;
if (UseCompressedClassPointers) {
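Recording the writer's narrow-oop parameters in the header lets the production run compare them against its own COOPS settings; a hypothetical sketch of that runtime check (the helper name is illustrative, not the actual FileMap API):

    // Sketch: the mapped heap needs relocation iff the production encoding
    // differs from the one the writer recorded in the header.
    bool needs_heap_relocation(const FileMapHeader* h) {
      return h->_narrow_oop_mode != CompressedOops::mode() ||
             h->_narrow_oop_base != CompressedOops::base() ||
             h->_narrow_oop_shift != CompressedOops::shift();
    }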
@@ -777,7 +779,9 @@ void FileMapInfo::open_as_output() {
}
_fd = fd;
_file_open = true;
}

void FileMapInfo::prepare_for_writing() {
// Seek past the header. We will write the header after all regions are written
// and their CRCs computed.
size_t header_bytes = header()->header_size();

@@ -911,7 +915,7 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
if (HeapShared::is_writing_mapping_mode()) {
requested_base = (char*)AOTMappedHeapWriter::requested_address();
if (UseCompressedOops) {
mapping_offset = (size_t)((address)requested_base - CompressedOops::base());
mapping_offset = (size_t)((address)requested_base - AOTMappedHeapWriter::narrow_oop_base());
assert((mapping_offset >> CompressedOops::shift()) << CompressedOops::shift() == mapping_offset, "must be");
}
} else {
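The assertion above checks that the offset is representable under the oop shift, i.e. that its low shift bits are zero; a worked sketch:

    // Sketch: (offset >> shift) << shift == offset holds only when the low
    // `shift` bits of offset are zero. With shift == 3:
    //   0x40000000 -> passes (low 3 bits are 0)
    //   0x40000004 -> fails  (rounds down to 0x40000000 != offset)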
@@ -290,7 +290,7 @@ public:

void log_paths(const char* msg, int start_idx, int end_idx);

FileMapInfo(const char* full_apth, bool is_static);
FileMapInfo(const char* full_path, bool is_static);
~FileMapInfo();
static void free_current_info();

@@ -365,6 +365,7 @@ public:
// File manipulation.
bool open_as_input() NOT_CDS_RETURN_(false);
void open_as_output();
void prepare_for_writing();
void write_header();
void write_region(int region, char* base, size_t size,
bool read_only, bool allow_exec);

@@ -631,9 +631,8 @@ void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
}

// Given java_mirror that represents a (primitive or reference) type T,
// return the "scratch" version that represents the same type T.
// Note that if java_mirror will be returned if it's already a
// scratch mirror.
// return the "scratch" version that represents the same type T. Note
// that java_mirror will be returned if the mirror is already a scratch mirror.
//
// See java_lang_Class::create_scratch_mirror() for more info.
oop HeapShared::scratch_java_mirror(oop java_mirror) {

@@ -332,7 +332,7 @@ public:
// Used by CDSHeapVerifier.
OopHandle _orig_referrer;

// The location of this object inside ArchiveHeapWriter::_buffer
// The location of this object inside {AOTMappedHeapWriter, AOTStreamedHeapWriter}::_buffer
size_t _buffer_offset;

// One or more fields in this object are pointing to non-null oops.

@@ -216,9 +216,6 @@ ciField::ciField(fieldDescriptor *fd) :
static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
if (holder == nullptr)
return false;
if (holder->name() == ciSymbols::java_lang_System())
// Never trust strangely unstable finals: System.out, etc.
return false;
// Even if general trusting is disabled, trust system-built closures in these packages.
if (holder->is_in_package("java/lang/invoke") || holder->is_in_package("sun/invoke") ||
holder->is_in_package("java/lang/reflect") || holder->is_in_package("jdk/internal/reflect") ||

@@ -230,15 +227,9 @@ static bool trust_final_non_static_fields(ciInstanceKlass* holder) {
// can't be serialized, so there is no hacking of finals going on with them.
if (holder->is_hidden())
return true;
// Trust final fields in all boxed classes
if (holder->is_box_klass())
return true;
// Trust final fields in records
if (holder->is_record())
return true;
// Trust final fields in String
if (holder->name() == ciSymbols::java_lang_String())
return true;
// Trust Atomic*FieldUpdaters: they are very important for performance, and make up one
// more reason not to use Unsafe, if their final fields are trusted. See more in JDK-8140483.
if (holder->name() == ciSymbols::java_util_concurrent_atomic_AtomicIntegerFieldUpdater_Impl() ||

@@ -267,17 +258,7 @@ void ciField::initialize_from(fieldDescriptor* fd) {
// not be constant is when the field is a *special* static & final field
// whose value may change. The three examples are java.lang.System.in,
// java.lang.System.out, and java.lang.System.err.
assert(vmClasses::System_klass() != nullptr, "Check once per vm");
if (k == vmClasses::System_klass()) {
// Check offsets for case 2: System.in, System.out, or System.err
if (_offset == java_lang_System::in_offset() ||
_offset == java_lang_System::out_offset() ||
_offset == java_lang_System::err_offset()) {
_is_constant = false;
return;
}
}
_is_constant = true;
_is_constant = !fd->is_mutable_static_final();
} else {
// An instance field can be constant if it's a final static field or if
// it's a final non-static field of a trusted class (classes in
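The replacement collapses the hand-maintained System.in/out/err offset checks into a single predicate; a sketch of the resulting rule (assuming, as the diff suggests, that fieldDescriptor::is_mutable_static_final() covers exactly those re-bindable fields):

    // Sketch: a static final field is constant-foldable unless the field
    // descriptor marks it as a mutable static final, e.g. System.in/out/err,
    // which can be re-bound via setIn()/setOut()/setErr().
    _is_constant = !fd->is_mutable_static_final();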
@@ -605,7 +605,7 @@ bool ciInstanceKlass::is_leaf_type() {
if (is_shared()) {
return is_final(); // approximately correct
} else {
return !has_subklass() && (nof_implementors() == 0);
return !has_subklass() && (!is_interface() || nof_implementors() == 0);
}
}

@@ -619,6 +619,7 @@ bool ciInstanceKlass::is_leaf_type() {
// This is OK, since any dependencies we decide to assert
// will be checked later under the Compile_lock.
ciInstanceKlass* ciInstanceKlass::implementor() {
assert(is_interface(), "required");
ciInstanceKlass* impl = _implementor;
if (impl == nullptr) {
if (is_shared()) {

@@ -259,6 +259,7 @@ public:

ciInstanceKlass* unique_implementor() {
assert(is_loaded(), "must be loaded");
assert(is_interface(), "must be");
ciInstanceKlass* impl = implementor();
return (impl != this ? impl : nullptr);
}

@@ -1241,10 +1241,7 @@ bool java_lang_Class::restore_archived_mirror(Klass *k,

if (!k->is_array_klass()) {
// - local static final fields with initial values were initialized at dump time

// create the init_lock
typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK_(false));
set_init_lock(mirror(), r);
assert(init_lock(mirror()) != nullptr, "allocated during AOT assembly");

if (protection_domain.not_null()) {
set_protection_domain(mirror(), protection_domain());

@@ -1336,11 +1333,6 @@ void java_lang_Class::set_class_data(oop java_class, oop class_data) {
java_class->obj_field_put(_classData_offset, class_data);
}

void java_lang_Class::set_reflection_data(oop java_class, oop reflection_data) {
assert(_reflectionData_offset != 0, "must be set");
java_class->obj_field_put(_reflectionData_offset, reflection_data);
}

void java_lang_Class::set_class_loader(oop java_class, oop loader) {
assert(_class_loader_offset != 0, "offsets should have been initialized");
java_class->obj_field_put(_class_loader_offset, loader);

@@ -1483,7 +1475,6 @@ Klass* java_lang_Class::array_klass_acquire(oop java_class) {
return k;
}

void java_lang_Class::release_set_array_klass(oop java_class, Klass* klass) {
assert(klass->is_klass() && klass->is_array_klass(), "should be array klass");
java_class->release_metadata_field_put(_array_klass_offset, klass);

@@ -1589,11 +1580,6 @@ void java_lang_Class::set_modifiers(oop the_class_mirror, u2 value) {
the_class_mirror->char_field_put(_modifiers_offset, value);
}

int java_lang_Class::raw_access_flags(oop the_class_mirror) {
assert(_raw_access_flags_offset != 0, "offsets should have been initialized");
return the_class_mirror->char_field(_raw_access_flags_offset);
}

void java_lang_Class::set_raw_access_flags(oop the_class_mirror, u2 value) {
assert(_raw_access_flags_offset != 0, "offsets should have been initialized");
the_class_mirror->char_field_put(_raw_access_flags_offset, value);

@@ -273,6 +273,12 @@ class java_lang_Class : AllStatic {
static void initialize_mirror_fields(InstanceKlass* ik, Handle mirror, Handle protection_domain,
Handle classData, TRAPS);
static void set_mirror_module_field(JavaThread* current, Klass* K, Handle mirror, Handle module);

static void set_modifiers(oop java_class, u2 value);
static void set_raw_access_flags(oop java_class, u2 value);
static void set_is_primitive(oop java_class);
static void release_set_array_klass(oop java_class, Klass* klass);

public:
static void allocate_fixup_lists();
static void compute_offsets();

@@ -307,12 +313,10 @@ class java_lang_Class : AllStatic {
static bool is_instance(oop obj);

static bool is_primitive(oop java_class);
static void set_is_primitive(oop java_class);
static BasicType primitive_type(oop java_class);
static oop primitive_mirror(BasicType t);
// JVM_NewArray support
static Klass* array_klass_acquire(oop java_class);
static void release_set_array_klass(oop java_class, Klass* klass);

// compiler support for class operations
static int klass_offset() { CHECK_INIT(_klass_offset); }
static int array_klass_offset() { CHECK_INIT(_array_klass_offset); }

@@ -331,7 +335,6 @@ class java_lang_Class : AllStatic {
static objArrayOop signers(oop java_class);
static oop class_data(oop java_class);
static void set_class_data(oop java_class, oop classData);
static void set_reflection_data(oop java_class, oop reflection_data);
static int reflection_data_offset() { return _reflectionData_offset; }

static oop class_loader(oop java_class);

@@ -344,10 +347,6 @@ class java_lang_Class : AllStatic {
static void set_source_file(oop java_class, oop source_file);

static int modifiers(oop java_class);
static void set_modifiers(oop java_class, u2 value);

static int raw_access_flags(oop java_class);
static void set_raw_access_flags(oop java_class, u2 value);

static size_t oop_size(oop java_class);
static void set_oop_size(HeapWord* java_class, size_t size);

@@ -26,6 +26,7 @@
#include "code/compiledIC.hpp"
#include "code/nmethod.hpp"
#include "code/relocInfo.hpp"
#include "cppstdlib/new.hpp"
#include "cppstdlib/type_traits.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"

@@ -37,8 +38,6 @@
#include "utilities/checkedCast.hpp"
#include "utilities/copy.hpp"

#include <new>

const RelocationHolder RelocationHolder::none; // its type is relocInfo::none

@@ -25,6 +25,7 @@
#ifndef SHARE_CODE_RELOCINFO_HPP
#define SHARE_CODE_RELOCINFO_HPP

#include "cppstdlib/new.hpp"
#include "memory/allocation.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/osInfo.hpp"

@@ -32,8 +33,6 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

#include <new>

class CodeBlob;
class Metadata;
class NativeMovConstReg;

src/hotspot/share/cppstdlib/new.hpp (new file, 154 lines)
@@ -0,0 +1,154 @@
/*
* Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef SHARE_CPPSTDLIB_NEW_HPP
#define SHARE_CPPSTDLIB_NEW_HPP

#include "utilities/compilerWarnings.hpp"

// HotSpot usage:
// Only the following may be used:
// * std::nothrow_t, std::nothrow
// * std::align_val_t
// * The non-allocating forms of `operator new` and `operator new[]` are
// implicitly used by the corresponding `new` and `new[]` expressions.
// - operator new(size_t, void*) noexcept
// - operator new[](size_t, void*) noexcept
// Note that the non-allocating forms of `operator delete` and `operator
// delete[]` are not used, since they are only invoked by a placement new
// expression that fails by throwing an exception. But they might still
// end up being referenced in such a situation.

BEGIN_ALLOW_FORBIDDEN_FUNCTIONS
#include "utilities/vmassert_uninstall.hpp"

#include <new>

#include "utilities/vmassert_reinstall.hpp" // don't reorder
END_ALLOW_FORBIDDEN_FUNCTIONS

// Deprecation declarations to forbid use of the default global allocator.
// See C++17 21.6.1 Header <new> synopsis.

namespace std {

#if 0
// We could deprecate exception types, for completeness, but don't bother. We
// already have exceptions disabled, and run into compiler bugs when we try.
//
// gcc -Wattributes => type attributes ignored after type is already defined
// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=122167
//
// clang -Wignored-attributes => attribute declaration must precede definition
// The clang warning is https://github.com/llvm/llvm-project/issues/135481,
// which should be fixed in clang 21.
class [[deprecated]] bad_alloc;
class [[deprecated]] bad_array_new_length;
#endif // #if 0

// Forbid new_handler manipulation by HotSpot code, leaving it untouched for
// use by application code.
[[deprecated]] new_handler get_new_handler() noexcept;
[[deprecated]] new_handler set_new_handler(new_handler) noexcept;

// Prefer HotSpot mechanisms for padding.
//
// The syntax for redeclaring these for deprecation is tricky, and not
// supported by some versions of some compilers. Dispatch on compiler and
// version to decide whether to redeclare deprecated.

#if defined(__clang__)
#if __clang_major__ >= 19
// clang18 and earlier may accept the declaration but go wrong with uses.
// Different warnings and link-time failures are both possible.
#define CAN_DEPRECATE_HARDWARE_INTERFERENCE_SIZES 1
#endif // restrict clang version

#elif defined(__GNUC__)
#if (__GNUC__ > 13) || (__GNUC__ == 13 && __GNUC_MINOR__ >= 2)
// g++11.5 accepts the declaration and reports deprecation for uses, but also
// has link-time failure for uses. Haven't tested intermediate versions.
#define CAN_DEPRECATE_HARDWARE_INTERFERENCE_SIZES 1
#endif // restrict gcc version

#elif defined(_MSVC)
// VS2022-17.13.2 => error C2370: '...': redefinition; different storage class

#endif // Compiler dispatch

// Redeclare deprecated if such is supported.
#ifdef CAN_DEPRECATE_HARDWARE_INTERFERENCE_SIZES
[[deprecated]] extern const size_t hardware_destructive_interference_size;
[[deprecated]] extern const size_t hardware_constructive_interference_size;
#undef CAN_DEPRECATE_HARDWARE_INTERFERENCE_SIZES
#endif // CAN_DEPRECATE_HARDWARE_INTERFERENCE_SIZES

} // namespace std

// Forbid using the global allocator by HotSpot code.
// This doesn't provide complete coverage. Some global allocation and
// deallocation functions are implicitly declared in all translation units,
// without needing to include <new>; see C++17 6.7.4. So this doesn't remove
// the need for the link-time verification that these functions aren't used.
//
// But don't poison them when compiling gtests. The gtest framework, the
// HotSpot wrapper around it (gtestMain.cpp), and even some tests, all have
// new/new[] and delete/delete[] expressions that use the default global
// allocator. We also don't apply the link-time check for gtests, for the
// same reason.
#ifndef HOTSPOT_GTEST

[[deprecated]] void* operator new(std::size_t);
[[deprecated]] void* operator new(std::size_t, std::align_val_t);
[[deprecated]] void* operator new(std::size_t, const std::nothrow_t&) noexcept;
[[deprecated]] void* operator new(std::size_t, std::align_val_t,
const std::nothrow_t&) noexcept;

[[deprecated]] void operator delete(void*) noexcept;
[[deprecated]] void operator delete(void*, std::size_t) noexcept;
[[deprecated]] void operator delete(void*, std::align_val_t) noexcept;
[[deprecated]] void operator delete(void*, std::size_t, std::align_val_t) noexcept;
[[deprecated]] void operator delete(void*, const std::nothrow_t&) noexcept;
[[deprecated]] void operator delete(void*, std::align_val_t,
const std::nothrow_t&) noexcept;

[[deprecated]] void* operator new[](std::size_t);
[[deprecated]] void* operator new[](std::size_t, std::align_val_t);
[[deprecated]] void* operator new[](std::size_t, const std::nothrow_t&) noexcept;
[[deprecated]] void* operator new[](std::size_t, std::align_val_t,
const std::nothrow_t&) noexcept;

[[deprecated]] void operator delete[](void*) noexcept;
[[deprecated]] void operator delete[](void*, std::size_t) noexcept;
[[deprecated]] void operator delete[](void*, std::align_val_t) noexcept;
[[deprecated]] void operator delete[](void*, std::size_t, std::align_val_t) noexcept;
[[deprecated]] void operator delete[](void*, const std::nothrow_t&) noexcept;
[[deprecated]] void operator delete[](void*, std::align_val_t,
const std::nothrow_t&) noexcept;

#endif // HOTSPOT_GTEST

// Allow (don't poison) the non-allocating forms from [new.delete.placement].

#endif // SHARE_CPPSTDLIB_NEW_HPP
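The net effect is that ordinary HotSpot code gets a deprecation diagnostic (an error under -Werror builds) as soon as it reaches for the global allocator, while placement new stays usable; an illustrative example, not part of the source:

    #include "cppstdlib/new.hpp"

    void example() {
      int* p = new int(42);       // warns: 'operator new' is deprecated
      delete p;                   // warns: 'operator delete' is deprecated
      alignas(int) char buf[sizeof(int)];
      int* q = new (buf) int(42); // OK: non-allocating placement form is not poisoned
      (void)q;
    }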
@@ -123,6 +123,14 @@ void G1Allocator::reuse_retained_old_region(G1EvacInfo* evacuation_info,
}
}

size_t G1Allocator::free_bytes_in_retained_old_region() const {
if (_retained_old_gc_alloc_region == nullptr) {
return 0;
} else {
return _retained_old_gc_alloc_region->free();
}
}

void G1Allocator::init_gc_alloc_regions(G1EvacInfo* evacuation_info) {
assert_at_safepoint_on_vm_thread();

@@ -103,7 +103,10 @@ public:
void init_gc_alloc_regions(G1EvacInfo* evacuation_info);
void release_gc_alloc_regions(G1EvacInfo* evacuation_info);
void abandon_gc_alloc_regions();

bool is_retained_old_region(G1HeapRegion* hr);
// Return the amount of free bytes in the current retained old region.
size_t free_bytes_in_retained_old_region() const;

// Node index of current thread.
inline uint current_node_index() const;

@@ -353,6 +353,14 @@ size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
return align_up(word_size, G1HeapRegion::GrainWords) / G1HeapRegion::GrainWords;
}

size_t G1CollectedHeap::allocation_used_bytes(size_t allocation_word_size) {
if (is_humongous(allocation_word_size)) {
return humongous_obj_size_in_regions(allocation_word_size) * G1HeapRegion::GrainBytes;
} else {
return allocation_word_size * HeapWordSize;
}
}

// If could fit into free regions w/o expansion, try.
// Otherwise, if can expand, do so.
// Otherwise, if using ex regions might help, try with ex given back.
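The rounding in allocation_used_bytes() is worth a worked example (the numbers are illustrative):

    // Sketch: with GrainBytes = 1 MB and a humongous request of 1.5 regions
    // worth of words, humongous_obj_size_in_regions() rounds up to 2, so the
    // allocation is charged 2 MB of heap rather than its raw byte size.
    // A non-humongous request is charged exactly word_size * HeapWordSize.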
@@ -2955,6 +2963,15 @@ void G1CollectedHeap::abandon_collection_set() {
collection_set()->abandon();
}

size_t G1CollectedHeap::non_young_occupancy_after_allocation(size_t allocation_word_size) {
const size_t cur_occupancy = (old_regions_count() + humongous_regions_count()) * G1HeapRegion::GrainBytes -
_allocator->free_bytes_in_retained_old_region();
// Humongous allocations will always be assigned to non-young heap, so consider
// that allocation in the result as well. Otherwise the allocation will always
// be in young gen, so there is no need to account it here.
return cur_occupancy + (is_humongous(allocation_word_size) ? allocation_used_bytes(allocation_word_size) : 0);
}

bool G1CollectedHeap::is_old_gc_alloc_region(G1HeapRegion* hr) {
return _allocator->is_retained_old_region(hr);
}
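Written out, the occupancy query computes:

    // Sketch of non_young_occupancy_after_allocation(word_size):
    //   (old_regions_count() + humongous_regions_count()) * GrainBytes
    //     - free bytes in the retained old region
    //     + allocation_used_bytes(word_size) if the allocation is humongous
    //       (0 otherwise, since a non-humongous allocation lands in young gen)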
@@ -1032,17 +1032,15 @@ public:
inline void old_set_add(G1HeapRegion* hr);
inline void old_set_remove(G1HeapRegion* hr);

size_t non_young_capacity_bytes() {
return (old_regions_count() + humongous_regions_count()) * G1HeapRegion::GrainBytes;
}
// Returns how much memory is assigned to the non-young heap and can no longer
// be allocated into without a garbage collection, after a hypothetical
// allocation of allocation_word_size.
size_t non_young_occupancy_after_allocation(size_t allocation_word_size);

// Determine whether the given region is one that we are using as an
// old GC alloc region.
bool is_old_gc_alloc_region(G1HeapRegion* hr);

// Perform a collection of the heap; intended for use in implementing
// "System.gc". This probably implies as full a collection as the
// "CollectedHeap" supports.
void collect(GCCause::Cause cause) override;

// Try to perform a collection of the heap with the given cause to allocate allocation_word_size

@@ -1229,6 +1227,10 @@ public:
// requires.
static size_t humongous_obj_size_in_regions(size_t word_size);

// Returns how much space in bytes an allocation of word_size will use up in the
// heap.
static size_t allocation_used_bytes(size_t word_size);

// Print the maximum heap capacity.
size_t max_capacity() const override;
size_t min_capacity() const;

@@ -787,23 +787,13 @@ void G1HeapRegion::fill_range_with_dead_objects(HeapWord* start, HeapWord* end)
// possible that there is a pinned object that is not any more referenced by
// Java code (only by native).
//
// In this case we must not zap contents of such an array but we can overwrite
// the header; since only pinned typearrays are allowed, this fits nicely with
// putting filler arrays into the dead range as the object header sizes match and
// no user data is overwritten.
// In this case we should not zap, because that would overwrite
// user-observable data. Memory corresponding to obj-header is safe to
// change, since it's not directly user-observable.
//
// In particular String Deduplication might change the reference to the character
// array of the j.l.String after native code obtained a raw reference to it (via
// GetStringCritical()).
CollectedHeap::fill_with_objects(start, range_size, !has_pinned_objects());
HeapWord* current = start;
do {
// Update the BOT if a threshold is crossed.
size_t obj_size = cast_to_oop(current)->size();
update_bot_for_block(current, current + obj_size);

// Advance to the next object.
current += obj_size;
guarantee(current <= end, "Should never go past end");
} while (current != end);
CollectedHeap::fill_with_object(start, range_size, !has_pinned_objects());
update_bot_for_block(start, start + range_size);
}
@@ -366,28 +366,24 @@ static size_t target_heap_capacity(size_t used_bytes, uintx free_ratio) {
}

size_t G1HeapSizingPolicy::full_collection_resize_amount(bool& expand, size_t allocation_word_size) {
// If the full collection was triggered by an allocation failure, we should account
// for the bytes required for this allocation under used_after_gc. This prevents
// unnecessary shrinking that would be followed by an expand call to satisfy the
// allocation.
size_t allocation_bytes = allocation_word_size * HeapWordSize;
if (_g1h->is_humongous(allocation_word_size)) {
// Humongous objects are allocated in entire regions, we must calculate
// required space in terms of full regions, not just the object size.
allocation_bytes = G1HeapRegion::align_up_to_region_byte_size(allocation_bytes);
}

const size_t capacity_after_gc = _g1h->capacity();
// Capacity, free and used after the GC counted as full regions to
// include the waste in the following calculations.
const size_t capacity_after_gc = _g1h->capacity();
const size_t used_after_gc = capacity_after_gc + allocation_bytes -
_g1h->unused_committed_regions_in_bytes() -
// Discount space used by current Eden to establish a
// situation during Remark similar to at the end of full
// GC where eden is empty. During Remark there can be an
// arbitrary number of eden regions which would skew the
// results.
_g1h->eden_regions_count() * G1HeapRegion::GrainBytes;
const size_t current_used_after_gc = capacity_after_gc -
_g1h->unused_committed_regions_in_bytes() -
// Discount space used by current Eden to establish a
// situation during Remark similar to at the end of full
// GC where eden is empty. During Remark there can be an
// arbitrary number of eden regions which would skew the
// results.
_g1h->eden_regions_count() * G1HeapRegion::GrainBytes;

// Add pending allocation;
const size_t used_after_gc = current_used_after_gc +
// If the full collection was triggered by an allocation failure,
// account that allocation too. Otherwise we could shrink and then
// expand immediately to satisfy the allocation.
_g1h->allocation_used_bytes(allocation_word_size);

size_t minimum_desired_capacity = target_heap_capacity(used_after_gc, MinHeapFreeRatio);
size_t maximum_desired_capacity = target_heap_capacity(used_after_gc, MaxHeapFreeRatio);
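The sizing inputs above reduce to two sums:

    // Sketch:
    //   current_used_after_gc = capacity_after_gc
    //                           - unused_committed_regions_in_bytes()
    //                           - eden_regions_count() * GrainBytes
    //   used_after_gc         = current_used_after_gc
    //                           + allocation_used_bytes(allocation_word_size)
    // Deriving both the MinHeapFreeRatio and MaxHeapFreeRatio targets from
    // used_after_gc keeps a pending (possibly humongous) allocation from
    // triggering a shrink that is immediately undone by an expand.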
@@ -44,32 +44,37 @@ void G1IHOPControl::update_target_occupancy(size_t new_target_occupancy) {
_target_occupancy = new_target_occupancy;
}

void G1IHOPControl::report_statistics(G1NewTracer* new_tracer, size_t non_young_occupancy) {
print_log(non_young_occupancy);
send_trace_event(new_tracer, non_young_occupancy);
}

void G1IHOPControl::update_allocation_info(double allocation_time_s, size_t additional_buffer_size) {
assert(allocation_time_s >= 0.0, "Allocation time must be positive but is %.3f", allocation_time_s);

_last_allocation_time_s = allocation_time_s;
}

void G1IHOPControl::print() {
void G1IHOPControl::print_log(size_t non_young_occupancy) {
assert(_target_occupancy > 0, "Target occupancy still not updated yet.");
size_t cur_conc_mark_start_threshold = get_conc_mark_start_threshold();
log_debug(gc, ihop)("Basic information (value update), threshold: %zuB (%1.2f), target occupancy: %zuB, current occupancy: %zuB, "
log_debug(gc, ihop)("Basic information (value update), threshold: %zuB (%1.2f), target occupancy: %zuB, non-young occupancy: %zuB, "
"recent allocation size: %zuB, recent allocation duration: %1.2fms, recent old gen allocation rate: %1.2fB/s, recent marking phase length: %1.2fms",
cur_conc_mark_start_threshold,
percent_of(cur_conc_mark_start_threshold, _target_occupancy),
_target_occupancy,
G1CollectedHeap::heap()->used(),
non_young_occupancy,
_old_gen_alloc_tracker->last_period_old_gen_bytes(),
_last_allocation_time_s * 1000.0,
_last_allocation_time_s > 0.0 ? _old_gen_alloc_tracker->last_period_old_gen_bytes() / _last_allocation_time_s : 0.0,
last_marking_length_s() * 1000.0);
}

void G1IHOPControl::send_trace_event(G1NewTracer* tracer) {
void G1IHOPControl::send_trace_event(G1NewTracer* tracer, size_t non_young_occupancy) {
assert(_target_occupancy > 0, "Target occupancy still not updated yet.");
tracer->report_basic_ihop_statistics(get_conc_mark_start_threshold(),
_target_occupancy,
G1CollectedHeap::heap()->used(),
non_young_occupancy,
_old_gen_alloc_tracker->last_period_old_gen_bytes(),
_last_allocation_time_s,
last_marking_length_s());

@@ -165,27 +170,27 @@ void G1AdaptiveIHOPControl::update_marking_length(double marking_length_s) {
_marking_times_s.add(marking_length_s);
}

void G1AdaptiveIHOPControl::print() {
G1IHOPControl::print();
size_t actual_target = actual_target_threshold();
log_debug(gc, ihop)("Adaptive IHOP information (value update), threshold: %zuB (%1.2f), internal target occupancy: %zuB, "
"occupancy: %zuB, additional buffer size: %zuB, predicted old gen allocation rate: %1.2fB/s, "
void G1AdaptiveIHOPControl::print_log(size_t non_young_occupancy) {
G1IHOPControl::print_log(non_young_occupancy);
size_t actual_threshold = actual_target_threshold();
log_debug(gc, ihop)("Adaptive IHOP information (value update), threshold: %zuB (%1.2f), internal target threshold: %zuB, "
"non-young occupancy: %zuB, additional buffer size: %zuB, predicted old gen allocation rate: %1.2fB/s, "
"predicted marking phase length: %1.2fms, prediction active: %s",
get_conc_mark_start_threshold(),
percent_of(get_conc_mark_start_threshold(), actual_target),
actual_target,
G1CollectedHeap::heap()->used(),
percent_of(get_conc_mark_start_threshold(), actual_threshold),
actual_threshold,
non_young_occupancy,
_last_unrestrained_young_size,
predict(&_allocation_rate_s),
predict(&_marking_times_s) * 1000.0,
have_enough_data_for_prediction() ? "true" : "false");
}

void G1AdaptiveIHOPControl::send_trace_event(G1NewTracer* tracer) {
G1IHOPControl::send_trace_event(tracer);
void G1AdaptiveIHOPControl::send_trace_event(G1NewTracer* tracer, size_t non_young_occupancy) {
G1IHOPControl::send_trace_event(tracer, non_young_occupancy);
tracer->report_adaptive_ihop_statistics(get_conc_mark_start_threshold(),
actual_target_threshold(),
G1CollectedHeap::heap()->used(),
non_young_occupancy,
_last_unrestrained_young_size,
predict(&_allocation_rate_s),
predict(&_marking_times_s),

@@ -55,7 +55,11 @@ class G1IHOPControl : public CHeapObj<mtGC> {
// Most recent time from the end of the concurrent start to the start of the first
// mixed gc.
virtual double last_marking_length_s() const = 0;
public:

virtual void print_log(size_t non_young_occupancy);
virtual void send_trace_event(G1NewTracer* tracer, size_t non_young_occupancy);

public:
virtual ~G1IHOPControl() { }

// Get the current non-young occupancy at which concurrent marking should start.

@@ -76,8 +80,7 @@ class G1IHOPControl : public CHeapObj<mtGC> {
// the first mixed gc.
virtual void update_marking_length(double marking_length_s) = 0;

virtual void print();
virtual void send_trace_event(G1NewTracer* tracer);
void report_statistics(G1NewTracer* tracer, size_t non_young_occupancy);
};

// The returned concurrent mark starting occupancy threshold is a fixed value

@@ -139,6 +142,10 @@ class G1AdaptiveIHOPControl : public G1IHOPControl {
double last_mutator_period_old_allocation_rate() const;
protected:
virtual double last_marking_length_s() const { return _marking_times_s.last(); }

virtual void print_log(size_t non_young_occupancy);
virtual void send_trace_event(G1NewTracer* tracer, size_t non_young_occupancy);

public:
G1AdaptiveIHOPControl(double ihop_percent,
G1OldGenAllocationTracker const* old_gen_alloc_tracker,

@@ -150,9 +157,6 @@ class G1AdaptiveIHOPControl : public G1IHOPControl {

virtual void update_allocation_info(double allocation_time_s, size_t additional_buffer_size);
virtual void update_marking_length(double marking_length_s);

virtual void print();
virtual void send_trace_event(G1NewTracer* tracer);
};

#endif // SHARE_GC_G1_G1IHOPCONTROL_HPP

@@ -749,22 +749,14 @@ bool G1Policy::need_to_start_conc_mark(const char* source, size_t allocation_wor
}

size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();

size_t cur_used_bytes = _g1h->non_young_capacity_bytes();
size_t allocation_byte_size = allocation_word_size * HeapWordSize;
// For humongous allocations, we need to consider that we actually use full regions
// for allocations. So compare the threshold to this size.
if (_g1h->is_humongous(allocation_word_size)) {
allocation_byte_size = G1HeapRegion::align_up_to_region_byte_size(allocation_byte_size);
}
size_t marking_request_bytes = cur_used_bytes + allocation_byte_size;
size_t non_young_occupancy = _g1h->non_young_occupancy_after_allocation(allocation_word_size);

bool result = false;
if (marking_request_bytes > marking_initiating_used_threshold) {
if (non_young_occupancy > marking_initiating_used_threshold) {
result = collector_state()->in_young_only_phase();
log_debug(gc, ergo, ihop)("%s occupancy: %zuB allocation request: %zuB threshold: %zuB (%1.2f) source: %s",
log_debug(gc, ergo, ihop)("%s non-young occupancy: %zuB allocation request: %zuB threshold: %zuB (%1.2f) source: %s",
result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
cur_used_bytes, allocation_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1h->capacity() * 100, source);
non_young_occupancy, allocation_word_size * HeapWordSize, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1h->capacity() * 100, source);
}
return result;
}
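After the refactoring the concurrent-mark trigger is a single comparison:

    // Sketch: start concurrent marking (in the young-only phase) when
    //   non_young_occupancy_after_allocation(word_size) > IHOP threshold
    // The humongous round-up that used to live at this call site is now
    // folded into the occupancy query itself.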
@@ -995,10 +987,10 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar
    update_young_length_bounds();

    _old_gen_alloc_tracker.reset_after_gc(_g1h->humongous_regions_count() * G1HeapRegion::GrainBytes);
    update_ihop_prediction(app_time_ms / 1000.0,
                           G1GCPauseTypeHelper::is_young_only_pause(this_pause));

    _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
    if (update_ihop_prediction(app_time_ms / 1000.0,
                               G1GCPauseTypeHelper::is_young_only_pause(this_pause))) {
      _ihop_control->report_statistics(_g1h->gc_tracer_stw(), _g1h->non_young_occupancy_after_allocation(allocation_word_size));
    }
  } else {
    // Any garbage collection triggered as periodic collection resets the time-to-mixed
    // measurement. Periodic collection typically means that the application is "inactive", i.e.
@@ -1045,7 +1037,7 @@ G1IHOPControl* G1Policy::create_ihop_control(const G1OldGenAllocationTracker* ol
  }
}

void G1Policy::update_ihop_prediction(double mutator_time_s,
bool G1Policy::update_ihop_prediction(double mutator_time_s,
                                      bool this_gc_was_young_only) {
  // Always try to update IHOP prediction. Even evacuation failures give information
  // about e.g. whether to start IHOP earlier next time.

@@ -1082,13 +1074,7 @@ void G1Policy::update_ihop_prediction(double mutator_time_s,
    report = true;
  }

  if (report) {
    report_ihop_statistics();
  }
}

void G1Policy::report_ihop_statistics() {
  _ihop_control->print();
  return report;
}

void G1Policy::record_young_gc_pause_end(bool evacuation_failed) {
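
With this change `update_ihop_prediction()` reports back whether anything significant changed, and the caller gates statistics emission on that return value instead of an unconditional trace call. A minimal sketch of the pattern with hypothetical names (not the G1 types):

    #include <cstdio>

    struct Predictor {
      double estimate = 0.0;
      // Returns true only when the new sample moved the estimate
      // enough to be worth reporting.
      bool update(double sample) {
        double old = estimate;
        estimate = 0.9 * estimate + 0.1 * sample;
        double delta = estimate - old;
        return delta > 0.01 || delta < -0.01;
      }
    };

    int main() {
      Predictor p;
      for (double s : {1.0, 1.0, 1.0001}) {
        if (p.update(s)) {                               // caller-side gating
          printf("report: estimate now %f\n", p.estimate);
        }
      }
    }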
@@ -60,10 +60,10 @@ class G1Policy: public CHeapObj<mtGC> {

  static G1IHOPControl* create_ihop_control(const G1OldGenAllocationTracker* old_gen_alloc_tracker,
                                            const G1Predictions* predictor);
  // Update the IHOP control with necessary statistics.
  void update_ihop_prediction(double mutator_time_s,
  // Update the IHOP control with the necessary statistics. Returns true if there
  // has been a significant update to the prediction.
  bool update_ihop_prediction(double mutator_time_s,
                              bool this_gc_was_young_only);
  void report_ihop_statistics();

  G1Predictions _predictor;
  G1Analytics* _analytics;
@@ -98,13 +98,13 @@ void G1NewTracer::report_evacuation_statistics(const G1EvacSummary& young_summar

void G1NewTracer::report_basic_ihop_statistics(size_t threshold,
                                               size_t target_ccupancy,
                                               size_t current_occupancy,
                                               size_t non_young_occupancy,
                                               size_t last_allocation_size,
                                               double last_allocation_duration,
                                               double last_marking_length) {
  send_basic_ihop_statistics(threshold,
                             target_ccupancy,
                             current_occupancy,
                             non_young_occupancy,
                             last_allocation_size,
                             last_allocation_duration,
                             last_marking_length);

@@ -206,7 +206,7 @@ void G1NewTracer::send_old_evacuation_statistics(const G1EvacSummary& summary) c

void G1NewTracer::send_basic_ihop_statistics(size_t threshold,
                                             size_t target_occupancy,
                                             size_t current_occupancy,
                                             size_t non_young_occupancy,
                                             size_t last_allocation_size,
                                             double last_allocation_duration,
                                             double last_marking_length) {

@@ -216,7 +216,7 @@ void G1NewTracer::send_basic_ihop_statistics(size_t threshold,
  evt.set_threshold(threshold);
  evt.set_targetOccupancy(target_occupancy);
  evt.set_thresholdPercentage(target_occupancy > 0 ? ((double)threshold / target_occupancy) : 0.0);
  evt.set_currentOccupancy(current_occupancy);
  evt.set_currentOccupancy(non_young_occupancy);
  evt.set_recentMutatorAllocationSize(last_allocation_size);
  evt.set_recentMutatorDuration(last_allocation_duration * MILLIUNITS);
  evt.set_recentAllocationRate(last_allocation_duration != 0.0 ? last_allocation_size / last_allocation_duration : 0.0);

@@ -73,13 +73,13 @@ private:

  void send_basic_ihop_statistics(size_t threshold,
                                  size_t target_occupancy,
                                  size_t current_occupancy,
                                  size_t non_young_occupancy,
                                  size_t last_allocation_size,
                                  double last_allocation_duration,
                                  double last_marking_length);
  void send_adaptive_ihop_statistics(size_t threshold,
                                     size_t internal_target_occupancy,
                                     size_t current_occupancy,
                                     size_t non_young_occupancy,
                                     size_t additional_buffer_size,
                                     double predicted_allocation_rate,
                                     double predicted_marking_length,
@@ -25,47 +25,95 @@
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1YoungGenSizer.hpp"
#include "gc/shared/gc_globals.hpp"
#include "logging/log.hpp"
#include "runtime/globals_extension.hpp"

G1YoungGenSizer::G1YoungGenSizer() : _sizer_kind(SizerDefaults),
  _use_adaptive_sizing(true), _min_desired_young_length(0), _max_desired_young_length(0) {

  precond(!FLAG_IS_ERGO(NewRatio));
  precond(!FLAG_IS_ERGO(NewSize));
  precond(!FLAG_IS_ERGO(MaxNewSize));

  // Figure out compatible young gen sizing policies.
  // This will either use all default, NewRatio or a combination of NewSize and
  // MaxNewSize. If both ratio and size is user specified NewRatio will be ignored.

  const bool user_specified_NewRatio = !FLAG_IS_DEFAULT(NewRatio);
  const bool user_specified_NewSize = !FLAG_IS_DEFAULT(NewSize);
  const bool user_specified_MaxNewSize = !FLAG_IS_DEFAULT(MaxNewSize);

  // MaxNewSize is updated every time the heap is resized (and when initialized),
  // as such the value of MaxNewSize is only modified if it is also used by the
  // young generation sizing. (If MaxNewSize is user specified).

  if (!user_specified_NewRatio && !user_specified_NewSize && !user_specified_MaxNewSize) {
    // Using Defaults.
    return;
  }

  if (user_specified_NewRatio && !user_specified_NewSize && !user_specified_MaxNewSize) {
    // Using NewRatio.
    _sizer_kind = SizerNewRatio;
    _use_adaptive_sizing = false;
    return;
  }

  if (FLAG_IS_CMDLINE(NewRatio)) {
    if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
      log_warning(gc, ergo)("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
    } else {
      _sizer_kind = SizerNewRatio;
      _use_adaptive_sizing = false;
      return;
    // NewRatio ignored at this point, issue warning if NewRatio was specified
    // on the command line.
    log_warning(gc, ergo)("-XX:NewSize and -XX:MaxNewSize overrides -XX:NewRatio");
  }

  assert(!FLAG_IS_DEFAULT(InitialHeapSize), "Initial heap size must be selected");
  if (user_specified_NewSize && NewSize > InitialHeapSize) {
    // If user specifed NewSize is larger than the InitialHeapSize truncate the value.
    if (FLAG_IS_CMDLINE(NewSize)) {
      log_warning(gc, ergo)("NewSize (%zuk) is greater than the initial heap size (%zuk). "
                            "A new NewSize of %zuk will be used.",
                            NewSize/K, InitialHeapSize/K, InitialHeapSize/K);
    }
    FLAG_SET_ERGO(NewSize, InitialHeapSize);
  }

  assert(!FLAG_IS_DEFAULT(MaxHeapSize), "Max heap size must be selected");
  if (user_specified_MaxNewSize && MaxNewSize > MaxHeapSize) {
    // If user specifed MaxNewSize is larger than the MaxHeapSize truncate the value.
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      log_warning(gc, ergo)("MaxNewSize (%zuk) greater than the entire heap (%zuk). "
                            "A new MaxNewSize of %zuk will be used.",
                            MaxNewSize/K, MaxHeapSize/K, MaxHeapSize/K);
    }
    FLAG_SET_ERGO(MaxNewSize, MaxHeapSize);
  }

  if (NewSize > MaxNewSize) {
    // Either NewSize, MaxNewSize or both have been specified and are incompatible.
    // In either case set MaxNewSize to the value of NewSize.
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      log_warning(gc, ergo)("NewSize (%zuk) is greater than the MaxNewSize (%zuk). "
                            "A new max generation size of %zuk will be used.",
      log_warning(gc, ergo)("NewSize (%zuk) is greater than MaxNewSize (%zuk). "
                            "A new MaxNewSize of %zuk will be used.",
                            NewSize/K, MaxNewSize/K, NewSize/K);
    }
    FLAG_SET_ERGO(MaxNewSize, NewSize);
  }

  if (FLAG_IS_CMDLINE(NewSize)) {
    _min_desired_young_length = MAX2((uint) (NewSize / G1HeapRegion::GrainBytes),
                                     1U);
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      _max_desired_young_length =
        MAX2((uint) (MaxNewSize / G1HeapRegion::GrainBytes),
             1U);
      _sizer_kind = SizerMaxAndNewSize;
      _use_adaptive_sizing = _min_desired_young_length != _max_desired_young_length;
    } else {
      _sizer_kind = SizerNewSizeOnly;
    }
  } else if (FLAG_IS_CMDLINE(MaxNewSize)) {
    _max_desired_young_length =
      MAX2((uint) (MaxNewSize / G1HeapRegion::GrainBytes),
           1U);
  if (user_specified_NewSize) {
    _min_desired_young_length = MAX2((uint)(NewSize / G1HeapRegion::GrainBytes), 1U);
  }

  if (user_specified_MaxNewSize) {
    _max_desired_young_length = MAX2((uint)(MaxNewSize / G1HeapRegion::GrainBytes), 1U);
  }

  if (user_specified_NewSize && user_specified_MaxNewSize) {
    _sizer_kind = SizerMaxAndNewSize;
    _use_adaptive_sizing = _min_desired_young_length != _max_desired_young_length;
  } else if (user_specified_NewSize) {
    _sizer_kind = SizerNewSizeOnly;
  } else {
    postcond(user_specified_MaxNewSize);
    _sizer_kind = SizerMaxNewSizeOnly;
  }
}
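
The rewritten constructor reduces the flag handling to an explicit decision table over the three user-specified flags, with the early returns handling the unambiguous cases first. A compact sketch of that decision structure (names mirror the diff; the function itself is illustrative only):

    #include <cstdio>

    enum SizerKind { SizerDefaults, SizerNewRatio, SizerNewSizeOnly,
                     SizerMaxNewSizeOnly, SizerMaxAndNewSize };

    // NewRatio only applies when neither size flag is set; otherwise
    // the size flags win and NewRatio is ignored with a warning.
    SizerKind pick(bool new_ratio, bool new_size, bool max_new_size) {
      if (!new_ratio && !new_size && !max_new_size) return SizerDefaults;
      if (new_ratio && !new_size && !max_new_size)  return SizerNewRatio;
      if (new_size && max_new_size)                 return SizerMaxAndNewSize;
      return new_size ? SizerNewSizeOnly : SizerMaxNewSizeOnly;
    }

    int main() {
      // NewRatio plus NewSize: ratio is ignored, size-only sizing is used.
      printf("%d\n", pick(true, true, false));  // prints SizerNewSizeOnly (2)
    }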
@@ -180,19 +180,6 @@ bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
  return AtomicAccess::cmpxchg(top_addr(), expected_top, obj) == expected_top;
}

// Only used by oldgen allocation.
bool MutableSpace::needs_expand(size_t word_size) const {
  // This method can be invoked either outside of safepoint by java threads or
  // in safepoint by gc workers. Such accesses are synchronized by holding one
  // of the following locks.
  assert(Heap_lock->is_locked() || PSOldGenExpand_lock->is_locked(), "precondition");

  // Holding the lock means end is stable. So while top may be advancing
  // via concurrent allocations, there is no need to order the reads of top
  // and end here, unlike in cas_allocate.
  return pointer_delta(end(), top()) < word_size;
}

void MutableSpace::oop_iterate(OopIterateClosure* cl) {
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();

@@ -127,11 +127,6 @@ public:
  virtual HeapWord* cas_allocate(size_t word_size);
  // Optional deallocation. Used in NUMA-allocator.
  bool cas_deallocate(HeapWord *obj, size_t size);
  // Return true if this space needs to be expanded in order to satisfy an
  // allocation request of the indicated size. Concurrent allocations and
  // resizes may change the result of a later call. Used by oldgen allocator.
  // precondition: holding PSOldGenExpand_lock if not VM thread
  bool needs_expand(size_t word_size) const;

  // Iteration.
  void oop_iterate(OopIterateClosure* cl);
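
`needs_expand()` is removed here and, as the later PSOldGen hunks show, its callers inline the `pointer_delta(end(), top()) < word_size` test. The check itself is plain pointer arithmetic; roughly, with raw word pointers standing in for HeapWord*:

    #include <cstddef>

    // Equivalent free-space test on raw word-sized slots (HeapWord is
    // one machine word in HotSpot; any word type works for the sketch).
    using Word = unsigned long;

    bool needs_expand(const Word* top, const Word* end, size_t word_size) {
      // Free words remaining between top and end, compared to the request.
      return static_cast<size_t>(end - top) < word_size;
    }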
@@ -370,6 +370,55 @@ void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  PSParallelCompact::invoke(clear_all_soft_refs, should_do_max_compaction);
}

bool ParallelScavengeHeap::should_attempt_young_gc() const {
  const bool ShouldRunYoungGC = true;
  const bool ShouldRunFullGC = false;

  if (!_young_gen->to_space()->is_empty()) {
    log_debug(gc, ergo)("To-space is not empty; run full-gc instead.");
    return ShouldRunFullGC;
  }

  // Check if the predicted promoted bytes will overflow free space in old-gen.
  PSAdaptiveSizePolicy* policy = _size_policy;

  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, _young_gen->used_in_bytes());
  // Total free size after possible old gen expansion
  size_t free_in_old_gen_with_expansion = _old_gen->max_gen_size() - _old_gen->used_in_bytes();

  log_trace(gc, ergo)("average_promoted %zu; padded_average_promoted %zu",
                      (size_t) policy->average_promoted_in_bytes(),
                      (size_t) policy->padded_average_promoted_in_bytes());

  if (promotion_estimate >= free_in_old_gen_with_expansion) {
    log_debug(gc, ergo)("Run full-gc; predicted promotion size >= max free space in old-gen: %zu >= %zu",
                        promotion_estimate, free_in_old_gen_with_expansion);
    return ShouldRunFullGC;
  }

  if (UseAdaptiveSizePolicy) {
    // Also checking OS has enough free memory to commit and expand old-gen.
    // Otherwise, the recorded gc-pause-time might be inflated to include time
    // of OS preparing free memory, resulting in inaccurate young-gen resizing.
    assert(_old_gen->committed().byte_size() >= _old_gen->used_in_bytes(), "inv");
    // Use uint64_t instead of size_t for 32bit compatibility.
    uint64_t free_mem_in_os;
    if (os::free_memory(free_mem_in_os)) {
      size_t actual_free = (size_t)MIN2(_old_gen->committed().byte_size() - _old_gen->used_in_bytes() + free_mem_in_os,
                                        (uint64_t)SIZE_MAX);
      if (promotion_estimate > actual_free) {
        log_debug(gc, ergo)("Run full-gc; predicted promotion size > free space in old-gen and OS: %zu > %zu",
                            promotion_estimate, actual_free);
        return ShouldRunFullGC;
      }
    }
  }

  // No particular reasons to run full-gc, so young-gc.
  return ShouldRunYoungGC;
}

static bool check_gc_heap_free_limit(size_t free_bytes, size_t capacity_bytes) {
  return (free_bytes * 100 / capacity_bytes) < GCHeapFreeLimit;
}
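
The promotion check above is conservative arithmetic: the padded promotion average is capped by the young-gen used size (you cannot promote more than is live there), then compared against old-gen free space, optionally further clamped by OS free memory. A standalone sketch with hypothetical sizes:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t MB = uint64_t(1) << 20;
      uint64_t padded_avg_promoted = 300 * MB;  // policy's padded estimate
      uint64_t young_used          = 120 * MB;  // upper bound on promotion
      uint64_t old_free_max        = 100 * MB;  // old-gen free after full expansion
      uint64_t estimate = std::min(padded_avg_promoted, young_used);  // 120 MB
      // Promotion would overflow old-gen even after expansion: prefer full GC.
      bool full_gc = estimate >= old_free_max;
      printf("%s\n", full_gc ? "full gc" : "young gc");  // prints "full gc"
    }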
@@ -403,7 +452,16 @@ bool ParallelScavengeHeap::check_gc_overhead_limit() {
}

HeapWord* ParallelScavengeHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
#ifdef ASSERT
  assert(Heap_lock->is_locked(), "precondition");
  if (is_init_completed()) {
    assert(SafepointSynchronize::is_at_safepoint(), "precondition");
    assert(Thread::current()->is_VM_thread(), "precondition");
  } else {
    assert(Thread::current()->is_Java_thread(), "precondition");
    assert(Heap_lock->owned_by_self(), "precondition");
  }
#endif

  HeapWord* result = young_gen()->expand_and_allocate(size);

@@ -507,17 +565,18 @@ void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  VMThread::execute(&op);
}

void ParallelScavengeHeap::collect_at_safepoint(bool full) {
void ParallelScavengeHeap::collect_at_safepoint(bool is_full) {
  assert(!GCLocker::is_active(), "precondition");
  bool clear_soft_refs = GCCause::should_clear_all_soft_refs(_gc_cause);

  if (!full) {
    bool success = PSScavenge::invoke(clear_soft_refs);
    if (success) {
  if (!is_full && should_attempt_young_gc()) {
    bool young_gc_success = PSScavenge::invoke(clear_soft_refs);
    if (young_gc_success) {
      return;
    }
    // Upgrade to Full-GC if young-gc fails
    log_debug(gc, heap)("Upgrade to Full-GC since Young-gc failed.");
  }

  const bool should_do_max_compaction = false;
  PSParallelCompact::invoke(clear_soft_refs, should_do_max_compaction);
}
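
The reworked `collect_at_safepoint()` is a try-cheap-then-fall-through shape: attempt the young collection only when it looks viable, and upgrade to a full collection when it is refused or fails. The control flow in isolation (trivial stand-in functions, not the Parallel GC API):

    #include <cstdio>

    bool try_young_gc() { return false; }   // pretend the scavenge failed
    void full_gc()      { printf("full gc\n"); }

    void collect(bool is_full, bool young_looks_viable) {
      if (!is_full && young_looks_viable) {
        if (try_young_gc()) return;         // young GC sufficed
        printf("upgrade to full gc\n");     // fall through on failure
      }
      full_gc();
    }

    int main() { collect(false, true); }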
@@ -119,6 +119,9 @@ class ParallelScavengeHeap : public CollectedHeap {
  void print_tracing_info() const override;
  void stop() override {};

  // Returns true if a young GC should be attempted, false if a full GC is preferred.
  bool should_attempt_young_gc() const;

public:
  ParallelScavengeHeap() :
    CollectedHeap(),

@@ -206,7 +209,6 @@ public:

  HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);

  // Support for System.gc()
  void collect(GCCause::Cause cause) override;

  void collect_at_safepoint(bool full);
@@ -33,6 +33,7 @@
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "utilities/align.hpp"

@@ -118,13 +119,22 @@ void PSOldGen::initialize_performance_counters() {
}

HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
#ifdef ASSERT
  assert(Heap_lock->is_locked(), "precondition");
  if (is_init_completed()) {
    assert(SafepointSynchronize::is_at_safepoint(), "precondition");
    assert(Thread::current()->is_VM_thread(), "precondition");
  } else {
    assert(Thread::current()->is_Java_thread(), "precondition");
    assert(Heap_lock->owned_by_self(), "precondition");
  }
#endif

  if (object_space()->needs_expand(word_size)) {
  if (pointer_delta(object_space()->end(), object_space()->top()) < word_size) {
    expand(word_size*HeapWordSize);
  }

  // Reuse the CAS API even though this is VM thread in safepoint. This method
  // Reuse the CAS API even though this is in a critical section. This method
  // is not invoked repeatedly, so the CAS overhead should be negligible.
  return cas_allocate_noexpand(word_size);
}

@@ -168,7 +178,7 @@ bool PSOldGen::expand_for_allocate(size_t word_size) {
  // true until we expand, since we have the lock. Other threads may take
  // the space we need before we can allocate it, regardless of whether we
  // expand. That's okay, we'll just try expanding again.
  if (object_space()->needs_expand(word_size)) {
  if (pointer_delta(object_space()->end(), object_space()->top()) < word_size) {
    result = expand(word_size*HeapWordSize);
  }
}

@@ -192,10 +202,21 @@ void PSOldGen::try_expand_till_size(size_t target_capacity_bytes) {

bool PSOldGen::expand(size_t bytes) {
#ifdef ASSERT
  if (!Thread::current()->is_VM_thread()) {
    assert_lock_strong(PSOldGenExpand_lock);
  // During startup (is_init_completed() == false), expansion can occur for
  // 1. java-threads invoking heap-allocation (using Heap_lock)
  // 2. CDS construction by a single thread (using PSOldGenExpand_lock but not needed)
  //
  // After startup (is_init_completed() == true), expansion can occur for
  // 1. GC workers for promoting to old-gen (using PSOldGenExpand_lock)
  // 2. VM thread to satisfy the pending allocation
  // Both cases are inside safepoint pause, but are never overlapping.
  //
  if (is_init_completed()) {
    assert(SafepointSynchronize::is_at_safepoint(), "precondition");
    assert(Thread::current()->is_VM_thread() || PSOldGenExpand_lock->owned_by_self(), "precondition");
  } else {
    assert(Heap_lock->owned_by_self() || PSOldGenExpand_lock->owned_by_self(), "precondition");
  }
  assert_locked_or_safepoint(Heap_lock);
  assert(bytes > 0, "precondition");
#endif
  const size_t remaining_bytes = virtual_space()->uncommitted_size();
@@ -313,12 +313,6 @@ bool PSScavenge::invoke(bool clear_soft_refs) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    log_info(gc, ergo)("Young-gc might fail so skipping");
    return false;
  }

  IsSTWGCActiveMark mark;

  _gc_timer.register_gc_start();

@@ -336,8 +330,7 @@ bool PSScavenge::invoke(bool clear_soft_refs) {
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  assert(young_gen->to_space()->is_empty(),
         "Attempt to scavenge with live objects in to_space");
  assert(young_gen->to_space()->is_empty(), "precondition");

  heap->increment_total_collections();

@@ -520,59 +513,6 @@ void PSScavenge::clean_up_failed_promotion() {
  NOT_PRODUCT(ParallelScavengeHeap::heap()->reset_promotion_should_fail();)
}

bool PSScavenge::should_attempt_scavenge() {
  const bool ShouldRunYoungGC = true;
  const bool ShouldRunFullGC = false;

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!young_gen->to_space()->is_empty()) {
    log_debug(gc, ergo)("To-space is not empty; run full-gc instead.");
    return ShouldRunFullGC;
  }

  // Check if the predicted promoted bytes will overflow free space in old-gen.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  // Total free size after possible old gen expansion
  size_t free_in_old_gen_with_expansion = old_gen->max_gen_size() - old_gen->used_in_bytes();

  log_trace(gc, ergo)("average_promoted %zu; padded_average_promoted %zu",
                      (size_t) policy->average_promoted_in_bytes(),
                      (size_t) policy->padded_average_promoted_in_bytes());

  if (promotion_estimate >= free_in_old_gen_with_expansion) {
    log_debug(gc, ergo)("Run full-gc; predicted promotion size >= max free space in old-gen: %zu >= %zu",
                        promotion_estimate, free_in_old_gen_with_expansion);
    return ShouldRunFullGC;
  }

  if (UseAdaptiveSizePolicy) {
    // Also checking OS has enough free memory to commit and expand old-gen.
    // Otherwise, the recorded gc-pause-time might be inflated to include time
    // of OS preparing free memory, resulting in inaccurate young-gen resizing.
    assert(old_gen->committed().byte_size() >= old_gen->used_in_bytes(), "inv");
    // Use uint64_t instead of size_t for 32bit compatibility.
    uint64_t free_mem_in_os;
    if (os::free_memory(free_mem_in_os)) {
      size_t actual_free = (size_t)MIN2(old_gen->committed().byte_size() - old_gen->used_in_bytes() + free_mem_in_os,
                                        (uint64_t)SIZE_MAX);
      if (promotion_estimate > actual_free) {
        log_debug(gc, ergo)("Run full-gc; predicted promotion size > free space in old-gen and OS: %zu > %zu",
                            promotion_estimate, actual_free);
        return ShouldRunFullGC;
      }
    }
  }

  // No particular reasons to run full-gc, so young-gc.
  return ShouldRunYoungGC;
}

// Adaptive size policy support.
void PSScavenge::set_young_generation_boundary(HeapWord* v) {
  _young_generation_boundary = v;

@@ -64,8 +64,6 @@ class PSScavenge: AllStatic {

  static void clean_up_failed_promotion();

  static bool should_attempt_scavenge();

  // Private accessors
  static PSCardTable* card_table() { assert(_card_table != nullptr, "Sanity"); return _card_table; }
  static const ParallelScavengeTracer* gc_tracer() { return &_gc_tracer; }
@@ -139,9 +139,6 @@ public:
  // Callback from VM_SerialGCCollect.
  void collect_at_safepoint(bool full);

  // Perform a full collection of the heap; intended for use in implementing
  // "System.gc". This implies as full a collection as the CollectedHeap
  // supports. Caller does not hold the Heap_lock on entry.
  void collect(GCCause::Cause cause) override;

  // Returns "TRUE" iff "p" points into the committed areas of the heap.

@@ -22,12 +22,11 @@
 *
 */

#include "cppstdlib/new.hpp"
#include "gc/shared/bufferNode.hpp"
#include "memory/allocation.inline.hpp"
#include "utilities/debug.hpp"

#include <new>

BufferNode::AllocatorConfig::AllocatorConfig(size_t size)
  : _buffer_capacity(size)
{

@@ -353,9 +353,7 @@ protected:
  // collection or expansion activity.
  virtual size_t unsafe_max_tlab_alloc() const = 0;

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc". This probably implies as full a collection as the
  // "CollectedHeap" supports.
  // Perform a collection of the heap of a type depending on the given cause.
  virtual void collect(GCCause::Cause cause) = 0;

  // Perform a full collection

@@ -28,7 +28,7 @@
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "nmt/memTracker.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
@@ -122,7 +122,7 @@ OopStorage::ActiveArray::ActiveArray(size_t size) :
{}

OopStorage::ActiveArray::~ActiveArray() {
  assert(_refcount == 0, "precondition");
  assert(_refcount.load_relaxed() == 0, "precondition");
}

OopStorage::ActiveArray* OopStorage::ActiveArray::create(size_t size,

@@ -144,32 +144,32 @@ size_t OopStorage::ActiveArray::size() const {
}

size_t OopStorage::ActiveArray::block_count() const {
  return _block_count;
  return _block_count.load_relaxed();
}

size_t OopStorage::ActiveArray::block_count_acquire() const {
  return AtomicAccess::load_acquire(&_block_count);
  return _block_count.load_acquire();
}

void OopStorage::ActiveArray::increment_refcount() const {
  int new_value = AtomicAccess::add(&_refcount, 1);
  assert(new_value >= 1, "negative refcount %d", new_value - 1);
  int old_value = _refcount.fetch_then_add(1);
  assert(old_value >= 0, "negative refcount %d", old_value);
}

bool OopStorage::ActiveArray::decrement_refcount() const {
  int new_value = AtomicAccess::sub(&_refcount, 1);
  int new_value = _refcount.sub_then_fetch(1);
  assert(new_value >= 0, "negative refcount %d", new_value);
  return new_value == 0;
}
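
These hunks move from `AtomicAccess` free functions to members on HotSpot's `Atomic<T>` wrapper, whose names state whether the old or the new value is returned (`fetch_then_add` vs. `sub_then_fetch`). A rough standard-C++ analogue of the two refcount operations:

    #include <atomic>
    #include <cassert>

    std::atomic<int> refcount{0};

    void increment() {
      int old_value = refcount.fetch_add(1);      // fetch_then_add: old value
      assert(old_value >= 0 && "negative refcount");
    }

    bool decrement() {
      int new_value = refcount.fetch_sub(1) - 1;  // sub_then_fetch: new value
      assert(new_value >= 0 && "negative refcount");
      return new_value == 0;                      // true when last reference dropped
    }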
bool OopStorage::ActiveArray::push(Block* block) {
  size_t index = _block_count;
  size_t index = _block_count.load_relaxed();
  if (index < _size) {
    block->set_active_index(index);
    *block_ptr(index) = block;
    // Use a release_store to ensure all the setup is complete before
    // making the block visible.
    AtomicAccess::release_store(&_block_count, index + 1);
    _block_count.release_store(index + 1);
    return true;
  } else {
    return false;

@@ -177,19 +177,19 @@ bool OopStorage::ActiveArray::push(Block* block) {
}

void OopStorage::ActiveArray::remove(Block* block) {
  assert(_block_count > 0, "array is empty");
  assert(_block_count.load_relaxed() > 0, "array is empty");
  size_t index = block->active_index();
  assert(*block_ptr(index) == block, "block not present");
  size_t last_index = _block_count - 1;
  size_t last_index = _block_count.load_relaxed() - 1;
  Block* last_block = *block_ptr(last_index);
  last_block->set_active_index(index);
  *block_ptr(index) = last_block;
  _block_count = last_index;
  _block_count.store_relaxed(last_index);
}

void OopStorage::ActiveArray::copy_from(const ActiveArray* from) {
  assert(_block_count == 0, "array must be empty");
  size_t count = from->_block_count;
  assert(_block_count.load_relaxed() == 0, "array must be empty");
  size_t count = from->_block_count.load_relaxed();
  assert(count <= _size, "precondition");
  Block* const* from_ptr = from->block_ptr(0);
  Block** to_ptr = block_ptr(0);

@@ -198,7 +198,7 @@ void OopStorage::ActiveArray::copy_from(const ActiveArray* from) {
    assert(block->active_index() == i, "invariant");
    *to_ptr++ = block;
  }
  _block_count = count;
  _block_count.store_relaxed(count);
}

// Blocks start with an array of BitsPerWord oop entries. That array
@@ -230,14 +230,17 @@ OopStorage::Block::Block(const OopStorage* owner, void* memory) :
  assert(is_aligned(this, block_alignment), "misaligned block");
}

#ifdef ASSERT
OopStorage::Block::~Block() {
  assert(_release_refcount == 0, "deleting block while releasing");
  assert(_deferred_updates_next == nullptr, "deleting block with deferred update");
  assert(_release_refcount.load_relaxed() == 0, "deleting block while releasing");
  assert(_deferred_updates_next.load_relaxed() == nullptr, "deleting block with deferred update");
  // Clear fields used by block_for_ptr and entry validation, which
  // might help catch bugs. Volatile to prevent dead-store elimination.
  const_cast<uintx volatile&>(_allocated_bitmask) = 0;
  // might help catch bugs.
  _allocated_bitmask.store_relaxed(0);
  // Volatile to prevent dead-store elimination.
  const_cast<intptr_t volatile&>(_owner_address) = 0;
}
#endif // ASSERT

size_t OopStorage::Block::allocation_size() {
  // _data must be first member, so aligning Block aligns _data.

@@ -272,16 +275,16 @@ uintx OopStorage::Block::bitmask_for_entry(const oop* ptr) const {
bool OopStorage::Block::is_safe_to_delete() const {
  assert(is_empty(), "precondition");
  OrderAccess::loadload();
  return (AtomicAccess::load_acquire(&_release_refcount) == 0) &&
         (AtomicAccess::load_acquire(&_deferred_updates_next) == nullptr);
  return ((_release_refcount.load_acquire() == 0) &&
          (_deferred_updates_next.load_acquire() == nullptr));
}

OopStorage::Block* OopStorage::Block::deferred_updates_next() const {
  return _deferred_updates_next;
  return _deferred_updates_next.load_relaxed();
}

void OopStorage::Block::set_deferred_updates_next(Block* block) {
  _deferred_updates_next = block;
  _deferred_updates_next.store_relaxed(block);
}

bool OopStorage::Block::contains(const oop* ptr) const {

@@ -321,9 +324,8 @@ void OopStorage::Block::atomic_add_allocated(uintx add) {
  // we can use an atomic add to implement the operation. The assert post
  // facto verifies the precondition held; if there were any set bits in
  // common, then after the add at least one of them will be zero.
  uintx sum = AtomicAccess::add(&_allocated_bitmask, add);
  assert((sum & add) == add, "some already present: %zu:%zu",
         sum, add);
  uintx sum = _allocated_bitmask.add_then_fetch(add);
  assert((sum & add) == add, "some already present: %zu:%zu", sum, add);
}

oop* OopStorage::Block::allocate() {
@@ -452,7 +454,7 @@ oop* OopStorage::allocate() {
  oop* result = block->allocate();
  assert(result != nullptr, "allocation failed");
  assert(!block->is_empty(), "postcondition");
  AtomicAccess::inc(&_allocation_count); // release updates outside lock.
  _allocation_count.add_then_fetch(1u); // release updates outside lock.
  if (block->is_full()) {
    // Transitioning from not full to full.
    // Remove full blocks from consideration by future allocates.

@@ -490,7 +492,7 @@ size_t OopStorage::allocate(oop** ptrs, size_t size) {
    assert(!is_empty_bitmask(taken), "invariant");
  } // Drop lock, now that we've taken all available entries from block.
  size_t num_taken = population_count(taken);
  AtomicAccess::add(&_allocation_count, num_taken);
  _allocation_count.add_then_fetch(num_taken);
  // Fill ptrs from those taken entries.
  size_t limit = MIN2(num_taken, size);
  for (size_t i = 0; i < limit; ++i) {

@@ -506,7 +508,7 @@ size_t OopStorage::allocate(oop** ptrs, size_t size) {
    assert(size == limit, "invariant");
    assert(num_taken == (limit + population_count(taken)), "invariant");
    block->release_entries(taken, this);
    AtomicAccess::sub(&_allocation_count, num_taken - limit);
    _allocation_count.sub_then_fetch(num_taken - limit);
  }
  log_trace(oopstorage, ref)("%s: bulk allocate %zu, returned %zu",
                             name(), limit, num_taken - limit);

@@ -527,9 +529,9 @@ bool OopStorage::try_add_block() {
  if (block == nullptr) return false;

  // Add new block to the _active_array, growing if needed.
  if (!_active_array->push(block)) {
  if (!_active_array.load_relaxed()->push(block)) {
    if (expand_active_array()) {
      guarantee(_active_array->push(block), "push failed after expansion");
      guarantee(_active_array.load_relaxed()->push(block), "push failed after expansion");
    } else {
      log_debug(oopstorage, blocks)("%s: failed active array expand", name());
      Block::delete_block(*block);

@@ -576,7 +578,7 @@ OopStorage::Block* OopStorage::block_for_allocation() {
// indicate allocation failure.
bool OopStorage::expand_active_array() {
  assert_lock_strong(_allocation_mutex);
  ActiveArray* old_array = _active_array;
  ActiveArray* old_array = _active_array.load_relaxed();
  size_t new_size = 2 * old_array->size();
  log_debug(oopstorage, blocks)("%s: expand active array %zu",
                                name(), new_size);

@@ -599,7 +601,7 @@ void OopStorage::replace_active_array(ActiveArray* new_array) {
  // Update new_array refcount to account for the new reference.
  new_array->increment_refcount();
  // Install new_array, ensuring its initialization is complete first.
  AtomicAccess::release_store(&_active_array, new_array);
  _active_array.release_store(new_array);
  // Wait for any readers that could read the old array from _active_array.
  // Can't use GlobalCounter here, because this is called from allocate(),
  // which may be called in the scope of a GlobalCounter critical section

@@ -617,7 +619,7 @@ void OopStorage::replace_active_array(ActiveArray* new_array) {
// using it.
OopStorage::ActiveArray* OopStorage::obtain_active_array() const {
  SingleWriterSynchronizer::CriticalSection cs(&_protect_active);
  ActiveArray* result = AtomicAccess::load_acquire(&_active_array);
  ActiveArray* result = _active_array.load_acquire();
  result->increment_refcount();
  return result;
}

@@ -625,7 +627,7 @@ OopStorage::ActiveArray* OopStorage::obtain_active_array() const {
// Decrement refcount of array and destroy if refcount is zero.
void OopStorage::relinquish_block_array(ActiveArray* array) const {
  if (array->decrement_refcount()) {
    assert(array != _active_array, "invariant");
    assert(array != _active_array.load_relaxed(), "invariant");
    ActiveArray::destroy(array);
  }
}
@@ -672,14 +674,14 @@ static void log_release_transitions(uintx releasing,
void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
  assert(releasing != 0, "preconditon");
  // Prevent empty block deletion when transitioning to empty.
  AtomicAccess::inc(&_release_refcount);
  _release_refcount.add_then_fetch(1u);

  // Atomically update allocated bitmask.
  uintx old_allocated = _allocated_bitmask;
  uintx old_allocated = _allocated_bitmask.load_relaxed();
  while (true) {
    assert((releasing & ~old_allocated) == 0, "releasing unallocated entries");
    uintx new_value = old_allocated ^ releasing;
    uintx fetched = AtomicAccess::cmpxchg(&_allocated_bitmask, old_allocated, new_value);
    uintx fetched = _allocated_bitmask.compare_exchange(old_allocated, new_value);
    if (fetched == old_allocated) break; // Successful update.
    old_allocated = fetched; // Retry with updated bitmask.
  }
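
The bitmask update above is the classic compare-exchange retry loop, now spelled with `Atomic<uintx>::compare_exchange` (which returns the fetched value). In portable C++ the same loop looks like:

    #include <atomic>
    #include <cstdint>

    std::atomic<uintptr_t> allocated{0xFFu};

    // Clear the given bits, retrying until no other thread interferes.
    void release_bits(uintptr_t releasing) {
      uintptr_t old_bits = allocated.load(std::memory_order_relaxed);
      while (!allocated.compare_exchange_weak(old_bits, old_bits ^ releasing)) {
        // compare_exchange_weak refreshed old_bits on failure; just retry.
      }
    }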
@@ -698,12 +700,12 @@ void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
  // then someone else has made such a claim and the deferred update has not
  // yet been processed and will include our change, so we don't need to do
  // anything further.
  if (AtomicAccess::replace_if_null(&_deferred_updates_next, this)) {
  if (_deferred_updates_next.compare_exchange(nullptr, this) == nullptr) {
    // Successfully claimed. Push, with self-loop for end-of-list.
    Block* head = owner->_deferred_updates;
    Block* head = owner->_deferred_updates.load_relaxed();
    while (true) {
      _deferred_updates_next = (head == nullptr) ? this : head;
      Block* fetched = AtomicAccess::cmpxchg(&owner->_deferred_updates, head, this);
      _deferred_updates_next.store_relaxed((head == nullptr) ? this : head);
      Block* fetched = owner->_deferred_updates.compare_exchange(head, this);
      if (fetched == head) break; // Successful update.
      head = fetched; // Retry with updated head.
    }

@@ -720,7 +722,7 @@ void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
    }
  }
  // Release hold on empty block deletion.
  AtomicAccess::dec(&_release_refcount);
  _release_refcount.sub_then_fetch(1u);
}
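
The push uses a self-loop as the end-of-list marker so that `nullptr` can keep meaning "not on the list", which is what makes the claim step work. A minimal sketch of that encoding (simplified to a bare node type, not the OopStorage classes):

    #include <atomic>

    struct Node {
      std::atomic<Node*> next{nullptr};  // nullptr: not queued; self: list tail
    };

    std::atomic<Node*> list_head{nullptr};

    void push(Node* n) {
      // Claim the node: only the thread that installs the first non-null
      // next pointer (a self-loop) gets to push it onto the list.
      Node* expected = nullptr;
      if (!n->next.compare_exchange_strong(expected, n)) {
        return;  // already queued by someone else
      }
      Node* head = list_head.load(std::memory_order_relaxed);
      do {
        // Self-loop when the list was empty, otherwise link to old head.
        n->next.store(head == nullptr ? n : head, std::memory_order_relaxed);
      } while (!list_head.compare_exchange_weak(head, n));
    }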
// Process one available deferred update. Returns true if one was processed.

@@ -729,13 +731,13 @@ bool OopStorage::reduce_deferred_updates() {
  // Atomically pop a block off the list, if any available.
  // No ABA issue because this is only called by one thread at a time.
  // The atomicity is wrto pushes by release().
  Block* block = AtomicAccess::load_acquire(&_deferred_updates);
  Block* block = _deferred_updates.load_acquire();
  while (true) {
    if (block == nullptr) return false;
    // Try atomic pop of block from list.
    Block* tail = block->deferred_updates_next();
    if (block == tail) tail = nullptr; // Handle self-loop end marker.
    Block* fetched = AtomicAccess::cmpxchg(&_deferred_updates, block, tail);
    Block* fetched = _deferred_updates.compare_exchange(block, tail);
    if (fetched == block) break; // Update successful.
    block = fetched; // Retry with updated block.
  }
@@ -780,7 +782,7 @@ void OopStorage::release(const oop* ptr) {
  assert(block != nullptr, "%s: invalid release " PTR_FORMAT, name(), p2i(ptr));
  log_trace(oopstorage, ref)("%s: releasing " PTR_FORMAT, name(), p2i(ptr));
  block->release_entries(block->bitmask_for_entry(ptr), this);
  AtomicAccess::dec(&_allocation_count);
  _allocation_count.sub_then_fetch(1u);
}

void OopStorage::release(const oop* const* ptrs, size_t size) {

@@ -806,7 +808,7 @@ void OopStorage::release(const oop* const* ptrs, size_t size) {
    }
    // Release the contiguous entries that are in block.
    block->release_entries(releasing, this);
    AtomicAccess::sub(&_allocation_count, count);
    _allocation_count.sub_then_fetch(count);
  }
}
@@ -837,7 +839,7 @@ OopStorage::OopStorage(const char* name, MemTag mem_tag) :
  _mem_tag(mem_tag),
  _needs_cleanup(false)
{
  _active_array->increment_refcount();
  _active_array.load_relaxed()->increment_refcount();
  assert(_active_mutex->rank() < _allocation_mutex->rank(),
         "%s: active_mutex must have lower rank than allocation_mutex", _name);
  assert(Service_lock->rank() < _active_mutex->rank(),

@@ -852,20 +854,21 @@ void OopStorage::delete_empty_block(const Block& block) {

OopStorage::~OopStorage() {
  Block* block;
  while ((block = _deferred_updates) != nullptr) {
    _deferred_updates = block->deferred_updates_next();
  while ((block = _deferred_updates.load_relaxed()) != nullptr) {
    _deferred_updates.store_relaxed(block->deferred_updates_next());
    block->set_deferred_updates_next(nullptr);
  }
  while ((block = _allocation_list.head()) != nullptr) {
    _allocation_list.unlink(*block);
  }
  bool unreferenced = _active_array->decrement_refcount();
  ActiveArray* array = _active_array.load_relaxed();
  bool unreferenced = array->decrement_refcount();
  assert(unreferenced, "deleting storage while _active_array is referenced");
  for (size_t i = _active_array->block_count(); 0 < i; ) {
    block = _active_array->at(--i);
  for (size_t i = array->block_count(); 0 < i; ) {
    block = array->at(--i);
    Block::delete_block(*block);
  }
  ActiveArray::destroy(_active_array);
  ActiveArray::destroy(array);
  os::free(const_cast<char*>(_name));
}
@@ -894,7 +897,7 @@ bool OopStorage::should_report_num_dead() const {
// face of frequent explicit ServiceThread wakeups, hence the defer period.

// Global cleanup request state.
static volatile bool needs_cleanup_requested = false;
static Atomic<bool> needs_cleanup_requested{false};

// Time after which a cleanup is permitted.
static jlong cleanup_permit_time = 0;

@@ -906,12 +909,11 @@ const jlong cleanup_defer_period = 500 * NANOSECS_PER_MILLISEC;
bool OopStorage::has_cleanup_work_and_reset() {
  assert_lock_strong(Service_lock);

  if (AtomicAccess::load_acquire(&needs_cleanup_requested) &&
      os::javaTimeNanos() > cleanup_permit_time) {
    cleanup_permit_time =
      os::javaTimeNanos() + cleanup_defer_period;
  if (needs_cleanup_requested.load_acquire() &&
      (os::javaTimeNanos() > cleanup_permit_time)) {
    cleanup_permit_time = os::javaTimeNanos() + cleanup_defer_period;
    // Set the request flag false and return its old value.
    AtomicAccess::release_store(&needs_cleanup_requested, false);
    needs_cleanup_requested.release_store(false);
    return true;
  } else {
    return false;

@@ -923,22 +925,22 @@ bool OopStorage::has_cleanup_work_and_reset() {
void OopStorage::record_needs_cleanup() {
  // Set local flag first, else ServiceThread could wake up and miss
  // the request.
  AtomicAccess::release_store(&_needs_cleanup, true);
  AtomicAccess::release_store_fence(&needs_cleanup_requested, true);
  _needs_cleanup.release_store(true);
  needs_cleanup_requested.release_store_fence(true);
}

bool OopStorage::delete_empty_blocks() {
  // ServiceThread might have oopstorage work, but not for this object.
  // But check for deferred updates, which might provide cleanup work.
  if (!AtomicAccess::load_acquire(&_needs_cleanup) &&
      (AtomicAccess::load_acquire(&_deferred_updates) == nullptr)) {
  if (!_needs_cleanup.load_acquire() &&
      (_deferred_updates.load_acquire() == nullptr)) {
    return false;
  }

  MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);

  // Clear the request before processing.
  AtomicAccess::release_store_fence(&_needs_cleanup, false);
  _needs_cleanup.release_store_fence(false);

  // Other threads could be adding to the empty block count or the
  // deferred update list while we're working. Set an upper bound on

@@ -977,7 +979,7 @@ bool OopStorage::delete_empty_blocks() {
      // but don't re-notify, to avoid useless spinning of the
      // ServiceThread. Instead, iteration completion notifies.
      if (_concurrent_iteration_count > 0) return true;
      _active_array->remove(block);
      _active_array.load_relaxed()->remove(block);
    }
    // Remove block from _allocation_list and delete it.
    _allocation_list.unlink(*block);
@@ -1001,8 +1003,9 @@ OopStorage::EntryStatus OopStorage::allocation_status(const oop* ptr) const {
    MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
    // Block could be a false positive, so get index carefully.
    size_t index = Block::active_index_safe(block);
    if ((index < _active_array->block_count()) &&
        (block == _active_array->at(index)) &&
    ActiveArray* array = _active_array.load_relaxed();
    if ((index < array->block_count()) &&
        (block == array->at(index)) &&
        block->contains(ptr)) {
      if ((block->allocated_bitmask() & block->bitmask_for_entry(ptr)) != 0) {
        return ALLOCATED_ENTRY;

@@ -1015,7 +1018,7 @@ OopStorage::EntryStatus OopStorage::allocation_status(const oop* ptr) const {
}

size_t OopStorage::allocation_count() const {
  return _allocation_count;
  return _allocation_count.load_relaxed();
}

size_t OopStorage::block_count() const {

@@ -1084,7 +1087,7 @@ void OopStorage::BasicParState::update_concurrent_iteration_count(int value) {

bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
  data->_processed += data->_segment_end - data->_segment_start;
  size_t start = AtomicAccess::load_acquire(&_next_block);
  size_t start = _next_block.load_acquire();
  if (start >= _block_count) {
    return finish_iteration(data); // No more blocks available.
  }

@@ -1097,11 +1100,11 @@ bool OopStorage::BasicParState::claim_next_segment(IterationData* data) {
  size_t max_step = 10;
  size_t remaining = _block_count - start;
  size_t step = MIN2(max_step, 1 + (remaining / _estimated_thread_count));
  // AtomicAccess::add with possible overshoot. This can perform better
  // Atomic add with possible overshoot. This can perform better
  // than a CAS loop on some platforms when there is contention.
  // We can cope with the uncertainty by recomputing start/end from
  // the result of the add, and dealing with potential overshoot.
  size_t end = AtomicAccess::add(&_next_block, step);
  size_t end = _next_block.add_then_fetch(step);
  // _next_block may have changed, so recompute start from result of add.
  start = end - step;
  // _next_block may have changed so much that end has overshot.
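
Claiming segments with a bare atomic add can run the counter past the end of the array; the code tolerates that by recomputing the claimed range from the add's result and clipping. Roughly, in portable C++:

    #include <algorithm>
    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> next_block{0};

    // Claim up to `step` blocks out of `count`; false when exhausted.
    bool claim(size_t step, size_t count, size_t* begin, size_t* end) {
      size_t e = next_block.fetch_add(step) + step;  // may overshoot count
      size_t b = e - step;                           // recompute our start
      if (b >= count) return false;  // other threads consumed the rest
      *begin = b;
      *end = std::min(e, count);     // clip the overshoot
      return true;
    }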
@@ -1128,15 +1131,15 @@ bool OopStorage::BasicParState::finish_iteration(const IterationData* data) cons
}

size_t OopStorage::BasicParState::num_dead() const {
  return AtomicAccess::load(&_num_dead);
  return _num_dead.load_relaxed();
}

void OopStorage::BasicParState::increment_num_dead(size_t num_dead) {
  AtomicAccess::add(&_num_dead, num_dead);
  _num_dead.add_then_fetch(num_dead);
}

void OopStorage::BasicParState::report_num_dead() const {
  _storage->report_num_dead(AtomicAccess::load(&_num_dead));
  _storage->report_num_dead(_num_dead.load_relaxed());
}

const char* OopStorage::name() const { return _name; }

@@ -1164,8 +1167,8 @@ bool OopStorage::Block::print_containing(const oop* addr, outputStream* st) {
#ifndef PRODUCT

void OopStorage::print_on(outputStream* st) const {
  size_t allocations = _allocation_count;
  size_t blocks = _active_array->block_count();
  size_t allocations = _allocation_count.load_relaxed();
  size_t blocks = _active_array.load_relaxed()->block_count();

  double data_size = section_size * section_count;
  double alloc_percentage = percent_of((double)allocations, blocks * data_size);
@@ -27,6 +27,7 @@

#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/singleWriterSynchronizer.hpp"

@@ -258,15 +259,15 @@ private:

private:
  const char* _name;
  ActiveArray* _active_array;
  Atomic<ActiveArray*> _active_array;
  AllocationList _allocation_list;
  Block* volatile _deferred_updates;
  Atomic<Block*> _deferred_updates;
  Mutex* _allocation_mutex;
  Mutex* _active_mutex;
  NumDeadCallback _num_dead_callback;

  // Volatile for racy unlocked accesses.
  volatile size_t _allocation_count;
  // Atomic for racy unlocked accesses.
  Atomic<size_t> _allocation_count;

  // Protection for _active_array.
  mutable SingleWriterSynchronizer _protect_active;

@@ -278,7 +279,7 @@ private:
  MemTag _mem_tag;

  // Flag indicating this storage object is a candidate for empty block deletion.
  volatile bool _needs_cleanup;
  Atomic<bool> _needs_cleanup;

  // Clients construct via "create" factory function.
  OopStorage(const char* name, MemTag mem_tag);
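
The field declarations above are the heart of this series of hunks: `volatile` members become HotSpot's `Atomic<T>` wrapper from runtime/atomic.hpp, whose accessors name their memory ordering. The wrapper itself is not part of this diff; a rough standard-C++ analogue, matching the member names used throughout these hunks, might look like:

    #include <atomic>

    // Sketch only: approximates the naming used above with std::atomic.
    template <typename T>
    class AtomicValue {
      std::atomic<T> _v;
     public:
      explicit AtomicValue(T v = T{}) : _v(v) {}
      T    load_relaxed() const { return _v.load(std::memory_order_relaxed); }
      T    load_acquire() const { return _v.load(std::memory_order_acquire); }
      void store_relaxed(T v)   { _v.store(v, std::memory_order_relaxed); }
      void release_store(T v)   { _v.store(v, std::memory_order_release); }
      T    fetch_then_add(T i)  { return _v.fetch_add(i); }
      T    add_then_fetch(T i)  { return _v.fetch_add(i) + i; }
      // Returns the fetched value, so callers compare it to 'expected'.
      T    compare_exchange(T expected, T desired) {
        _v.compare_exchange_strong(expected, desired);  // updates 'expected'
        return expected;
      }
    };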
@@ -30,6 +30,7 @@
#include "cppstdlib/type_traits.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/align.hpp"
#include "utilities/count_trailing_zeros.hpp"

@@ -42,8 +43,8 @@ class OopStorage::ActiveArray {
  friend class OopStorage::TestAccess;

  size_t _size;
  volatile size_t _block_count;
  mutable volatile int _refcount;
  Atomic<size_t> _block_count;
  mutable Atomic<int> _refcount;
  // Block* _blocks[1]; // Pseudo flexible array member.

  ActiveArray(size_t size);

@@ -104,7 +105,7 @@ inline OopStorage::Block** OopStorage::ActiveArray::block_ptr(size_t index) {
}

inline OopStorage::Block* OopStorage::ActiveArray::at(size_t index) const {
  assert(index < _block_count, "precondition");
  assert(index < _block_count.load_relaxed(), "precondition");
  return *block_ptr(index);
}

@@ -135,16 +136,16 @@ class OopStorage::Block /* No base class, to avoid messing up alignment. */ {
  oop _data[BitsPerWord];
  static const unsigned _data_pos = 0; // Position of _data.

  volatile uintx _allocated_bitmask; // One bit per _data element.
  Atomic<uintx> _allocated_bitmask; // One bit per _data element.
  intptr_t _owner_address;
  void* _memory; // Unaligned storage containing block.
  size_t _active_index;
  AllocationListEntry _allocation_list_entry;
  Block* volatile _deferred_updates_next;
  volatile uintx _release_refcount;
  Atomic<Block*> _deferred_updates_next;
  Atomic<uintx> _release_refcount;

  Block(const OopStorage* owner, void* memory);
  ~Block();
  ~Block() NOT_DEBUG(= default);

  void check_index(unsigned index) const;
  unsigned get_index(const oop* ptr) const;

@@ -322,7 +323,7 @@ inline const oop* OopStorage::Block::get_pointer(unsigned index) const {
}

inline uintx OopStorage::Block::allocated_bitmask() const {
  return _allocated_bitmask;
  return _allocated_bitmask.load_relaxed();
}

inline uintx OopStorage::Block::bitmask_for_index(unsigned index) const {

@@ -366,7 +367,7 @@ inline bool OopStorage::iterate_impl(F f, Storage* storage) {
  // Propagate const/non-const iteration to the block layer, by using
  // const or non-const blocks as corresponding to Storage.
  using BlockPtr = std::conditional_t<std::is_const<Storage>::value, const Block*, Block*>;
  ActiveArray* blocks = storage->_active_array;
  ActiveArray* blocks = storage->_active_array.load_relaxed();
  size_t limit = blocks->block_count();
  for (size_t i = 0; i < limit; ++i) {
    BlockPtr block = blocks->at(i);
@@ -27,6 +27,7 @@

#include "cppstdlib/type_traits.hpp"
#include "gc/shared/oopStorage.hpp"
#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"

//////////////////////////////////////////////////////////////////////////////

@@ -131,10 +132,10 @@ class OopStorage::BasicParState {
  const OopStorage* _storage;
  ActiveArray* _active_array;
  size_t _block_count;
  volatile size_t _next_block;
  Atomic<size_t> _next_block;
  uint _estimated_thread_count;
  bool _concurrent;
  volatile size_t _num_dead;
  Atomic<size_t> _num_dead;

  NONCOPYABLE(BasicParState);
@@ -22,6 +22,7 @@
 *
 */

#include "cppstdlib/new.hpp"
#include "gc/shared/partialArrayState.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/arena.hpp"

@@ -33,8 +34,6 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

#include <new>

PartialArrayState::PartialArrayState(oop src, oop dst,
                                     size_t index, size_t length,
                                     size_t initial_refcount)
@@ -198,10 +198,8 @@ void StringDedup::Processor::run(JavaThread* thread) {
void StringDedup::Processor::log_statistics() {
  _total_stat.add(&_cur_stat);
  Stat::log_summary(&_cur_stat, &_total_stat);
  if (log_is_enabled(Debug, stringdedup)) {
    _cur_stat.log_statistics(false);
    _total_stat.log_statistics(true);
    Table::log_statistics();
  }
  _cur_stat.emit_statistics(false /* total */);
  _total_stat.emit_statistics(true /* total */);
  Table::log_statistics();
  _cur_stat = Stat{};
}

@@ -23,6 +23,7 @@
 */

#include "gc/shared/stringdedup/stringDedupStat.hpp"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "utilities/globalDefinitions.hpp"

@@ -91,13 +92,6 @@ static double strdedup_elapsed_param_ms(Tickspan t) {
}

void StringDedup::Stat::log_summary(const Stat* last_stat, const Stat* total_stat) {
  double total_deduped_bytes_percent = 0.0;

  if (total_stat->_new_bytes > 0) {
    // Avoid division by zero
    total_deduped_bytes_percent = percent_of(total_stat->_deduped_bytes, total_stat->_new_bytes);
  }

  log_info(stringdedup)(
    "Concurrent String Deduplication "
    "%zu/" STRDEDUP_BYTES_FORMAT_NS " (new), "

@@ -106,7 +100,7 @@ void StringDedup::Stat::log_summary(const Stat* last_stat, const Stat* total_sta
    STRDEDUP_ELAPSED_FORMAT_MS " of " STRDEDUP_ELAPSED_FORMAT_MS,
    last_stat->_new, STRDEDUP_BYTES_PARAM(last_stat->_new_bytes),
    last_stat->_deduped, STRDEDUP_BYTES_PARAM(last_stat->_deduped_bytes),
    total_deduped_bytes_percent,
    percent_of(total_stat->_deduped_bytes, total_stat->_new_bytes),
    strdedup_elapsed_param_ms(last_stat->_process_elapsed),
    strdedup_elapsed_param_ms(last_stat->_active_elapsed));
}
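
The explicit division-by-zero guard is dropped in favor of calling `percent_of()` directly, which implies the helper already tolerates an empty total. A self-contained version of such a helper, under that assumption:

    #include <cstdio>

    // Percentage helper that is safe for a zero denominator.
    double percent_of(double part, double total) {
      return total > 0.0 ? (part / total) * 100.0 : 0.0;
    }

    int main() {
      printf("%.1f%%\n", percent_of(25, 200));  // 12.5%
      printf("%.1f%%\n", percent_of(25, 0));    // 0.0%, no division by zero
    }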
@@ -208,7 +202,7 @@ void StringDedup::Stat::log_times(const char* prefix) const {
|
||||
}
|
||||
}
|
||||
|
||||
void StringDedup::Stat::log_statistics(bool total) const {
|
||||
void StringDedup::Stat::log_statistics() const {
|
||||
double known_percent = percent_of(_known, _inspected);
|
||||
double known_shared_percent = percent_of(_known_shared, _inspected);
|
||||
double new_percent = percent_of(_new, _inspected);
|
||||
@@ -216,7 +210,6 @@ void StringDedup::Stat::log_statistics(bool total) const {
|
||||
double deduped_bytes_percent = percent_of(_deduped_bytes, _new_bytes);
|
||||
double replaced_percent = percent_of(_replaced, _new);
|
||||
double deleted_percent = percent_of(_deleted, _new);
|
||||
log_times(total ? "Total" : "Last");
|
||||
log_debug(stringdedup)(" Inspected: %12zu", _inspected);
|
||||
log_debug(stringdedup)(" Known: %12zu(%5.1f%%)", _known, known_percent);
|
||||
log_debug(stringdedup)(" Shared: %12zu(%5.1f%%)", _known_shared, known_shared_percent);
|
||||
@@ -229,3 +222,40 @@ void StringDedup::Stat::log_statistics(bool total) const {
   log_debug(stringdedup)("  Skipped: %zu (dead), %zu (incomplete), %zu (shared)",
                          _skipped_dead, _skipped_incomplete, _skipped_shared);
 }
+
+void StringDedup::Stat::emit_statistics(bool total) const {
+  if (log_is_enabled(Debug, stringdedup)) {
+    log_times(total ? "Total" : "Last");
+    log_statistics();
+  }
+
+  if (total) {
+    // Send only JFR events about the last stats
+    return;
+  }
+
+  EventStringDeduplication e;
+  if (e.should_commit()) {
+    e.set_starttime(_active_start);
+    Ticks active_end = _active_start;
+    active_end += _active_elapsed;
+    e.set_endtime(active_end);
+
+    e.set_inspected(_inspected);
+    e.set_known(_known);
+    e.set_shared(_known_shared);
+    e.set_newStrings(_new);
+    e.set_newSize(_new_bytes);
+    e.set_replaced(_replaced);
+    e.set_deleted(_deleted);
+    e.set_deduplicated(_deduped);
+    e.set_deduplicatedSize(_deduped_bytes);
+    e.set_skippedDead(_skipped_dead);
+    e.set_skippedIncomplete(_skipped_incomplete);
+    e.set_skippedShared(_skipped_shared);
+    e.set_processing(_process_elapsed);
+    e.set_tableResize(_resize_table_elapsed);
+    e.set_tableCleanup(_cleanup_table_elapsed);
+    e.commit();
+  }
+}
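The new emit_statistics is the standard shape for a period event emitted from HotSpot native code: a stack-allocated event class generated from the JFR metadata, a cheap should_commit() gate so the setters cost nothing while the event is disabled, explicit start and end times because the event covers an interval rather than a lexical scope, then commit(). Reduced to its moving parts (types and fields are the ones used above; this is a skeleton of that function, not new API):

EventStringDeduplication e;   // generated from the JFR event metadata
if (e.should_commit()) {      // cheap gate: false unless the event is enabled
  Ticks end = _active_start;
  end += _active_elapsed;     // interval events set both endpoints explicitly
  e.set_starttime(_active_start);
  e.set_endtime(end);
  // ... payload setters as above ...
  e.commit();                 // serialize into the thread-local JFR buffer
}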
@@ -71,6 +71,7 @@ private:
   void report_phase_end(const char* phase, Tickspan* elapsed);

   void log_times(const char* prefix) const;
+  void log_statistics() const;

 public:
   Stat();

@@ -148,7 +149,7 @@ public:
   void report_active_end();

   void add(const Stat* const stat);
-  void log_statistics(bool total) const;
+  void emit_statistics(bool total) const;

   static void log_summary(const Stat* last_stat, const Stat* total_stat);
 };
@@ -245,20 +245,20 @@ void StringDedup::Table::num_dead_callback(size_t num_dead) {
   // Lock while modifying dead count and state.
   MonitorLocker ml(StringDedup_lock, Mutex::_no_safepoint_check_flag);

-  switch (AtomicAccess::load(&_dead_state)) {
+  switch (_dead_state.load_relaxed()) {
   case DeadState::good:
-    AtomicAccess::store(&_dead_count, num_dead);
+    _dead_count.store_relaxed(num_dead);
     break;

   case DeadState::wait1:
     // Set count first, so dedup thread gets this or a later value if it
     // sees the good state.
-    AtomicAccess::store(&_dead_count, num_dead);
-    AtomicAccess::release_store(&_dead_state, DeadState::good);
+    _dead_count.store_relaxed(num_dead);
+    _dead_state.release_store(DeadState::good);
     break;

   case DeadState::wait2:
-    AtomicAccess::release_store(&_dead_state, DeadState::wait1);
+    _dead_state.release_store(DeadState::wait1);
     break;

   case DeadState::cleaning:
|
||||
size_t StringDedup::Table::_grow_threshold;
|
||||
StringDedup::Table::CleanupState* StringDedup::Table::_cleanup_state = nullptr;
|
||||
bool StringDedup::Table::_need_bucket_shrinking = false;
|
||||
volatile size_t StringDedup::Table::_dead_count = 0;
|
||||
volatile StringDedup::Table::DeadState StringDedup::Table::_dead_state = DeadState::good;
|
||||
Atomic<size_t> StringDedup::Table::_dead_count{};
|
||||
|
||||
Atomic<StringDedup::Table::DeadState>
|
||||
StringDedup::Table::_dead_state{DeadState::good};
|
||||
|
||||
void StringDedup::Table::initialize_storage() {
|
||||
assert(_table_storage == nullptr, "storage already created");
|
||||
@@ -477,19 +479,19 @@ void StringDedup::Table::add(TableValue tv, uint hash_code) {
 }

 bool StringDedup::Table::is_dead_count_good_acquire() {
-  return AtomicAccess::load_acquire(&_dead_state) == DeadState::good;
+  return _dead_state.load_acquire() == DeadState::good;
 }

 // Should be consistent with cleanup_start_if_needed.
 bool StringDedup::Table::is_grow_needed() {
   return is_dead_count_good_acquire() &&
-         ((_number_of_entries - AtomicAccess::load(&_dead_count)) > _grow_threshold);
+         ((_number_of_entries - _dead_count.load_relaxed()) > _grow_threshold);
 }

 // Should be consistent with cleanup_start_if_needed.
 bool StringDedup::Table::is_dead_entry_removal_needed() {
   return is_dead_count_good_acquire() &&
-         Config::should_cleanup_table(_number_of_entries, AtomicAccess::load(&_dead_count));
+         Config::should_cleanup_table(_number_of_entries, _dead_count.load_relaxed());
 }

 StringDedup::Table::TableValue
@@ -651,7 +653,7 @@ bool StringDedup::Table::cleanup_start_if_needed(bool grow_only, bool force) {
   // If dead count is good then we can read it once and use it below
   // without needing any locking. The recorded count could increase
   // after the read, but that's okay.
-  size_t dead_count = AtomicAccess::load(&_dead_count);
+  size_t dead_count = _dead_count.load_relaxed();
   // This assertion depends on dead state tracking. Otherwise, concurrent
   // reference processing could detect some, but a cleanup operation could
   // remove them before they are reported.
@@ -675,8 +677,8 @@ bool StringDedup::Table::cleanup_start_if_needed(bool grow_only, bool force) {

 void StringDedup::Table::set_dead_state_cleaning() {
   MutexLocker ml(StringDedup_lock, Mutex::_no_safepoint_check_flag);
-  AtomicAccess::store(&_dead_count, size_t(0));
-  AtomicAccess::store(&_dead_state, DeadState::cleaning);
+  _dead_count.store_relaxed(0);
+  _dead_state.store_relaxed(DeadState::cleaning);
 }

 bool StringDedup::Table::start_resizer(bool grow_only, size_t number_of_entries) {

@@ -710,7 +712,7 @@ void StringDedup::Table::cleanup_end() {
   delete _cleanup_state;
   _cleanup_state = nullptr;
   MutexLocker ml(StringDedup_lock, Mutex::_no_safepoint_check_flag);
-  AtomicAccess::store(&_dead_state, DeadState::wait2);
+  _dead_state.store_relaxed(DeadState::wait2);
 }

 void StringDedup::Table::verify() {
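Taken together, set_dead_state_cleaning and cleanup_end implement a small handshake with the num_dead_callback switch above: cleaning zeroes the count, cleanup_end parks the state at wait2, and the next two reference-processing rounds step it wait2 -> wait1 -> good, so counts reported concurrently with a cleanup are discarded rather than trusted. A sketch of just the transition function, reconstructed from the switches in this compare (not HotSpot source):

enum class DeadState { good, wait1, wait2, cleaning };

// One num_dead report advances the state back toward "good".
DeadState after_num_dead_report(DeadState s) {
  switch (s) {
  case DeadState::wait2: return DeadState::wait1;  // first report after cleanup: dropped
  case DeadState::wait1: return DeadState::good;   // second report: counts trusted again
  default:               return s;                 // good records the count; cleaning ignores it
  }
}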
@@ -728,12 +730,16 @@ void StringDedup::Table::verify() {
 }

 void StringDedup::Table::log_statistics() {
+  if (!log_is_enabled(Debug, stringdedup)) {
+    return;
+  }
+
   size_t dead_count;
   int dead_state;
   {
     MutexLocker ml(StringDedup_lock, Mutex::_no_safepoint_check_flag);
-    dead_count = _dead_count;
-    dead_state = static_cast<int>(_dead_state);
+    dead_count = _dead_count.load_relaxed();
+    dead_state = static_cast<int>(_dead_state.load_relaxed());
   }
   log_debug(stringdedup)("Table: %zu values in %zu buckets, %zu dead (%d)",
                          _number_of_entries, _number_of_buckets,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -30,6 +30,7 @@
 #include "memory/allStatic.hpp"
 #include "oops/typeArrayOop.hpp"
 #include "oops/weakHandle.hpp"
+#include "runtime/atomic.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"

@@ -86,9 +87,9 @@ private:
   static CleanupState* _cleanup_state;
   static bool _need_bucket_shrinking;
   // These are always written while holding StringDedup_lock, but may be
-  // read by the dedup thread without holding the lock lock.
-  static volatile size_t _dead_count;
-  static volatile DeadState _dead_state;
+  // read by the dedup thread without holding the lock.
+  static Atomic<size_t> _dead_count;
+  static Atomic<DeadState> _dead_state;

   static uint compute_hash(typeArrayOop obj);
   static size_t hash_to_index(uint hash_code);
@@ -37,7 +37,6 @@
 #include "utilities/copy.hpp"

 size_t ThreadLocalAllocBuffer::_max_size = 0;
-int ThreadLocalAllocBuffer::_reserve_for_allocation_prefetch = 0;
 unsigned int ThreadLocalAllocBuffer::_target_refills = 0;

 ThreadLocalAllocBuffer::ThreadLocalAllocBuffer() :

@@ -225,30 +224,6 @@ void ThreadLocalAllocBuffer::startup_initialization() {
   // abort during VM initialization.
   _target_refills = MAX2(_target_refills, 2U);

-#ifdef COMPILER2
-  // If the C2 compiler is present, extra space is needed at the end of
-  // TLABs, otherwise prefetching instructions generated by the C2
-  // compiler will fault (due to accessing memory outside of heap).
-  // The amount of space is the max of the number of lines to
-  // prefetch for array and for instance allocations. (Extra space must be
-  // reserved to accommodate both types of allocations.)
-  //
-  // Only SPARC-specific BIS instructions are known to fault. (Those
-  // instructions are generated if AllocatePrefetchStyle==3 and
-  // AllocatePrefetchInstr==1). To be on the safe side, however,
-  // extra space is reserved for all combinations of
-  // AllocatePrefetchStyle and AllocatePrefetchInstr.
-  //
-  // If the C2 compiler is not present, no space is reserved.
-
-  // +1 for rounding up to next cache line, +1 to be safe
-  if (CompilerConfig::is_c2_or_jvmci_compiler_enabled()) {
-    int lines = MAX2(AllocatePrefetchLines, AllocateInstancePrefetchLines) + 2;
-    _reserve_for_allocation_prefetch = (AllocatePrefetchDistance + AllocatePrefetchStepSize * lines) /
-                                       (int)HeapWordSize;
-  }
-#endif
-
   // During jvm startup, the main thread is initialized
   // before the heap is initialized. So reinitialize it now.
   guarantee(Thread::current()->is_Java_thread(), "tlab initialization thread not Java thread");

@@ -454,8 +429,7 @@ void ThreadLocalAllocStats::publish() {
 }

 size_t ThreadLocalAllocBuffer::end_reserve() {
-  size_t reserve_size = CollectedHeap::lab_alignment_reserve();
-  return MAX2(reserve_size, (size_t)_reserve_for_allocation_prefetch);
+  return CollectedHeap::lab_alignment_reserve();
 }

 const HeapWord* ThreadLocalAllocBuffer::start_relaxed() const {
@@ -58,7 +58,6 @@ private:
   size_t _allocated_before_last_gc; // total bytes allocated up until the last gc

   static size_t _max_size; // maximum size of any TLAB
-  static int _reserve_for_allocation_prefetch; // Reserve at the end of the TLAB
   static unsigned _target_refills; // expected number of refills between GCs

   unsigned _number_of_refills;
@@ -36,7 +36,7 @@
   do { \
     if (FLAG_IS_DEFAULT(name) && (name)) { \
       log_info(gc)("Heuristics ergonomically sets -XX:-" #name); \
-      FLAG_SET_DEFAULT(name, false); \
+      FLAG_SET_ERGO(name, false); \
     } \
   } while (0)

@@ -44,7 +44,7 @@
   do { \
     if (FLAG_IS_DEFAULT(name) && !(name)) { \
       log_info(gc)("Heuristics ergonomically sets -XX:+" #name); \
-      FLAG_SET_DEFAULT(name, true); \
+      FLAG_SET_ERGO(name, true); \
     } \
   } while (0)

@@ -52,7 +52,7 @@
   do { \
     if (FLAG_IS_DEFAULT(name)) { \
       log_info(gc)("Heuristics ergonomically sets -XX:" #name "=" #value); \
-      FLAG_SET_DEFAULT(name, value); \
+      FLAG_SET_ERGO(name, value); \
     } \
   } while (0)
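All three Shenandoah heuristics macros now route through FLAG_SET_ERGO rather than FLAG_SET_DEFAULT. Hedged reading, since the macro bodies are not shown in this compare: both assign the value, but FLAG_SET_ERGO also records the flag's origin as ergonomic, so -XX:+PrintFlagsFinal and the management API report that the VM, not the user, chose the value, while FLAG_SET_DEFAULT rewrites the default and leaves the flag looking untouched. Illustrative shapes only; the real macros come from runtime/globals_extension.hpp (the include added in the next hunk):

#define SKETCH_FLAG_SET_DEFAULT(name, value) \
  ((name) = (value)) /* value changes; recorded origin stays DEFAULT */

#define SKETCH_FLAG_SET_ERGO(name, value)                              \
  do {                                                                 \
    (name) = (value);                                                  \
    /* plus bookkeeping that marks the flag's origin as ERGONOMIC, */  \
    /* which is what PrintFlagsFinal and management report */          \
  } while (0)

Note that the ShenandoahPassiveMode hunk below deliberately goes the other way: it open-codes the old FLAG_SET_DEFAULT behavior instead of calling SHENANDOAH_ERGO_OVERRIDE_DEFAULT, keeping passive mode's override out of the ergonomic bookkeeping.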
@@ -29,6 +29,7 @@
 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
 #include "logging/log.hpp"
 #include "logging/logTag.hpp"
+#include "runtime/globals_extension.hpp"
 #include "runtime/java.hpp"

 void ShenandoahPassiveMode::initialize_flags() const {

@@ -38,7 +39,10 @@ void ShenandoahPassiveMode::initialize_flags() const {

   // No need for evacuation reserve with Full GC, only for Degenerated GC.
   if (!ShenandoahDegeneratedGC) {
-    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahEvacReserve, 0);
+    if (FLAG_IS_DEFAULT(ShenandoahEvacReserve)) {
+      log_info(gc)("Heuristics sets -XX:ShenandoahEvacReserve=0");
+      FLAG_SET_DEFAULT(ShenandoahEvacReserve, 0);
+    }
   }

   // Disable known barriers by default.
@@ -106,9 +106,6 @@ private:
   size_t _used[UIntNumPartitions];
   size_t _available[UIntNumPartitions];

-  // Measured in bytes.
-  size_t _allocated_since_gc_start[UIntNumPartitions];
-
   // Some notes:
   //  total_region_counts[p] is _capacity[p] / region_size_bytes
   //  retired_regions[p] is total_region_counts[p] - _region_counts[p]
@@ -378,24 +378,20 @@ HeapWord* ShenandoahCardCluster::first_object_start(const size_t card_index, con
 //    evacuation phase) of young collections. This is never called
 //    during global collections during marking or update refs.
 // 4. Every allocation under TAMS updates the object start array.
+#ifdef ASSERT
   oop obj = cast_to_oop(p);
   assert(oopDesc::is_oop(obj), "Should be an object");
-#ifdef ASSERT
-#define WALK_FORWARD_IN_BLOCK_START true
-#else
-#define WALK_FORWARD_IN_BLOCK_START false
-#endif // ASSERT
-  while (WALK_FORWARD_IN_BLOCK_START && p + obj->size() < left) {
+  while (p + obj->size() < left) {
     p += obj->size();
     obj = cast_to_oop(p);
     assert(oopDesc::is_oop(obj), "Should be an object");
     assert(Klass::is_valid(obj->klass()), "Not a valid klass ptr");
     // Check assumptions in previous block comment if this assert fires
-    guarantee(false, "Should never need forward walk in block start");
+    fatal("Should never need forward walk in block start");
   }
-#undef WALK_FORWARD_IN_BLOCK_START
   assert(p <= left, "p should start at or before left end of card");
   assert(p + obj->size() > left, "obj should end after left end of card");
+#endif // ASSERT
   return p;
 }
@@ -27,10 +27,9 @@

 #include "gc/z/zDeferredConstructed.hpp"

+#include "cppstdlib/new.hpp"
 #include "cppstdlib/type_traits.hpp"

-#include <new>
-
 template <typename T>
 inline ZDeferredConstructed<T>::ZDeferredConstructed()
   DEBUG_ONLY(: _initialized(false)) {
@@ -53,7 +53,8 @@ enum {
   JMM_VERSION_2 = 0x20020000, // JDK 10
   JMM_VERSION_3 = 0x20030000, // JDK 14
   JMM_VERSION_4 = 0x20040000, // JDK 21
-  JMM_VERSION = JMM_VERSION_4
+  JMM_VERSION_5 = 0x20050000, // JDK 26
+  JMM_VERSION = JMM_VERSION_5
 };

 typedef struct {

@@ -81,6 +82,7 @@ typedef enum {
   JMM_GC_TIME_MS = 9, /* Total accumulated time spent in collection */
   JMM_GC_COUNT = 10, /* Total number of collections */
   JMM_JVM_UPTIME_MS = 11, /* The JVM uptime in milliseconds */
+  JMM_TOTAL_GC_CPU_TIME = 12, /* Total accumulated GC CPU time */

   JMM_INTERNAL_ATTRIBUTE_INDEX = 100,
   JMM_CLASS_LOADED_BYTES = 101, /* Number of bytes loaded instance classes */
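The version bump to JMM_VERSION_5 goes hand in hand with the new JMM_TOTAL_GC_CPU_TIME attribute: callers compiled against the older interface must still be able to request the version they understand. A hedged sketch of the negotiation from the consumer side (JVM_GetManagement is the real entry point the JDK's management code uses; the fallback policy here is illustrative, not JDK code):

// Ask for the newest interface first; fall back if the VM predates it.
void* jmm = JVM_GetManagement(JMM_VERSION_5);
if (jmm == nullptr) {
  jmm = JVM_GetManagement(JMM_VERSION_4);  // running on a VM older than JDK 26
}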
@@ -421,7 +421,9 @@ JVM_END

 JVM_ENTRY_NO_ENV(jlong, jfr_host_total_swap_memory(JNIEnv* env, jclass jvm))
 #ifdef LINUX
   // We want the host swap memory, not the container value.
-  return os::Linux::host_swap();
+  physical_memory_size_type host_swap = 0;
+  (void)os::Linux::host_swap(host_swap); // Discard return value and treat as no swap
+  return static_cast<jlong>(host_swap);
 #else
   physical_memory_size_type total_swap_space = 0;
   // Return value ignored - defaulting to 0 on failure.
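os::Linux::host_swap has evidently moved from returning the value to the out-parameter-plus-status style already used on the #else path, so a failed query collapses to "no swap" instead of a sentinel. A hedged sketch of the assumed new shape and the caller pattern (the signature is inferred from the call site above, not taken from the header):

// Assumed: returns false on failure and leaves the out-parameter untouched.
//   static bool host_swap(physical_memory_size_type& value);

physical_memory_size_type host_swap_value = 0;  // 0 reads as "no swap" on failure
(void)os::Linux::host_swap(host_swap_value);    // status deliberately ignored
jlong result = static_cast<jlong>(host_swap_value);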
Some files were not shown because too many files have changed in this diff.