Mirror of https://github.com/JetBrains/JetBrainsRuntime.git (synced 2025-12-06 09:29:38 +01:00)

Compare commits: jdk-26+8...jbr-23.0.2 (92 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | dbbaa9e632 |  |
|  | 82220915e5 |  |
|  | cc687e87d9 |  |
|  | dfbb5e3ffa |  |
|  | 9805cbbd25 |  |
|  | c5f294b439 |  |
|  | 39aab17724 |  |
|  | e9d86d853f |  |
|  | 821a3aa53e |  |
|  | 8883b15634 |  |
|  | e4d80b1f6c |  |
|  | 4ca2e48acb |  |
|  | 5cfaec2df8 |  |
|  | 13e53b8e3d |  |
|  | 5b9ecb1786 |  |
|  | 8f5c6e6b02 |  |
|  | 70ad622bc2 |  |
|  | ca37a482cc |  |
|  | ae10055b2c |  |
|  | 4e52320979 |  |
|  | e0dad6d5ae |  |
|  | bbbfb7ff61 |  |
|  | 2f60d36848 |  |
|  | b415b98139 |  |
|  | 90d5b5b4c4 |  |
|  | 653c481d71 |  |
|  | 10b28babe5 |  |
|  | d383365ea4 |  |
|  | 2723ffa8ed |  |
|  | a4f938c623 |  |
|  | b6d0ead93f |  |
|  | 272d11a389 |  |
|  | 58dc4c7e0e |  |
|  | 9d744b0e04 |  |
|  | 4410cdc839 |  |
|  | 9a4fc097e0 |  |
|  | 7040de19bd |  |
|  | e5fbc631ca |  |
|  | e78c682142 |  |
|  | d494c21b9b |  |
|  | 87a29629e3 |  |
|  | e2db30a534 |  |
|  | 32ed61572c |  |
|  | 62d0ee9cc0 |  |
|  | b5bf9a6605 |  |
|  | 98fd657cfa |  |
|  | d7b9454205 |  |
|  | 9449a53217 |  |
|  | b5fbdb2166 |  |
|  | 2086b0f070 |  |
|  | d1510505c1 |  |
|  | ae49182985 |  |
|  | 1ea6456172 |  |
|  | 37ebecec88 |  |
|  | 17858b2f07 |  |
|  | 08c7c38342 |  |
|  | fa7521b29e |  |
|  | fbcf6d9c4f |  |
|  | a124e6e5c7 |  |
|  | 0779f0d668 |  |
|  | bd66b6b6f9 |  |
|  | 3edf379b67 |  |
|  | 10d81a337d |  |
|  | 1dbad8058b |  |
|  | 215149310c |  |
|  | 23f2c97f4c |  |
|  | e84e0cdf62 |  |
|  | 2243974d29 |  |
|  | 7e6693aeba |  |
|  | 79dd575113 |  |
|  | 12a61bce8d |  |
|  | d9dd2d19b0 |  |
|  | b21d7b23c1 |  |
|  | 867312a7e5 |  |
|  | a4b49253e3 |  |
|  | 86fcbe09f8 |  |
|  | 48997f54c9 |  |
|  | 4e3bfc926e |  |
|  | d0b4f9baab |  |
|  | cb3c45a698 |  |
|  | 63e95d8987 |  |
|  | 10f71f7dd4 |  |
|  | a7964453cf |  |
|  | 5230786a0d |  |
|  | 378cd12f6b |  |
|  | d96476d8bd |  |
|  | 1a43190e41 |  |
|  | 90bf3a809a |  |
|  | b17a1c092f |  |
|  | 9e22b6dec3 |  |
|  | fdbc2b24d3 |  |
|  | 31696a445c |  |
@@ -1,7 +1,7 @@
 [general]
-project=jdk
+project=jdk-updates
 jbs=JDK
-version=23
+version=23.0.1
 
 [checks]
 error=author,committer,reviewers,merge,issues,executable,symlink,message,hg-tag,whitespace,problemlists
@@ -9,7 +9,7 @@ warning=issuestitle
 
 [repository]
 tags=(?:jdk-(?:[1-9]([0-9]*)(?:\.(?:0|[1-9][0-9]*)){0,4})(?:\+(?:(?:[0-9]+))|(?:-ga)))|(?:jdk[4-9](?:u\d{1,3})?-(?:(?:b\d{2,3})|(?:ga)))|(?:hs\d\d(?:\.\d{1,2})?-b\d\d)
-branches=
+branches=.*
 
 [census]
 version=0
@@ -28,12 +28,12 @@
 
 DEFAULT_VERSION_FEATURE=23
 DEFAULT_VERSION_INTERIM=0
-DEFAULT_VERSION_UPDATE=0
+DEFAULT_VERSION_UPDATE=1
 DEFAULT_VERSION_PATCH=0
 DEFAULT_VERSION_EXTRA1=0
 DEFAULT_VERSION_EXTRA2=0
 DEFAULT_VERSION_EXTRA3=0
-DEFAULT_VERSION_DATE=2024-09-17
+DEFAULT_VERSION_DATE=2024-10-15
 DEFAULT_VERSION_CLASSFILE_MAJOR=67 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
 DEFAULT_VERSION_CLASSFILE_MINOR=0
 DEFAULT_VERSION_DOCS_API_SINCE=11
@@ -778,7 +778,7 @@ public class FieldGen {
         result.appendLine("}");
 
         result.appendLine("@Override");
-        result.appendLine("protected int mult(long[] a, long[] b, long[] r) {");
+        result.appendLine("protected void mult(long[] a, long[] b, long[] r) {");
         result.incrIndent();
         for (int i = 0; i < 2 * params.getNumLimbs() - 1; i++) {
             result.appendIndent();
@@ -804,9 +804,6 @@ public class FieldGen {
             }
         }
         result.append(");\n");
-        result.appendIndent();
-        result.append("return 0;");
-        result.appendLine();
         result.decrIndent();
         result.appendLine("}");
 
@@ -836,7 +833,7 @@ public class FieldGen {
         //     }
         // }
         result.appendLine("@Override");
-        result.appendLine("protected int square(long[] a, long[] r) {");
+        result.appendLine("protected void square(long[] a, long[] r) {");
         result.incrIndent();
         for (int i = 0; i < 2 * params.getNumLimbs() - 1; i++) {
             result.appendIndent();
@@ -877,9 +874,6 @@ public class FieldGen {
             }
         }
         result.append(");\n");
-        result.appendIndent();
-        result.append("return 0;");
-        result.appendLine();
         result.decrIndent();
         result.appendLine("}");
 
@@ -1828,10 +1828,12 @@ enum Nf {
   // Vector unordered indexed load instructions
   INSN( vluxei8_v, 0b0000111, 0b000, 0b01, 0b0);
+  INSN(vluxei32_v, 0b0000111, 0b110, 0b01, 0b0);
   INSN(vluxei64_v, 0b0000111, 0b111, 0b01, 0b0);
 
   // Vector unordered indexed store instructions
   INSN( vsuxei8_v, 0b0100111, 0b000, 0b01, 0b0);
+  INSN(vsuxei32_v, 0b0100111, 0b110, 0b01, 0b0);
   INSN(vsuxei64_v, 0b0100111, 0b111, 0b01, 0b0);
 
 #undef INSN
 
@@ -4795,12 +4795,11 @@ instruct vcountTrailingZeros(vReg dst, vReg src) %{
 
 // ------------------------------ Vector Load Gather ---------------------------
 
-instruct gather_load(vReg dst, indirect mem, vReg idx) %{
-  predicate(type2aelembytes(Matcher::vector_element_basic_type(n)) == 4 ||
-            type2aelembytes(Matcher::vector_element_basic_type(n)) == 8);
+instruct gather_loadS(vReg dst, indirect mem, vReg idx) %{
+  predicate(type2aelembytes(Matcher::vector_element_basic_type(n)) == 4);
   match(Set dst (LoadVectorGather mem idx));
   effect(TEMP_DEF dst);
-  format %{ "gather_load $dst, $mem, $idx" %}
+  format %{ "gather_loadS $dst, $mem, $idx" %}
   ins_encode %{
     __ vmv1r_v(as_VectorRegister($dst$$reg), as_VectorRegister($idx$$reg));
     BasicType bt = Matcher::vector_element_basic_type(this);
@@ -4813,12 +4812,28 @@ instruct gather_load(vReg dst, indirect mem, vReg idx) %{
   ins_pipe(pipe_slow);
 %}
 
-instruct gather_load_masked(vReg dst, indirect mem, vReg idx, vRegMask_V0 v0, vReg tmp) %{
-  predicate(type2aelembytes(Matcher::vector_element_basic_type(n)) == 4 ||
-            type2aelembytes(Matcher::vector_element_basic_type(n)) == 8);
+instruct gather_loadD(vReg dst, indirect mem, vReg idx) %{
+  predicate(type2aelembytes(Matcher::vector_element_basic_type(n)) == 8);
+  match(Set dst (LoadVectorGather mem idx));
+  effect(TEMP_DEF dst);
+  format %{ "gather_loadD $dst, $mem, $idx" %}
+  ins_encode %{
+    BasicType bt = Matcher::vector_element_basic_type(this);
+    Assembler::SEW sew = Assembler::elemtype_to_sew(bt);
+    __ vsetvli_helper(bt, Matcher::vector_length(this));
+    __ vzext_vf2(as_VectorRegister($dst$$reg), as_VectorRegister($idx$$reg));
+    __ vsll_vi(as_VectorRegister($dst$$reg), as_VectorRegister($dst$$reg), (int)sew);
+    __ vluxei64_v(as_VectorRegister($dst$$reg), as_Register($mem$$base),
+                  as_VectorRegister($dst$$reg));
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct gather_loadS_masked(vReg dst, indirect mem, vReg idx, vRegMask_V0 v0, vReg tmp) %{
+  predicate(type2aelembytes(Matcher::vector_element_basic_type(n)) == 4);
   match(Set dst (LoadVectorGatherMasked mem (Binary idx v0)));
   effect(TEMP_DEF dst, TEMP tmp);
-  format %{ "gather_load_masked $dst, $mem, $idx, $v0\t# KILL $tmp" %}
+  format %{ "gather_loadS_masked $dst, $mem, $idx, $v0\t# KILL $tmp" %}
   ins_encode %{
     __ vmv1r_v(as_VectorRegister($tmp$$reg), as_VectorRegister($idx$$reg));
     BasicType bt = Matcher::vector_element_basic_type(this);
@@ -4833,14 +4848,32 @@ instruct gather_load_masked(vReg dst, indirect mem, vReg idx, vRegMask_V0 v0, vReg tmp) %{
   ins_pipe(pipe_slow);
 %}
 
+instruct gather_loadD_masked(vReg dst, indirect mem, vReg idx, vRegMask_V0 v0, vReg tmp) %{
+  predicate(type2aelembytes(Matcher::vector_element_basic_type(n)) == 8);
+  match(Set dst (LoadVectorGatherMasked mem (Binary idx v0)));
+  effect(TEMP_DEF dst, TEMP tmp);
+  format %{ "gather_loadD_masked $dst, $mem, $idx, $v0\t# KILL $tmp" %}
+  ins_encode %{
+    BasicType bt = Matcher::vector_element_basic_type(this);
+    Assembler::SEW sew = Assembler::elemtype_to_sew(bt);
+    __ vsetvli_helper(bt, Matcher::vector_length(this));
+    __ vzext_vf2(as_VectorRegister($tmp$$reg), as_VectorRegister($idx$$reg));
+    __ vsll_vi(as_VectorRegister($tmp$$reg), as_VectorRegister($tmp$$reg), (int)sew);
+    __ vxor_vv(as_VectorRegister($dst$$reg), as_VectorRegister($dst$$reg),
+               as_VectorRegister($dst$$reg));
+    __ vluxei64_v(as_VectorRegister($dst$$reg), as_Register($mem$$base),
+                  as_VectorRegister($tmp$$reg), Assembler::v0_t);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
 // ------------------------------ Vector Store Scatter -------------------------
 
-instruct scatter_store(indirect mem, vReg src, vReg idx, vReg tmp) %{
-  predicate(type2aelembytes(Matcher::vector_element_basic_type(n->in(3)->in(1))) == 4 ||
-            type2aelembytes(Matcher::vector_element_basic_type(n->in(3)->in(1))) == 8);
+instruct scatter_storeS(indirect mem, vReg src, vReg idx, vReg tmp) %{
+  predicate(type2aelembytes(Matcher::vector_element_basic_type(n->in(3)->in(1))) == 4);
   match(Set mem (StoreVectorScatter mem (Binary src idx)));
   effect(TEMP tmp);
-  format %{ "scatter_store $mem, $idx, $src\t# KILL $tmp" %}
+  format %{ "scatter_storeS $mem, $idx, $src\t# KILL $tmp" %}
   ins_encode %{
     __ vmv1r_v(as_VectorRegister($tmp$$reg), as_VectorRegister($idx$$reg));
     BasicType bt = Matcher::vector_element_basic_type(this, $src);
@@ -4853,12 +4886,28 @@ instruct scatter_store(indirect mem, vReg src, vReg idx, vReg tmp) %{
   ins_pipe(pipe_slow);
 %}
 
-instruct scatter_store_masked(indirect mem, vReg src, vReg idx, vRegMask_V0 v0, vReg tmp) %{
-  predicate(type2aelembytes(Matcher::vector_element_basic_type(n->in(3)->in(1))) == 4 ||
-            type2aelembytes(Matcher::vector_element_basic_type(n->in(3)->in(1))) == 8);
+instruct scatter_storeD(indirect mem, vReg src, vReg idx, vReg tmp) %{
+  predicate(type2aelembytes(Matcher::vector_element_basic_type(n->in(3)->in(1))) == 8);
+  match(Set mem (StoreVectorScatter mem (Binary src idx)));
+  effect(TEMP tmp);
+  format %{ "scatter_storeD $mem, $idx, $src\t# KILL $tmp" %}
+  ins_encode %{
+    BasicType bt = Matcher::vector_element_basic_type(this, $src);
+    Assembler::SEW sew = Assembler::elemtype_to_sew(bt);
+    __ vsetvli_helper(bt, Matcher::vector_length(this, $src));
+    __ vzext_vf2(as_VectorRegister($tmp$$reg), as_VectorRegister($idx$$reg));
+    __ vsll_vi(as_VectorRegister($tmp$$reg), as_VectorRegister($tmp$$reg), (int)sew);
+    __ vsuxei64_v(as_VectorRegister($src$$reg), as_Register($mem$$base),
+                  as_VectorRegister($tmp$$reg));
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct scatter_storeS_masked(indirect mem, vReg src, vReg idx, vRegMask_V0 v0, vReg tmp) %{
+  predicate(type2aelembytes(Matcher::vector_element_basic_type(n->in(3)->in(1))) == 4);
   match(Set mem (StoreVectorScatterMasked mem (Binary src (Binary idx v0))));
   effect(TEMP tmp);
-  format %{ "scatter_store_masked $mem, $idx, $src, $v0\t# KILL $tmp" %}
+  format %{ "scatter_storeS_masked $mem, $idx, $src, $v0\t# KILL $tmp" %}
   ins_encode %{
     __ vmv1r_v(as_VectorRegister($tmp$$reg), as_VectorRegister($idx$$reg));
     BasicType bt = Matcher::vector_element_basic_type(this, $src);
@@ -4871,6 +4920,23 @@ instruct scatter_store_masked(indirect mem, vReg src, vReg idx, vRegMask_V0 v0, vReg tmp) %{
   ins_pipe(pipe_slow);
 %}
 
+instruct scatter_storeD_masked(indirect mem, vReg src, vReg idx, vRegMask_V0 v0, vReg tmp) %{
+  predicate(type2aelembytes(Matcher::vector_element_basic_type(n->in(3)->in(1))) == 8);
+  match(Set mem (StoreVectorScatterMasked mem (Binary src (Binary idx v0))));
+  effect(TEMP tmp);
+  format %{ "scatter_storeD_masked $mem, $idx, $src, $v0\t# KILL $tmp" %}
+  ins_encode %{
+    BasicType bt = Matcher::vector_element_basic_type(this, $src);
+    Assembler::SEW sew = Assembler::elemtype_to_sew(bt);
+    __ vsetvli_helper(bt, Matcher::vector_length(this, $src));
+    __ vzext_vf2(as_VectorRegister($tmp$$reg), as_VectorRegister($idx$$reg));
+    __ vsll_vi(as_VectorRegister($tmp$$reg), as_VectorRegister($tmp$$reg), (int)sew);
+    __ vsuxei64_v(as_VectorRegister($src$$reg), as_Register($mem$$base),
+                  as_VectorRegister($tmp$$reg), Assembler::v0_t);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
 // ------------------------------ Populate Index to a Vector -------------------
 
 instruct populateindex(vReg dst, iRegIorL2I src1, iRegIorL2I src2, vReg tmp) %{
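A note on the gather/scatter hunks above: all of the new variants share one index-widening idiom. `vzext_vf2` zero-extends the 32-bit indices to 64 bits, `vsll_vi` shifts them left by the element-width SEW to turn them into byte offsets, and `vluxei64_v`/`vsuxei64_v` then consume those offsets for the unordered indexed access. Below is a minimal scalar sketch of the per-lane address arithmetic, for orientation only (plain C++, not HotSpot code; all names are illustrative):

```cpp
#include <cstdint>
#include <cstring>
#include <cstddef>

// Per-lane model of the RVV sequence vzext_vf2 -> vsll_vi -> vluxei64_v:
// widen each 32-bit index, scale it into a byte offset, and gather from
// base + offset. sew is log2(element bytes): 2 for the "S" patterns
// (4-byte elements), 3 for the "D" patterns (8-byte elements).
void gather_load_model(const uint8_t* base, const uint32_t* idx,
                       uint8_t* dst, size_t vl, unsigned sew) {
  const size_t elem_bytes = size_t(1) << sew;
  for (size_t i = 0; i < vl; i++) {
    const uint64_t byte_offset = uint64_t(idx[i]) << sew; // zero-extend, shift
    std::memcpy(dst + i * elem_bytes, base + byte_offset, elem_bytes);
  }
}
```

Splitting the old combined patterns into `S` (== 4) and `D` (== 8) variants lets each pattern commit to a single element width; the masked `D` forms additionally clear `dst` with `vxor_vv` first, since the `v0.t`-masked load leaves inactive lanes untouched.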
@@ -249,7 +249,6 @@ address StubGenerator::generate_intpoly_montgomeryMult_P256() {
   const Register tmp = r9;
 
   montgomeryMultiply(aLimbs, bLimbs, rLimbs, tmp, _masm);
-  __ mov64(rax, 0x1); // Return 1 (Fig. 5, Step 6 [1] skipped in montgomeryMultiply)
 
   __ leave();
   __ ret(0);
@@ -34,6 +34,7 @@
 #include "memory/iterator.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
+#include "sanitizers/ub.hpp"
 #include "utilities/bitMap.inline.hpp"
 #include "utilities/copy.hpp"
 
@@ -61,6 +62,7 @@ ptrdiff_t ArchiveHeapLoader::_mapped_heap_delta = 0;
 
 // Every mapped region is offset by _mapped_heap_delta from its requested address.
 // See FileMapInfo::heap_region_requested_address().
+ATTRIBUTE_NO_UBSAN
 void ArchiveHeapLoader::init_mapped_heap_info(address mapped_heap_bottom, ptrdiff_t delta, int dumptime_oop_shift) {
   assert(!_mapped_heap_relocation_initialized, "only once");
   if (!UseCompressedOops) {
@@ -241,9 +241,14 @@ LockedClassesDo::~LockedClassesDo() {
 
 
 // Iterating over the CLDG needs to be locked because
-// unloading can remove entries concurrently soon.
-template <bool keep_alive = true>
-class ClassLoaderDataGraphIteratorBase : public StackObj {
+// unloading can remove entries concurrently.
+// This iterator does not keep the CLD alive.
+// Any CLD OopHandles (modules, mirrors, resolved refs)
+// resolved must be treated as no keepalive. And requires
+// that its CLD's holder is kept alive if they escape the
+// caller's safepoint or ClassLoaderDataGraph_lock
+// critical section.
+class ClassLoaderDataGraph::ClassLoaderDataGraphIterator : public StackObj {
   ClassLoaderData* _next;
   Thread*          _thread;
   HandleMark       _hm;  // clean up handles when this is done.
@@ -251,12 +256,8 @@ class ClassLoaderDataGraphIteratorBase : public StackObj {
   // unless verifying at a safepoint.
 
 public:
-  ClassLoaderDataGraphIteratorBase() : _next(ClassLoaderDataGraph::_head), _thread(Thread::current()), _hm(_thread) {
-    if (keep_alive) {
-      assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
-    } else {
-      assert_at_safepoint();
-    }
+  ClassLoaderDataGraphIterator() : _next(ClassLoaderDataGraph::_head), _thread(Thread::current()), _hm(_thread) {
+    assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
   }
 
   ClassLoaderData* get_next() {
@@ -266,10 +267,6 @@ public:
       cld = cld->next();
     }
    if (cld != nullptr) {
-      if (keep_alive) {
-        // Keep cld that is being returned alive.
-        Handle(_thread, cld->holder());
-      }
      _next = cld->next();
    } else {
      _next = nullptr;
@@ -278,9 +275,6 @@ public:
   }
 };
 
-using ClassLoaderDataGraphIterator = ClassLoaderDataGraphIteratorBase<true /* keep_alive */>;
-using ClassLoaderDataGraphIteratorNoKeepAlive = ClassLoaderDataGraphIteratorBase<false /* keep_alive */>;
-
 void ClassLoaderDataGraph::loaded_cld_do(CLDClosure* cl) {
   ClassLoaderDataGraphIterator iter;
   while (ClassLoaderData* cld = iter.get_next()) {
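The iterator removed in the hunks above selected its keep-alive behavior at compile time: `ClassLoaderDataGraphIteratorBase<bool>` folds the `Handle`-creation branch in or out per instantiation, and the two `using` aliases were its only instantiations. The dispatch pattern, reduced to a sketch (simplified stand-in types, not the HotSpot classes):

```cpp
// Compile-time keep-alive selection, mirroring the removed
// ClassLoaderDataGraphIteratorBase<bool keep_alive>: the branch is resolved
// per instantiation, so the no-keep-alive variant compiles to a bare walk.
struct Node {
  Node* next;
  void pin() { /* stand-in for Handle(_thread, cld->holder()) */ }
};

template <bool keep_alive>
class GraphIteratorBase {
  Node* _next;
 public:
  explicit GraphIteratorBase(Node* head) : _next(head) {}
  Node* get_next() {
    Node* cur = _next;
    if (cur != nullptr) {
      if (keep_alive) {
        cur->pin();  // keep the returned element alive, as the original did
      }
      _next = cur->next;
    }
    return cur;
  }
};

using GraphIterator            = GraphIteratorBase<true  /* keep_alive */>;
using GraphIteratorNoKeepAlive = GraphIteratorBase<false /* keep_alive */>;
```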
@@ -288,13 +282,6 @@ void ClassLoaderDataGraph::loaded_cld_do(CLDClosure* cl) {
   }
 }
 
-void ClassLoaderDataGraph::loaded_cld_do_no_keepalive(CLDClosure* cl) {
-  ClassLoaderDataGraphIteratorNoKeepAlive iter;
-  while (ClassLoaderData* cld = iter.get_next()) {
-    cl->do_cld(cld);
-  }
-}
-
 // These functions assume that the caller has locked the ClassLoaderDataGraph_lock
 // if they are not calling the function from a safepoint.
 void ClassLoaderDataGraph::classes_do(KlassClosure* klass_closure) {
@@ -318,6 +305,16 @@ void ClassLoaderDataGraph::methods_do(void f(Method*)) {
   }
 }
 
+void ClassLoaderDataGraph::modules_do_keepalive(void f(ModuleEntry*)) {
+  assert_locked_or_safepoint(Module_lock);
+  ClassLoaderDataGraphIterator iter;
+  while (ClassLoaderData* cld = iter.get_next()) {
+    // Keep the holder alive.
+    (void)cld->holder();
+    cld->modules_do(f);
+  }
+}
+
 void ClassLoaderDataGraph::modules_do(void f(ModuleEntry*)) {
   assert_locked_or_safepoint(Module_lock);
   ClassLoaderDataGraphIterator iter;
@@ -334,9 +331,11 @@ void ClassLoaderDataGraph::packages_do(void f(PackageEntry*)) {
   }
 }
 
-void ClassLoaderDataGraph::loaded_classes_do(KlassClosure* klass_closure) {
+void ClassLoaderDataGraph::loaded_classes_do_keepalive(KlassClosure* klass_closure) {
   ClassLoaderDataGraphIterator iter;
   while (ClassLoaderData* cld = iter.get_next()) {
+    // Keep the holder alive.
+    (void)cld->holder();
     cld->loaded_classes_do(klass_closure);
   }
 }
@@ -346,7 +345,7 @@ void ClassLoaderDataGraph::classes_unloading_do(void f(Klass* const)) {
 }
 
 void ClassLoaderDataGraph::verify_dictionary() {
-  ClassLoaderDataGraphIteratorNoKeepAlive iter;
+  ClassLoaderDataGraphIterator iter;
   while (ClassLoaderData* cld = iter.get_next()) {
     if (cld->dictionary() != nullptr) {
       cld->dictionary()->verify();
@@ -354,26 +353,28 @@ void ClassLoaderDataGraph::verify_dictionary() {
   }
 }
 
-#define FOR_ALL_DICTIONARY(X)   ClassLoaderDataGraphIterator iter; \
-                                while (ClassLoaderData* X = iter.get_next()) \
-                                  if (X->dictionary() != nullptr)
-
 void ClassLoaderDataGraph::print_dictionary(outputStream* st) {
-  FOR_ALL_DICTIONARY(cld) {
-    st->print("Dictionary for ");
-    cld->print_value_on(st);
-    st->cr();
-    cld->dictionary()->print_on(st);
-    st->cr();
+  ClassLoaderDataGraphIterator iter;
+  while (ClassLoaderData *cld = iter.get_next()) {
+    if (cld->dictionary() != nullptr) {
+      st->print("Dictionary for ");
+      cld->print_value_on(st);
+      st->cr();
+      cld->dictionary()->print_on(st);
+      st->cr();
+    }
   }
 }
 
 void ClassLoaderDataGraph::print_table_statistics(outputStream* st) {
-  FOR_ALL_DICTIONARY(cld) {
-    ResourceMark rm; // loader_name_and_id
-    stringStream tempst;
-    tempst.print("System Dictionary for %s class loader", cld->loader_name_and_id());
-    cld->dictionary()->print_table_statistics(st, tempst.freeze());
+  ClassLoaderDataGraphIterator iter;
+  while (ClassLoaderData *cld = iter.get_next()) {
+    if (cld->dictionary() != nullptr) {
+      ResourceMark rm; // loader_name_and_id
+      stringStream tempst;
+      tempst.print("System Dictionary for %s class loader", cld->loader_name_and_id());
+      cld->dictionary()->print_table_statistics(st, tempst.freeze());
+    }
   }
 }
 
@@ -550,7 +551,7 @@ Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass() {
 }
 
 void ClassLoaderDataGraph::verify() {
-  ClassLoaderDataGraphIteratorNoKeepAlive iter;
+  ClassLoaderDataGraphIterator iter;
   while (ClassLoaderData* cld = iter.get_next()) {
     cld->verify();
   }
@@ -37,10 +37,10 @@ class ClassLoaderDataGraph : public AllStatic {
   friend class ClassLoaderDataGraphMetaspaceIterator;
   friend class ClassLoaderDataGraphKlassIteratorAtomic;
   friend class ClassLoaderDataGraphKlassIteratorStatic;
-  template <bool keep_alive>
-  friend class ClassLoaderDataGraphIteratorBase;
   friend class VMStructs;
 private:
+  class ClassLoaderDataGraphIterator;
+
   // All CLDs (except unlinked CLDs) can be reached by walking _head->_next->...
   static ClassLoaderData* volatile _head;
 
@@ -71,8 +71,12 @@ class ClassLoaderDataGraph : public AllStatic {
   static void roots_cld_do(CLDClosure* strong, CLDClosure* weak);
   static void always_strong_cld_do(CLDClosure* cl);
+  // Iteration through CLDG not by GC.
+  // All the do suffixed functions do not keep the CLD alive. Any CLD OopHandles
+  // (modules, mirrors, resolved refs) resolved must be treated as no keepalive.
+  // And requires that its CLD's holder is kept alive if they escape the
+  // caller's safepoint or ClassLoaderDataGraph_lock critical section.
+  // The do_keepalive suffixed functions will keep all CLDs alive.
   static void loaded_cld_do(CLDClosure* cl);
-  static void loaded_cld_do_no_keepalive(CLDClosure* cl);
   // klass do
   // Walking classes through the ClassLoaderDataGraph include array classes. It also includes
   // classes that are allocated but not loaded, classes that have errors, and scratch classes
@@ -81,9 +85,10 @@ class ClassLoaderDataGraph : public AllStatic {
   static void classes_do(KlassClosure* klass_closure);
   static void classes_do(void f(Klass* const));
   static void methods_do(void f(Method*));
+  static void modules_do_keepalive(void f(ModuleEntry*));
   static void modules_do(void f(ModuleEntry*));
   static void packages_do(void f(PackageEntry*));
-  static void loaded_classes_do(KlassClosure* klass_closure);
+  static void loaded_classes_do_keepalive(KlassClosure* klass_closure);
   static void classes_unloading_do(void f(Klass* const));
   static bool do_unloading();
 
@@ -165,7 +165,7 @@ void ClassLoaderStatsClosure::addEmptyParents(oop cl) {
 
 void ClassLoaderStatsVMOperation::doit() {
   ClassLoaderStatsClosure clsc (_out);
-  ClassLoaderDataGraph::loaded_cld_do_no_keepalive(&clsc);
+  ClassLoaderDataGraph::loaded_cld_do(&clsc);
   clsc.print();
 }
 
@@ -788,6 +788,7 @@ int java_lang_Class::_class_loader_offset;
 int java_lang_Class::_module_offset;
 int java_lang_Class::_protection_domain_offset;
 int java_lang_Class::_component_mirror_offset;
+int java_lang_Class::_init_lock_offset;
 int java_lang_Class::_signers_offset;
 int java_lang_Class::_name_offset;
 int java_lang_Class::_source_file_offset;
@@ -911,6 +912,12 @@ void java_lang_Class::initialize_mirror_fields(Klass* k,
                                                Handle protection_domain,
                                                Handle classData,
                                                TRAPS) {
+  // Allocate a simple java object for a lock.
+  // This needs to be a java object because during class initialization
+  // it can be held across a java call.
+  typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK);
+  set_init_lock(mirror(), r);
+
   // Set protection domain also
   set_protection_domain(mirror(), protection_domain());
 
@@ -1132,6 +1139,10 @@ bool java_lang_Class::restore_archived_mirror(Klass *k,
   if (!k->is_array_klass()) {
     // - local static final fields with initial values were initialized at dump time
 
+    // create the init_lock
+    typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK_(false));
+    set_init_lock(mirror(), r);
+
     if (protection_domain.not_null()) {
       set_protection_domain(mirror(), protection_domain());
     }
@@ -1196,6 +1207,15 @@ oop java_lang_Class::component_mirror(oop java_class) {
   return java_class->obj_field(_component_mirror_offset);
 }
 
+oop java_lang_Class::init_lock(oop java_class) {
+  assert(_init_lock_offset != 0, "must be set");
+  return java_class->obj_field(_init_lock_offset);
+}
+void java_lang_Class::set_init_lock(oop java_class, oop init_lock) {
+  assert(_init_lock_offset != 0, "must be set");
+  java_class->obj_field_put(_init_lock_offset, init_lock);
+}
+
 objArrayOop java_lang_Class::signers(oop java_class) {
   assert(_signers_offset != 0, "must be set");
   return (objArrayOop)java_class->obj_field(_signers_offset);
@@ -1415,12 +1435,18 @@ void java_lang_Class::compute_offsets() {
   InstanceKlass* k = vmClasses::Class_klass();
   CLASS_FIELDS_DO(FIELD_COMPUTE_OFFSET);
 
+  // Init lock is a C union with component_mirror. Only instanceKlass mirrors have
+  // init_lock and only ArrayKlass mirrors have component_mirror. Since both are oops
+  // GC treats them the same.
+  _init_lock_offset = _component_mirror_offset;
+
   CLASS_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET);
 }
 
 #if INCLUDE_CDS
 void java_lang_Class::serialize_offsets(SerializeClosure* f) {
   f->do_bool(&_offsets_computed);
+  f->do_u4((u4*)&_init_lock_offset);
 
   CLASS_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -226,6 +226,7 @@ class java_lang_Class : AllStatic {
   static int _static_oop_field_count_offset;
 
   static int _protection_domain_offset;
+  static int _init_lock_offset;
   static int _signers_offset;
   static int _class_loader_offset;
   static int _module_offset;
@@ -240,6 +241,7 @@ class java_lang_Class : AllStatic {
   static GrowableArray<Klass*>* _fixup_mirror_list;
   static GrowableArray<Klass*>* _fixup_module_field_list;
 
+  static void set_init_lock(oop java_class, oop init_lock);
   static void set_protection_domain(oop java_class, oop protection_domain);
   static void set_class_loader(oop java_class, oop class_loader);
   static void set_component_mirror(oop java_class, oop comp_mirror);
@@ -292,6 +294,10 @@ class java_lang_Class : AllStatic {
 
   // Support for embedded per-class oops
   static oop protection_domain(oop java_class);
+  static oop init_lock(oop java_class);
+  static void clear_init_lock(oop java_class) {
+    set_init_lock(java_class, nullptr);
+  }
   static oop component_mirror(oop java_class);
   static objArrayOop signers(oop java_class);
   static void set_signers(oop java_class, objArrayOop signers);
@@ -177,7 +177,7 @@ class SystemDictionary : AllStatic {
 
   static void classes_do(MetaspaceClosure* it);
-  // Iterate over all methods in all klasses
 
+  // Will not keep metadata alive. See ClassLoaderDataGraph::methods_do.
   static void methods_do(void f(Method*));
 
   // Garbage collection support
@@ -32,6 +32,7 @@
 #include "classfile/stackMapTableFormat.hpp"
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
+#include "classfile/systemDictionaryShared.hpp"
 #include "classfile/verifier.hpp"
 #include "classfile/vmClasses.hpp"
 #include "classfile/vmSymbols.hpp"
@@ -212,6 +213,11 @@ bool Verifier::verify(InstanceKlass* klass, bool should_verify_class, TRAPS) {
        exception_name == vmSymbols::java_lang_ClassFormatError())) {
-      log_info(verification)("Fail over class verification to old verifier for: %s", klass->external_name());
+      log_info(class, init)("Fail over class verification to old verifier for: %s", klass->external_name());
+      // Exclude any classes that fail over during dynamic dumping
+      if (CDSConfig::is_dumping_dynamic_archive()) {
+        SystemDictionaryShared::warn_excluded(klass, "Failed over class verification while dynamic dumping");
+        SystemDictionaryShared::set_excluded(klass);
+      }
       message_buffer = NEW_RESOURCE_ARRAY(char, message_buffer_len);
       exception_message = message_buffer;
       exception_name = inference_verify(
@@ -529,8 +529,8 @@ class methodHandle;
   /* support for sun.security.util.math.intpoly.MontgomeryIntegerPolynomialP256 */ \
   do_class(sun_security_util_math_intpoly_MontgomeryIntegerPolynomialP256, "sun/security/util/math/intpoly/MontgomeryIntegerPolynomialP256") \
   do_intrinsic(_intpoly_montgomeryMult_P256, sun_security_util_math_intpoly_MontgomeryIntegerPolynomialP256, intPolyMult_name, intPolyMult_signature, F_R) \
-   do_name(intPolyMult_name, "mult") \
-   do_signature(intPolyMult_signature, "([J[J[J)I") \
+   do_name(intPolyMult_name, "multImpl") \
+   do_signature(intPolyMult_signature, "([J[J[J)V") \
   \
   do_class(sun_security_util_math_intpoly_IntegerPolynomial, "sun/security/util/math/intpoly/IntegerPolynomial") \
   do_intrinsic(_intpoly_assign, sun_security_util_math_intpoly_IntegerPolynomial, intPolyAssign_name, intPolyAssign_signature, F_S) \
@@ -557,6 +557,7 @@ class SerializeClosure;
   template(bool_array_signature, "[Z") \
   template(byte_array_signature, "[B") \
   template(char_array_signature, "[C") \
+  template(int_array_signature, "[I") \
   template(runnable_signature, "Ljava/lang/Runnable;") \
   template(continuation_signature, "Ljdk/internal/vm/Continuation;") \
   template(continuationscope_signature, "Ljdk/internal/vm/ContinuationScope;") \
@@ -761,7 +761,7 @@ DirectiveSet* DirectivesStack::getMatchingDirective(const methodHandle& method,
       if (dir->is_default_directive() || dir->match(method)) {
         match = dir->get_for(comp);
         assert(match != nullptr, "Consistency");
-        if (match->EnableOption) {
+        if (match->EnableOption || dir->is_default_directive()) {
           // The directiveSet for this compile is also enabled -> success
           dir->inc_refcount();
           break;
@@ -172,13 +172,9 @@ int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
   nmethod* nm = cb->as_nmethod();
   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
 
-  if (!bs_nm->is_armed(nm)) {
-    return 0;
-  }
-
-  assert(!nm->is_osr_method(), "Should not reach here");
   // Called upon first entry after being armed
   bool may_enter = bs_nm->nmethod_entry_barrier(nm);
+  assert(!nm->is_osr_method() || may_enter, "OSR nmethods should always be entrant after migration");
 
   // In case a concurrent thread disarmed the nmethod, we need to ensure the new instructions
   // are made visible, by using a cross modify fence. Note that this is synchronous cross modifying
@@ -188,11 +184,11 @@ int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
   // it can be made conditional on the nmethod_patching_type.
   OrderAccess::cross_modify_fence();
 
-  // Diagnostic option to force deoptimization 1 in 3 times. It is otherwise
+  // Diagnostic option to force deoptimization 1 in 10 times. It is otherwise
   // a very rare event.
-  if (DeoptimizeNMethodBarriersALot) {
+  if (DeoptimizeNMethodBarriersALot && !nm->is_osr_method()) {
     static volatile uint32_t counter=0;
-    if (Atomic::add(&counter, 1u) % 3 == 0) {
+    if (Atomic::add(&counter, 1u) % 10 == 0) {
       may_enter = false;
     }
   }
@@ -205,15 +201,6 @@ int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
 }
 
 bool BarrierSetNMethod::nmethod_osr_entry_barrier(nmethod* nm) {
-  // This check depends on the invariant that all nmethods that are deoptimized / made not entrant
-  // are NOT disarmed.
-  // This invariant is important because a method can be deoptimized after the method have been
-  // resolved / looked up by OSR by another thread. By not deoptimizing them we guarantee that
-  // a deoptimized method will always hit the barrier and come to the same conclusion - deoptimize
-  if (!is_armed(nm)) {
-    return true;
-  }
-
   assert(nm->is_osr_method(), "Should not reach here");
   log_trace(nmethod, barrier)("Running osr nmethod entry barrier: " PTR_FORMAT, p2i(nm));
   bool result = nmethod_entry_barrier(nm);
@@ -132,7 +132,7 @@ bool VM_GC_Operation::doit_prologue() {
 void VM_GC_Operation::doit_epilogue() {
   // GC thread root traversal likely used OopMapCache a lot, which
   // might have created lots of old entries. Trigger the cleanup now.
-  OopMapCache::trigger_cleanup();
+  OopMapCache::try_trigger_cleanup();
   if (Universe::has_reference_pending_list()) {
     Heap_lock->notify_all();
   }
@@ -36,13 +36,19 @@
 #include "runtime/threadWXSetters.inline.hpp"
 
 bool ShenandoahBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
+  if (!is_armed(nm)) {
+    // Some other thread got here first and healed the oops
+    // and disarmed the nmethod. No need to continue.
+    return true;
+  }
+
   ShenandoahReentrantLock* lock = ShenandoahNMethod::lock_for_nmethod(nm);
   assert(lock != nullptr, "Must be");
   ShenandoahReentrantLocker locker(lock);
 
   if (!is_armed(nm)) {
-    // Some other thread got here first and healed the oops
-    // and disarmed the nmethod.
+    // Some other thread managed to complete while we were
+    // waiting for lock. No need to continue.
     return true;
   }
 
@@ -32,40 +32,49 @@
 #include "runtime/javaThread.hpp"
 #include "runtime/os.inline.hpp"
 
-// These are inline variants of Thread::SpinAcquire with optional blocking in VM.
-
-class ShenandoahNoBlockOp : public StackObj {
-public:
-  ShenandoahNoBlockOp(JavaThread* java_thread) {
-    assert(java_thread == nullptr, "Should not pass anything");
-  }
-};
-
 void ShenandoahLock::contended_lock(bool allow_block_for_safepoint) {
   Thread* thread = Thread::current();
   if (allow_block_for_safepoint && thread->is_Java_thread()) {
-    contended_lock_internal<ThreadBlockInVM>(JavaThread::cast(thread));
+    contended_lock_internal<true>(JavaThread::cast(thread));
   } else {
-    contended_lock_internal<ShenandoahNoBlockOp>(nullptr);
+    contended_lock_internal<false>(nullptr);
   }
 }
 
-template<typename BlockOp>
+template<bool ALLOW_BLOCK>
 void ShenandoahLock::contended_lock_internal(JavaThread* java_thread) {
-  int ctr = 0;
-  int yields = 0;
+  assert(!ALLOW_BLOCK || java_thread != nullptr, "Must have a Java thread when allowing block.");
+  // Spin this much on multi-processor, do not spin on uni-processor.
+  int ctr = os::is_MP() ? 0xFF : 0;
+  // Apply TTAS to avoid more expensive CAS calls if the lock is still held by other thread.
   while (Atomic::load(&_state) == locked ||
          Atomic::cmpxchg(&_state, unlocked, locked) != unlocked) {
-    if ((++ctr & 0xFFF) == 0) {
-      BlockOp block(java_thread);
-      if (yields > 5) {
-        os::naked_short_sleep(1);
-      } else {
-        os::naked_yield();
-        yields++;
-      }
+    if (ctr > 0 && !SafepointSynchronize::is_synchronizing()) {
+      // Lightly contended, spin a little if no safepoint is pending.
+      SpinPause();
+      ctr--;
+    } else if (ALLOW_BLOCK) {
+      ThreadBlockInVM block(java_thread);
+      if (SafepointSynchronize::is_synchronizing()) {
+        // If safepoint is pending, we want to block and allow safepoint to proceed.
+        // Normally, TBIVM above would block us in its destructor.
+        //
+        // But that blocking only happens when TBIVM knows the thread poll is armed.
+        // There is a window between announcing a safepoint and arming the thread poll
+        // during which trying to continuously enter TBIVM is counter-productive.
+        // Under high contention, we may end up going in circles thousands of times.
+        // To avoid it, we wait here until local poll is armed and then proceed
+        // to TBIVM exit for blocking. We do not SpinPause, but yield to let
+        // VM thread to arm the poll sooner.
+        while (SafepointSynchronize::is_synchronizing() &&
+               !SafepointMechanism::local_poll_armed(java_thread)) {
+          os::naked_yield();
+        }
+      } else {
+        os::naked_yield();
+      }
     } else {
-      SpinPause();
+      os::naked_yield();
     }
   }
 }
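The rewritten loop above is a test-and-test-and-set (TTAS) spin, as the new comment says: a plain load polls `_state` first, and the expensive compare-and-swap is only attempted once the lock looks free. Stripped of the safepoint handling, the core idiom looks like this (a standalone C++11 sketch, not the HotSpot class):

```cpp
#include <atomic>

// Minimal TTAS spinlock: poll with a cheap load, CAS only when the lock
// appears free. Contended waiters then spin on a locally cached line
// instead of issuing read-modify-write traffic on every iteration.
class TTASLock {
  std::atomic<int> _state{0};  // 0 = unlocked, 1 = locked
 public:
  void lock() {
    for (;;) {
      if (_state.load(std::memory_order_relaxed) == 0) {
        int expected = 0;
        if (_state.compare_exchange_weak(expected, 1,
                                         std::memory_order_acquire)) {
          return;
        }
      }
      // Spin here. ShenandoahLock layers more on top: bounded SpinPause(),
      // yielding, and blocking in ThreadBlockInVM when a safepoint is pending.
    }
  }
  void unlock() { _state.store(0, std::memory_order_release); }
};
```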
@@ -37,20 +37,22 @@ private:
   shenandoah_padding(0);
   volatile LockState _state;
   shenandoah_padding(1);
-  volatile Thread* _owner;
+  Thread* volatile _owner;
   shenandoah_padding(2);
 
-  template<typename BlockOp>
+  template<bool ALLOW_BLOCK>
   void contended_lock_internal(JavaThread* java_thread);
 
 public:
   ShenandoahLock() : _state(unlocked), _owner(nullptr) {};
 
   void lock(bool allow_block_for_safepoint) {
     assert(Atomic::load(&_owner) != Thread::current(), "reentrant locking attempt, would deadlock");
 
-    // Try to lock fast, or dive into contended lock handling.
-    if (Atomic::cmpxchg(&_state, unlocked, locked) != unlocked) {
+    if ((allow_block_for_safepoint && SafepointSynchronize::is_synchronizing()) ||
+        (Atomic::cmpxchg(&_state, unlocked, locked) != unlocked)) {
+      // 1. Java thread, and there is a pending safepoint. Dive into contended locking
+      //    immediately without trying anything else, and block.
+      // 2. Fast lock fails, dive into contended lock handling.
       contended_lock(allow_block_for_safepoint);
     }
@@ -44,7 +44,7 @@ void VM_ShenandoahOperation::doit_epilogue() {
   assert(!ShenandoahHeap::heap()->has_gc_state_changed(), "GC State was not synchronized to java threads.");
   // GC thread root traversal likely used OopMapCache a lot, which
   // might have created lots of old entries. Trigger the cleanup now.
-  OopMapCache::trigger_cleanup();
+  OopMapCache::try_trigger_cleanup();
 }
 
 bool VM_ShenandoahReferenceOperation::doit_prologue() {
@@ -134,7 +134,7 @@ public:
 
     // GC thread root traversal likely used OopMapCache a lot, which
     // might have created lots of old entries. Trigger the cleanup now.
-    OopMapCache::trigger_cleanup();
+    OopMapCache::try_trigger_cleanup();
   }
 
   bool gc_locked() const {
@@ -524,6 +524,10 @@ static bool rule_major_allocation_rate(const ZDirectorStats& stats) {
 }
 
 static double calculate_young_to_old_worker_ratio(const ZDirectorStats& stats) {
+  if (!stats._old_stats._cycle._is_time_trustable) {
+    return 1.0;
+  }
+
   const double young_gc_time = gc_time(stats._young_stats);
   const double old_gc_time = gc_time(stats._old_stats);
   const size_t reclaimed_per_young_gc = stats._young_stats._stat_heap._reclaimed_avg;
@@ -456,7 +456,7 @@ public:
 
     // GC thread root traversal likely used OopMapCache a lot, which
     // might have created lots of old entries. Trigger the cleanup now.
-    OopMapCache::trigger_cleanup();
+    OopMapCache::try_trigger_cleanup();
   }
 
   bool success() const {
@@ -1810,7 +1810,7 @@ void LinkResolver::resolve_invokedynamic(CallInfo& result, const constantPoolHandle& pool,
   // the interpreter or runtime performs a serialized check of
   // the relevant ResolvedIndyEntry::method field. This is done by the caller
   // of this method, via CPC::set_dynamic_call, which uses
-  // a lock to do the final serialization of updates
+  // an ObjectLocker to do the final serialization of updates
   // to ResolvedIndyEntry state, including method.
 
   // Log dynamic info to CDS classlist.
@@ -592,10 +592,13 @@ bool OopMapCache::has_cleanup_work() {
   return Atomic::load(&_old_entries) != nullptr;
 }
 
-void OopMapCache::trigger_cleanup() {
-  if (has_cleanup_work()) {
-    MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
+void OopMapCache::try_trigger_cleanup() {
+  // See if we can take the lock for the notification without blocking.
+  // This allows triggering the cleanup from GC paths, that can hold
+  // the service lock for e.g. oop iteration in service thread.
+  if (has_cleanup_work() && Service_lock->try_lock_without_rank_check()) {
     Service_lock->notify_all();
+    Service_lock->unlock();
   }
 }
 
@@ -183,8 +183,8 @@ class OopMapCache : public CHeapObj<mtClass> {
   // Check if we need to clean up old entries
   static bool has_cleanup_work();
 
-  // Request cleanup if work is needed
-  static void trigger_cleanup();
+  // Request cleanup if work is needed and notification is currently possible
+  static void try_trigger_cleanup();
 
   // Clean up the old entries
   static void cleanup();
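The rename from `trigger_cleanup` to `try_trigger_cleanup` encodes the behavioral change above: the notification is now attempted with a non-blocking lock acquire, so a GC path that must not block on `Service_lock` simply skips the wakeup and relies on a later trigger. The shape of the pattern with standard C++ primitives, as a sketch rather than HotSpot's `Mutex` API:

```cpp
#include <mutex>
#include <condition_variable>

std::mutex service_mutex;            // stands in for Service_lock
std::condition_variable service_cv;

bool has_cleanup_work() { return true; }  // stand-in predicate

// Best-effort notification that never blocks the caller: if the lock is
// contended, give up and let a later trigger deliver the wakeup instead.
void try_trigger_cleanup() {
  if (has_cleanup_work() && service_mutex.try_lock()) {
    service_cv.notify_all();
    service_mutex.unlock();
  }
}
```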
@@ -30,6 +30,7 @@
 #include "jfr/recorder/jfrRecorder.hpp"
 #include "jfr/recorder/checkpoint/jfrMetadataEvent.hpp"
 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
+#include "jfr/recorder/repository/jfrChunk.hpp"
 #include "jfr/recorder/repository/jfrRepository.hpp"
 #include "jfr/recorder/repository/jfrChunkRotation.hpp"
 #include "jfr/recorder/repository/jfrChunkWriter.hpp"
@@ -425,3 +426,7 @@ JVM_END
 JVM_ENTRY_NO_ENV(void, jfr_unregister_stack_filter(JNIEnv* env, jclass jvm, jlong id))
   JfrStackFilterRegistry::remove(id);
 JVM_END
+
+NO_TRANSITION(jlong, jfr_nanos_now(JNIEnv* env, jclass jvm))
+  return JfrChunk::nanos_now();
+NO_TRANSITION_END
@@ -165,6 +165,8 @@ jlong JNICALL jfr_register_stack_filter(JNIEnv* env, jclass jvm, jobjectArray classes,
 
 jlong JNICALL jfr_unregister_stack_filter(JNIEnv* env, jclass jvm, jlong id);
 
+jlong JNICALL jfr_nanos_now(JNIEnv* env, jclass jvm);
+
 #ifdef __cplusplus
 }
 #endif
@@ -100,7 +100,8 @@ JfrJniMethodRegistration::JfrJniMethodRegistration(JNIEnv* env) {
       (char*)"hostTotalSwapMemory", (char*)"()J", (void*) jfr_host_total_swap_memory,
       (char*)"emitDataLoss", (char*)"(J)V", (void*)jfr_emit_data_loss,
       (char*)"registerStackFilter", (char*)"([Ljava/lang/String;[Ljava/lang/String;)J", (void*)jfr_register_stack_filter,
-      (char*)"unregisterStackFilter", (char*)"(J)V", (void*)jfr_unregister_stack_filter
+      (char*)"unregisterStackFilter", (char*)"(J)V", (void*)jfr_unregister_stack_filter,
+      (char*)"nanosNow", (char*)"()J", (void*)jfr_nanos_now
     };
 
     const size_t method_array_length = sizeof(method) / sizeof(JNINativeMethod);
@@ -36,6 +36,7 @@
 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
 #include "jfr/recorder/service/jfrOptionSet.hpp"
 #include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
+#include "jfr/recorder/storage/jfrReferenceCountedStorage.hpp"
 #include "jfr/support/jfrKlassUnloading.hpp"
 #include "jfr/support/jfrMethodLookup.hpp"
 #include "jfr/utilities/jfrHashtable.hpp"
@@ -272,11 +273,30 @@ static void install_stack_traces(const ObjectSampler* sampler) {
   iterate_samples(installer);
 }
 
+// Resets the blob write states from the previous epoch.
+static void reset_blob_write_state(const ObjectSampler* sampler, JavaThread* jt) {
+  assert(sampler != nullptr, "invariant");
+  const ObjectSample* sample = sampler->last_resolved();
+  while (sample != nullptr) {
+    if (sample->has_stacktrace()) {
+      sample->stacktrace()->reset_write_state();
+    }
+    if (sample->has_thread()) {
+      sample->thread()->reset_write_state();
+    }
+    if (sample->has_type_set()) {
+      sample->type_set()->reset_write_state();
+    }
+    sample = sample->next();
+  }
+}
+
 void ObjectSampleCheckpoint::on_rotation(const ObjectSampler* sampler) {
   assert(sampler != nullptr, "invariant");
   assert(LeakProfiler::is_running(), "invariant");
   JavaThread* const thread = JavaThread::current();
   DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(thread);)
+  reset_blob_write_state(sampler, thread);
   if (!ObjectSampler::has_unresolved_entry()) {
     return;
   }
@@ -326,38 +346,34 @@ void ObjectSampleCheckpoint::write_stacktrace(const JfrStackTrace* trace, JfrCheckpointWriter& writer) {
   }
 }
 
-static void write_blob(const JfrBlobHandle& blob, JfrCheckpointWriter& writer, bool reset) {
-  if (reset) {
-    blob->reset_write_state();
-    return;
-  }
+static void write_blob(const JfrBlobHandle& blob, JfrCheckpointWriter& writer) {
   blob->exclusive_write(writer);
 }
 
-static void write_type_set_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
+static void write_type_set_blob(const ObjectSample* sample, JfrCheckpointWriter& writer) {
   if (sample->has_type_set()) {
-    write_blob(sample->type_set(), writer, reset);
+    write_blob(sample->type_set(), writer);
   }
 }
 
-static void write_thread_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
+static void write_thread_blob(const ObjectSample* sample, JfrCheckpointWriter& writer) {
   assert(sample->has_thread(), "invariant");
   if (sample->is_virtual_thread() || has_thread_exited(sample->thread_id())) {
-    write_blob(sample->thread(), writer, reset);
+    write_blob(sample->thread(), writer);
   }
 }
 
-static void write_stacktrace_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
+static void write_stacktrace_blob(const ObjectSample* sample, JfrCheckpointWriter& writer) {
   if (sample->has_stacktrace()) {
-    write_blob(sample->stacktrace(), writer, reset);
+    write_blob(sample->stacktrace(), writer);
   }
 }
 
-static void write_blobs(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
+static void write_blobs(const ObjectSample* sample, JfrCheckpointWriter& writer) {
   assert(sample != nullptr, "invariant");
-  write_stacktrace_blob(sample, writer, reset);
-  write_thread_blob(sample, writer, reset);
-  write_type_set_blob(sample, writer, reset);
+  write_stacktrace_blob(sample, writer);
+  write_thread_blob(sample, writer);
+  write_type_set_blob(sample, writer);
 }
 
 class BlobWriter {
@@ -365,18 +381,14 @@ class BlobWriter {
   const ObjectSampler* _sampler;
   JfrCheckpointWriter& _writer;
   const jlong _last_sweep;
-  bool _reset;
  public:
   BlobWriter(const ObjectSampler* sampler, JfrCheckpointWriter& writer, jlong last_sweep) :
-    _sampler(sampler), _writer(writer), _last_sweep(last_sweep), _reset(false) {}
+    _sampler(sampler), _writer(writer), _last_sweep(last_sweep) {}
   void sample_do(ObjectSample* sample) {
     if (sample->is_alive_and_older_than(_last_sweep)) {
-      write_blobs(sample, _writer, _reset);
+      write_blobs(sample, _writer);
     }
   }
-  void set_reset() {
-    _reset = true;
-  }
 };
 
 static void write_sample_blobs(const ObjectSampler* sampler, bool emit_all, Thread* thread) {
@@ -385,9 +397,6 @@ static void write_sample_blobs(const ObjectSampler* sampler, bool emit_all, Thread* thread) {
   JfrCheckpointWriter writer(thread, false);
   BlobWriter cbw(sampler, writer, last_sweep);
   iterate_samples(cbw, true);
-  // reset blob write states
-  cbw.set_reset();
-  iterate_samples(cbw, true);
 }
 
 void ObjectSampleCheckpoint::write(const ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread) {
@@ -403,67 +412,17 @@ void ObjectSampleCheckpoint::write(const ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread) {
   }
 }
 
-// A linked list of saved type set blobs for the epoch.
-// The link consist of a reference counted handle.
-static JfrBlobHandle saved_type_set_blobs;
-
-static void release_state_for_previous_epoch() {
-  // decrements the reference count and the list is reinitialized
-  saved_type_set_blobs = JfrBlobHandle();
-}
-
-class BlobInstaller {
- public:
-  ~BlobInstaller() {
-    release_state_for_previous_epoch();
-  }
-  void sample_do(ObjectSample* sample) {
-    if (!sample->is_dead()) {
-      sample->set_type_set(saved_type_set_blobs);
-    }
-  }
-};
-
-static void install_type_set_blobs() {
-  if (saved_type_set_blobs.valid()) {
-    BlobInstaller installer;
-    iterate_samples(installer);
-  }
-}
-
-static void save_type_set_blob(JfrCheckpointWriter& writer) {
-  assert(writer.has_data(), "invariant");
-  const JfrBlobHandle blob = writer.copy();
-  if (saved_type_set_blobs.valid()) {
-    saved_type_set_blobs->set_next(blob);
-  } else {
-    saved_type_set_blobs = blob;
-  }
-}
-
 // This routine has exclusive access to the sampler instance on entry.
-void ObjectSampleCheckpoint::on_type_set(JfrCheckpointWriter& writer) {
+void ObjectSampleCheckpoint::on_type_set(JavaThread* jt) {
   assert(LeakProfiler::is_running(), "invariant");
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(JavaThread::current());)
   assert(ClassLoaderDataGraph_lock->owned_by_self(), "invariant");
   if (!ObjectSampler::has_unresolved_entry()) {
     return;
   }
-  const ObjectSample* const last = ObjectSampler::sampler()->last();
+  ObjectSample* const last = ObjectSampler::sampler()->last();
   assert(last != nullptr, "invariant");
   assert(last != ObjectSampler::sampler()->last_resolved(), "invariant");
-  if (writer.has_data()) {
-    save_type_set_blob(writer);
-  }
-  install_type_set_blobs();
+  JfrReferenceCountedStorage::install(last, ObjectSampler::sampler()->last_resolved());
   ObjectSampler::sampler()->set_last_resolved(last);
 }
 
-// This routine does NOT have exclusive access to the sampler instance on entry.
-void ObjectSampleCheckpoint::on_type_set_unload(JfrCheckpointWriter& writer) {
-  assert(LeakProfiler::is_running(), "invariant");
-  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
-  if (writer.has_data() && ObjectSampler::has_unresolved_entry()) {
-    save_type_set_blob(writer);
-  }
-}
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,8 +50,7 @@ class ObjectSampleCheckpoint : AllStatic {
   static void write(const ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread);
   static void clear();
  public:
-  static void on_type_set(JfrCheckpointWriter& writer);
-  static void on_type_set_unload(JfrCheckpointWriter& writer);
+  static void on_type_set(JavaThread* jt);
   static void on_thread_exit(traceid tid);
   static void on_rotation(const ObjectSampler* sampler);
 };
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -233,7 +233,7 @@ class ObjectSample : public JfrCHeapObj {
     return _type_set.valid();
   }
 
-  void set_type_set(const JfrBlobHandle& ref) {
+  void install_type_set(const JfrBlobHandle& ref) {
     if (_type_set != ref) {
       if (_type_set.valid()) {
        _type_set->set_next(ref);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,6 +37,7 @@
 #include "jfr/recorder/service/jfrOptionSet.hpp"
 #include "jfr/recorder/storage/jfrEpochStorage.inline.hpp"
 #include "jfr/recorder/storage/jfrMemorySpace.inline.hpp"
+#include "jfr/recorder/storage/jfrReferenceCountedStorage.hpp"
 #include "jfr/recorder/storage/jfrStorageUtils.inline.hpp"
 #include "jfr/recorder/stringpool/jfrStringPool.hpp"
 #include "jfr/support/jfrDeprecationManager.hpp"
@@ -589,12 +590,14 @@ void JfrCheckpointManager::clear_type_set() {
     MutexLocker module_lock(Module_lock);
     JfrTypeSet::clear(&writer, &leakp_writer);
   }
-  JfrDeprecationManager::on_type_set(leakp_writer, nullptr, thread);
-  // We placed a blob in the Deprecated subsystem by moving the information
-  // from the leakp writer. For the real writer, the data will not be
-  // committed, because the JFR system is yet to be started.
-  // Therefore, the writer is cancelled before its destructor is run,
-  // to avoid writing unnecessary information into the checkpoint system.
+  JfrAddRefCountedBlob add_blob(leakp_writer);
+  JfrDeprecationManager::on_type_set(nullptr, thread);
+  // We installed a blob in the JfrReferenceCountedStorage subsystem
+  // by moving the information from the leakp writer.
+  // For the real writer, the data will not be committed,
+  // because the JFR system is yet to be started.
+  // Therefore, we cancel the writer before its destructor is run
+  // to avoid writing invalid information into the checkpoint system.
   writer.cancel();
 }
@@ -613,11 +616,11 @@ void JfrCheckpointManager::write_type_set() {
       MutexLocker module_lock(thread, Module_lock);
       JfrTypeSet::serialize(&writer, &leakp_writer, false, false);
     }
+    JfrAddRefCountedBlob add_blob(leakp_writer);
     if (LeakProfiler::is_running()) {
-      ObjectSampleCheckpoint::on_type_set(leakp_writer);
+      ObjectSampleCheckpoint::on_type_set(thread);
     }
     // Place this call after ObjectSampleCheckpoint::on_type_set.
-    JfrDeprecationManager::on_type_set(leakp_writer, _chunkwriter, thread);
+    JfrDeprecationManager::on_type_set(_chunkwriter, thread);
   }
   write();
 }
@@ -626,10 +629,7 @@ void JfrCheckpointManager::on_unloading_classes() {
   assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
   JfrCheckpointWriter writer(Thread::current());
   JfrTypeSet::on_unloading_classes(&writer);
-  if (LeakProfiler::is_running()) {
-    ObjectSampleCheckpoint::on_type_set_unload(writer);
-  }
   JfrDeprecationManager::on_type_set_unload(writer);
+  JfrAddRefCountedBlob add_blob(writer, false /* move */, false /* reset */);
 }
 
 static size_t flush_type_set(Thread* thread) {
@@ -54,6 +54,7 @@ struct JfrCheckpointContext {
|
||||
};
|
||||
|
||||
class JfrCheckpointWriter : public JfrCheckpointWriterBase {
|
||||
friend class JfrAddRefCountedBlob;
|
||||
friend class JfrCheckpointManager;
|
||||
friend class JfrDeprecationManager;
|
||||
friend class JfrSerializerRegistration;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -35,7 +35,7 @@ static const u2 JFR_VERSION_MAJOR = 2;
|
||||
static const u2 JFR_VERSION_MINOR = 1;
|
||||
|
||||
// strictly monotone
|
||||
static jlong nanos_now() {
|
||||
jlong JfrChunk::nanos_now() {
|
||||
static jlong last = 0;
|
||||
|
||||
jlong seconds;
|
||||
@@ -47,8 +47,6 @@ static jlong nanos_now() {
|
||||
const jlong now = seconds * 1000000000 + nanos;
|
||||
if (now > last) {
|
||||
last = now;
|
||||
} else {
|
||||
++last;
|
||||
}
|
||||
return last;
|
||||
}
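The hunk above promotes the file-local nanos_now() to JfrChunk::nanos_now() while keeping its strictly monotone contract: if the OS clock has not advanced (or has stepped backwards) since the last call, the cached value is bumped by one nanosecond. A minimal standalone sketch of the same idea, using std::chrono in place of HotSpot's os time API (names are illustrative, not the JDK code):

#include <chrono>
#include <cstdint>
#include <iostream>

// Strictly monotone timestamps: never returns the same value twice, even if
// the underlying clock stalls or steps backwards. Like the static-local
// scheme above, it is not thread-safe by itself.
static int64_t nanos_now() {
  static int64_t last = 0;
  const auto since_epoch = std::chrono::system_clock::now().time_since_epoch();
  const int64_t now =
      std::chrono::duration_cast<std::chrono::nanoseconds>(since_epoch).count();
  last = (now > last) ? now : last + 1;
  return last;
}

int main() {
  const int64_t a = nanos_now();
  const int64_t b = nanos_now();
  std::cout << (b > a) << '\n'; // prints 1: the second call is always later
}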
@@ -147,7 +145,7 @@ void JfrChunk::update_start_ticks() {
}

void JfrChunk::update_start_nanos() {
const jlong now = nanos_now();
const jlong now = JfrChunk::nanos_now();
assert(now >= _start_nanos, "invariant");
assert(now >= _last_update_nanos, "invariant");
_start_nanos = _last_update_nanos = now;

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,8 @@ const u1 PAD = 0;
class JfrChunk : public JfrCHeapObj {
friend class JfrChunkWriter;
friend class JfrChunkHeadWriter;
public:
static jlong nanos_now();
private:
char* _path;
int64_t _start_ticks;

@@ -0,0 +1,79 @@
/*
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#include "precompiled.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfr/recorder/storage/jfrReferenceCountedStorage.hpp"
#include "jfr/support/jfrDeprecationManager.hpp"

// Currently only two subsystems use type set blobs. Save a blob only if either has an unresolved entry.
static inline bool save_blob_predicate() {
return JfrDeprecationManager::has_unresolved_entry() || ObjectSampler::has_unresolved_entry();
}

JfrAddRefCountedBlob::JfrAddRefCountedBlob(JfrCheckpointWriter& writer, bool move /* true */, bool reset /* true */) : _reset(reset) {
if (writer.has_data()) {
if (save_blob_predicate()) {
JfrReferenceCountedStorage::save_blob(writer, move);
} else if (move) {
writer.cancel();
}
}
DEBUG_ONLY(if (reset) JfrReferenceCountedStorage::set_scope();)
}

JfrAddRefCountedBlob::~JfrAddRefCountedBlob() {
if (_reset) {
JfrReferenceCountedStorage::reset();
}
}

JfrBlobHandle JfrReferenceCountedStorage::_type_sets = JfrBlobHandle();
DEBUG_ONLY(bool JfrReferenceCountedStorage::_scope = false;)

void JfrReferenceCountedStorage::save_blob(JfrCheckpointWriter& writer, bool move /* false */) {
assert(writer.has_data(), "invariant");
const JfrBlobHandle blob = move ? writer.move() : writer.copy();
if (_type_sets.valid()) {
_type_sets->set_next(blob);
return;
}
_type_sets = blob;
}

void JfrReferenceCountedStorage::reset() {
assert(_scope, "invariant");
if (_type_sets.valid()) {
_type_sets = JfrBlobHandle();
}
DEBUG_ONLY(_scope = false;)
}

#ifdef ASSERT
void JfrReferenceCountedStorage::set_scope() {
assert(!_scope, "invariant");
_scope = true;
}
#endif
@@ -0,0 +1,68 @@
/*
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef SHARE_JFR_RECORDER_STORAGE_JFRREFERENCECOUNTEDSTORAGE_HPP
#define SHARE_JFR_RECORDER_STORAGE_JFRREFERENCECOUNTEDSTORAGE_HPP

#include "jfr/utilities/jfrBlob.hpp"
#include "memory/allocation.hpp"
#include "utilities/macros.hpp"

class JfrCheckpointWriter;

// RAII helper class for adding blobs to the storage.
class JfrAddRefCountedBlob : public StackObj {
private:
bool _reset;
public:
JfrAddRefCountedBlob(JfrCheckpointWriter& writer, bool move = true, bool reset = true);
~JfrAddRefCountedBlob();
};

// The debug aid 'scope' implies the proper RAII save construct is placed on stack.
// This is a necessary condition for installing reference counted storage to nodes.
class JfrReferenceCountedStorage : AllStatic {
friend class JfrAddRefCountedBlob;
private:
static JfrBlobHandle _type_sets; // linked-list of blob handles saved during epoch.
DEBUG_ONLY(static bool _scope;)

static void save_blob(JfrCheckpointWriter& writer, bool move = false);
static void reset();
DEBUG_ONLY(static void set_scope();)

public:
template <typename T>
static void install(T* node, const T* end) {
assert(_scope, "invariant");
if (_type_sets.valid()) {
while (node != end) {
node->install_type_set(_type_sets);
node = node->next();
}
}
}
};

#endif // SHARE_JFR_RECORDER_STORAGE_JFRREFERENCECOUNTEDSTORAGE_HPP
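The two new files above introduce an RAII pattern: JfrAddRefCountedBlob saves the writer's data as a reference-counted blob (only when some subsystem still has an unresolved entry), opens a debug "scope", and resets the storage when it leaves the stack; install() then hands the chained blob handle to every node in a range. A self-contained sketch of that shape, with shared_ptr standing in for JfrBlobHandle; all names are illustrative, not the HotSpot API, and the unresolved-entry predicate and move/copy choice are elided:

#include <cassert>
#include <iostream>
#include <memory>

struct Blob;
using BlobHandle = std::shared_ptr<Blob>;
struct Blob { int id; BlobHandle next; };

struct Edge {                          // toy analogue of JfrDeprecatedEdge
  BlobHandle type_set;
  Edge* nxt = nullptr;
  Edge* next() const { return nxt; }
  void install_type_set(const BlobHandle& h) { type_set = h; }
};

class Storage {                        // toy analogue of JfrReferenceCountedStorage
  static BlobHandle _type_sets;        // blobs saved during the current epoch
  static bool _scope;                  // debug aid: a guard must be on stack
 public:
  static void save_blob(const BlobHandle& b) {
    if (!_type_sets) { _type_sets = b; return; }
    Blob* tail = _type_sets.get();     // chain additional blobs behind the head
    while (tail->next) tail = tail->next.get();
    tail->next = b;
  }
  template <typename T>
  static void install(T* node, const T* end) {
    assert(_scope && "install requires an active RAII scope");
    if (!_type_sets) return;
    for (; node != end; node = node->next()) node->install_type_set(_type_sets);
  }
  static void set_scope() { _scope = true; }
  static void reset() { _type_sets.reset(); _scope = false; }
};
BlobHandle Storage::_type_sets;
bool Storage::_scope = false;

struct AddBlobGuard {                  // toy analogue of JfrAddRefCountedBlob
  explicit AddBlobGuard(const BlobHandle& b) { Storage::save_blob(b); Storage::set_scope(); }
  ~AddBlobGuard() { Storage::reset(); }
};

int main() {
  Edge e1, e2; e1.nxt = &e2;
  {
    AddBlobGuard guard(std::make_shared<Blob>(Blob{42, nullptr}));
    Storage::install(&e1, e2.next());  // both edges now share the blob
  }                                    // guard resets the storage here
  std::cout << e1.type_set->id << ' ' << e2.type_set->id << '\n'; // 42 42
}

The shared_ptr reference count keeps the blob alive after the storage resets, because the edges still hold handles, which is the point of the reference-counted design.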
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -116,8 +116,8 @@ bool JfrDeprecatedStackTraceWriter::process(const JfrDeprecatedEdge* edge) {
return true;
}

JfrDeprecatedEventWriter::JfrDeprecatedEventWriter(JfrChunkWriter& cw, bool stacktrace) :
_now(JfrTicks::now()),_cw(cw), _for_removal(only_for_removal()), _stacktrace(stacktrace), _did_write(false) {}
JfrDeprecatedEventWriter::JfrDeprecatedEventWriter(JfrChunkWriter& cw, JfrCheckpointWriter& tsw, bool stacktrace) :
_now(JfrTicks::now()),_cw(cw), _tsw(tsw), _for_removal(only_for_removal()), _stacktrace(stacktrace) {}

static size_t calculate_event_size(const JfrDeprecatedEdge* edge, JfrChunkWriter& cw, const JfrTicks& now, bool stacktrace) {
assert(edge != nullptr, "invariant");
@@ -141,14 +141,31 @@ static void write_event(const JfrDeprecatedEdge* edge, JfrChunkWriter& cw, const
cw.write(edge->for_removal());
}

static void write_type_set(const JfrDeprecatedEdge* edge, JfrCheckpointWriter& tsw) {
if (!edge->has_type_set()) {
return;
}
edge->type_set()->exclusive_write(tsw);
}

bool JfrDeprecatedEventWriter::process(const JfrDeprecatedEdge* edge) {
assert(edge != nullptr, "invariant");
if (_for_removal && !edge->for_removal()) {
return true;
}
write_event(edge, _cw,_now, _stacktrace);
if (!_did_write) {
_did_write = true;
}
write_event(edge, _cw, _now, _stacktrace);
write_type_set(edge, _tsw);
return true;
}

JfrDeprecatedEventClear::JfrDeprecatedEventClear() {}

bool JfrDeprecatedEventClear::process(const JfrDeprecatedEdge* edge) {
assert(edge != nullptr, "invariant");
if (!edge->has_type_set()) {
return true;
}
edge->type_set()->reset_write_state();
return true;
}


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -56,12 +56,17 @@ class JfrDeprecatedEventWriter : public StackObj {
private:
JfrTicks _now;
JfrChunkWriter& _cw;
JfrCheckpointWriter& _tsw;
bool _for_removal;
bool _stacktrace;
bool _did_write;
public:
JfrDeprecatedEventWriter(JfrChunkWriter& cw, bool stacktrace);
bool did_write() const { return _did_write; }
JfrDeprecatedEventWriter(JfrChunkWriter& cw, JfrCheckpointWriter& tsw, bool stacktrace);
bool process(const JfrDeprecatedEdge* edge);
};

class JfrDeprecatedEventClear : public StackObj {
public:
JfrDeprecatedEventClear();
bool process(const JfrDeprecatedEdge* edge);
};


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
#include "jfr/recorder/repository/jfrChunkWriter.hpp"
#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
#include "jfr/recorder/storage/jfrReferenceCountedStorage.hpp"
#include "jfr/support/jfrDeprecationEventWriter.hpp"
#include "jfr/support/jfrDeprecationManager.hpp"
#include "jfr/support/jfrKlassUnloading.hpp"
@@ -66,6 +67,7 @@ static inline traceid load_traceid(const Method* method) {
JfrDeprecatedEdge::JfrDeprecatedEdge(const Method* method, Method* sender, int bci, u1 frame_type, JavaThread* jt) :
_invocation_time(JfrTicks::now()),
_stacktrace(),
_type_set(),
_next(nullptr),
_deprecated_ik(method->method_holder()),
_deprecated_methodid(load_traceid(method)),
@@ -94,11 +96,25 @@ const JfrBlobHandle& JfrDeprecatedEdge::stacktrace() const {
return _stacktrace;
}

bool JfrDeprecatedEdge::has_type_set() const {
return _type_set.valid();
}

const JfrBlobHandle& JfrDeprecatedEdge::type_set() const {
assert(has_type_set(), "invariant");
return _type_set;
}

void JfrDeprecatedEdge::install_type_set(const JfrBlobHandle& type_set) {
assert(!has_type_set(), "invariant");
_type_set = type_set;
}

typedef JfrLinkedList<JfrDeprecatedEdge> DeprecatedEdgeList;

static DeprecatedEdgeList _list; // Newly constructed edges are concurrently added to this list.
static DeprecatedEdgeList _pending_list; // During epoch rotation (safepoint) entries in _list are moved onto _pending_list
static DeprecatedEdgeList _resolved_list; // Fully resolved edges (event and stacktrace blobs).
static DeprecatedEdgeList _resolved_list; // Fully resolved edges (event, stacktrace and typeset blobs).

static JfrDeprecatedEdge* allocate_edge(const Method* method, Method* sender, int bci, u1 frame_type, JavaThread* jt) {
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt);)
@@ -225,10 +241,6 @@ static void transfer_list() {
}
}

void JfrDeprecationManager::on_level_setting_update(int64_t new_level) {
JfrDeprecatedEventWriterState::on_level_setting_update(new_level);
}

void JfrDeprecationManager::on_safepoint_clear() {
assert(!_enqueue_klasses, "invariant");
// We are now starting JFR, so begin enqueuing tagged klasses.
@@ -270,6 +282,23 @@ static void add_to_leakp_set(const JfrDeprecatedEdge* edge) {
static DeprecatedEdgeList::NodePtr _pending_head = nullptr;
static DeprecatedEdgeList::NodePtr _pending_tail = nullptr;

inline DeprecatedEdgeList::NodePtr pending_head() {
return Atomic::load(&_pending_head);
}

// The test for a pending head can be read concurrently from a thread doing class unloading.
inline static bool has_pending_head() {
return pending_head() != nullptr;
}

inline static bool no_pending_head() {
return !has_pending_head();
}

inline static void set_pending_head(DeprecatedEdgeList::NodePtr head) {
Atomic::store(&_pending_head, head);
}

class PendingListProcessor {
private:
JfrCheckpointWriter& _writer;
@@ -281,66 +310,57 @@ class PendingListProcessor {
JfrDeprecatedStackTraceWriter::install_stacktrace_blob(edge, _writer, _jt);
assert(edge->has_stacktrace(), "invariant");
add_to_leakp_set(edge);
if (_pending_head == nullptr) {
_pending_head = edge;
if (no_pending_head()) {
set_pending_head(edge);
}
_pending_tail = edge;
return true;
}
};

void JfrDeprecationManager::prepare_type_set(JavaThread* jt) {
_pending_head = nullptr;
// Resets the pending head and tail.
// Resets blob write states for nodes on the resolved list, dirtied in the previous epoch.
static void reset_type_set_blobs() {
set_pending_head(nullptr);
_pending_tail = nullptr;
if (_resolved_list.is_nonempty()) {
JfrDeprecatedEventClear clear;
_resolved_list.iterate(clear);
}
}

void JfrDeprecationManager::prepare_type_set(JavaThread* jt) {
reset_type_set_blobs();
if (_pending_list.is_nonempty()) {
JfrKlassUnloading::sort(true);
JfrCheckpointWriter writer(true /* prev epoch */, jt, false /* header */);
PendingListProcessor plp(writer, jt);
_pending_list.iterate(plp);
assert(_pending_head != nullptr, "invariant");
assert(has_pending_head(), "invariant");
assert(_pending_tail != nullptr, "invariant");
assert(_pending_tail->next() == nullptr, "invariant");
// Excise already resolved edges to link them.
_pending_tail->set_next(_resolved_list.cut());
// Re-insertion.
_resolved_list.add_list(_pending_head);
_resolved_list.add_list(pending_head());
_pending_list.clear();
}
assert(_pending_list.is_empty(), "invariant");
}

// A linked-list of blob handles.
static JfrBlobHandle type_set_blobs;

static inline void write_type_set_blobs(JfrCheckpointWriter& writer) {
type_set_blobs->write(writer);
}

static void save_type_set_blob(JfrCheckpointWriter& writer, bool copy = false) {
assert(writer.has_data(), "invariant");
const JfrBlobHandle blob = copy ? writer.copy() : writer.move();
if (type_set_blobs.valid()) {
type_set_blobs->set_next(blob);
} else {
type_set_blobs = blob;
}
}

void JfrDeprecationManager::on_type_set_unload(JfrCheckpointWriter& writer) {
if (writer.has_data()) {
save_type_set_blob(writer, true);
}
bool JfrDeprecationManager::has_unresolved_entry() {
return _list.is_nonempty() || has_pending_head() || _pending_list.is_nonempty();
}

static inline bool has_stacktrace() {
return JfrEventSetting::has_stacktrace(JfrDeprecatedInvocationEvent);
}

static inline bool write_events(JfrChunkWriter& cw) {
static inline void write_events(JfrChunkWriter& cw, Thread* thread, bool on_error) {
assert(_resolved_list.is_nonempty(), "invariant");
JfrDeprecatedEventWriter ebw(cw, has_stacktrace());
JfrCheckpointWriter type_set_writer(!on_error, thread, false);
JfrDeprecatedEventWriter ebw(cw, type_set_writer, has_stacktrace());
_resolved_list.iterate(ebw);
return ebw.did_write();
}

static inline void write_stacktraces(JfrChunkWriter& cw) {
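prepare_type_set() above now splices in two steps: cut() excises the whole resolved list, the pending tail is linked in front of it, and add_list() re-inserts the combined chain, so freshly pended edges end up ahead of previously resolved ones. A small standalone sketch of that splice, with toy list types rather than JfrLinkedList:

#include <iostream>

// Minimal singly linked list with cut()/add_list(), mirroring the splice.
struct Node { int v; Node* next = nullptr; };

struct List {
  Node* head = nullptr;
  Node* cut() { Node* h = head; head = nullptr; return h; }  // excise all nodes
  void add_list(Node* h) { head = h; }                       // adopt a chain
};

int main() {
  Node r1{1}, p1{2}, p2{3};
  List resolved; resolved.head = &r1;    // previously resolved edge
  p1.next = &p2;                         // pending chain built this epoch
  Node* pending_head = &p1;
  Node* pending_tail = &p2;
  pending_tail->next = resolved.cut();   // link resolved nodes behind the tail
  resolved.add_list(pending_head);       // re-insert: pending now leads
  for (Node* n = resolved.head; n != nullptr; n = n->next) std::cout << n->v << ' ';
  std::cout << '\n';                     // prints: 2 3 1
}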
@@ -349,34 +369,30 @@ static inline void write_stacktraces(JfrChunkWriter& cw) {
_resolved_list.iterate(scw);
}

static inline void write_type_sets(Thread* thread, bool on_error) {
JfrCheckpointWriter writer(!on_error, thread, false);
write_type_set_blobs(writer);
}

// First, we consolidate all stacktrace blobs into a single TYPE_STACKTRACE checkpoint and serialize it to the chunk.
// Secondly, we serialize all events to the chunk.
// Thirdly, the type set blobs are written into the JfrCheckpoint system, to be serialized to the chunk
// just after we return from here.
// First, we consolidate all stack trace blobs into a single TYPE_STACKTRACE checkpoint
// and serialize it to the chunk. Then, all events are serialized, and unique type set blobs
// written into the JfrCheckpoint system to be serialized to the chunk upon return.
void JfrDeprecationManager::write_edges(JfrChunkWriter& cw, Thread* thread, bool on_error /* false */) {
if (_resolved_list.is_nonempty() && JfrEventSetting::is_enabled(JfrDeprecatedInvocationEvent)) {
if (has_stacktrace()) {
write_stacktraces(cw);
}
if (write_events(cw)) {
write_type_sets(thread, on_error);
}
write_events(cw, thread, on_error);
}
}

void JfrDeprecationManager::on_type_set(JfrCheckpointWriter& writer, JfrChunkWriter* cw, Thread* thread) {
void JfrDeprecationManager::on_type_set(JfrChunkWriter* cw, Thread* thread) {
assert(_pending_list.is_empty(), "invariant");
if (_pending_head != nullptr) {
save_type_set_blob(writer);
} else {
writer.cancel();
if (has_pending_head()) {
assert(_pending_tail != nullptr, "invariant");
// Install type set blobs for the pending, i.e. unresolved nodes.
JfrReferenceCountedStorage::install(pending_head(), _pending_tail->next());
}
if (cw != nullptr) {
write_edges(*cw, thread);
}
}

void JfrDeprecationManager::on_level_setting_update(int64_t new_level) {
JfrDeprecatedEventWriterState::on_level_setting_update(new_level);
}

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,6 +42,7 @@ class JfrDeprecatedEdge : public CHeapObj<mtTracing> {
private:
JfrTicks _invocation_time;
JfrBlobHandle _stacktrace;
JfrBlobHandle _type_set;
JfrDeprecatedEdge* _next;
InstanceKlass* _deprecated_ik;
traceid _deprecated_methodid;
@@ -58,7 +59,7 @@ class JfrDeprecatedEdge : public CHeapObj<mtTracing> {
public:
JfrDeprecatedEdge(const Method* method, Method* sender, int bci, u1 frame_type, JavaThread* jt);

const JfrDeprecatedEdge* next() const { return _next; }
JfrDeprecatedEdge* next() const { return _next; }
void set_next(JfrDeprecatedEdge* edge) { _next = edge; }

bool has_event() const;
@@ -68,6 +69,10 @@
const JfrBlobHandle& stacktrace() const;
void install_stacktrace_blob(JavaThread* jt);

bool has_type_set() const;
const JfrBlobHandle& type_set() const;
void install_type_set(const JfrBlobHandle& type_set);

const InstanceKlass* deprecated_ik() const { return _deprecated_ik; }
traceid deprecated_methodid() const { return _deprecated_methodid; }

@@ -89,11 +94,11 @@ class JfrDeprecationManager : AllStatic {
static void on_safepoint_write();
static void on_recorder_stop();
static void prepare_type_set(JavaThread* jt);
static void on_type_set(JfrCheckpointWriter& writer, JfrChunkWriter* cw, Thread* thread);
static void on_type_set_unload(JfrCheckpointWriter& writer);
static void on_type_set(JfrChunkWriter* cw, Thread* thread);
static void write_edges(JfrChunkWriter& cw, Thread* thread, bool on_error = false);
static void on_link(const Method* method, Method* sender, int bci, u1 frame_type, JavaThread* thread);
static void on_level_setting_update(int64_t new_level);
static bool has_unresolved_entry();
};

#endif // SHARE_JFR_SUPPORT_JFRDEPRECATIONMANAGER_HPP

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -47,13 +47,13 @@ class JfrIntrinsicSupport : AllStatic {
#define JFR_HAVE_INTRINSICS

#define JFR_TEMPLATES(template) \
template(jdk_jfr_internal_HiddenWait, "jdk/jfr/internal/HiddenWait") \
template(jdk_jfr_internal_JVM, "jdk/jfr/internal/JVM") \
template(jdk_jfr_internal_event_EventWriterFactory, "jdk/jfr/internal/event/EventWriterFactory") \
template(jdk_jfr_internal_event_EventConfiguration_signature, "Ljdk/jfr/internal/event/EventConfiguration;") \
template(getEventWriter_signature, "()Ljdk/jfr/internal/event/EventWriter;") \
template(eventConfiguration_name, "eventConfiguration") \
template(commit_name, "commit") \
template(jfr_chunk_rotation_monitor, "jdk/jfr/internal/JVM$ChunkRotationMonitor") \

#define JFR_INTRINSICS(do_intrinsic, do_class, do_name, do_signature, do_alias) \
do_intrinsic(_counterTime, jdk_jfr_internal_JVM, counterTime_name, void_long_signature, F_SN) \

@@ -55,6 +55,7 @@
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/macros.hpp"

@@ -173,7 +174,7 @@ void ConstantPoolCache::set_direct_or_vtable_call(Bytecodes::Code invoke_code,
}
if (invoke_code == Bytecodes::_invokestatic) {
assert(method->method_holder()->is_initialized() ||
method->method_holder()->is_init_thread(JavaThread::current()),
method->method_holder()->is_reentrant_initialization(JavaThread::current()),
"invalid class initialization state for invoke_static");

if (!VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
@@ -268,11 +269,20 @@ ResolvedMethodEntry* ConstantPoolCache::set_method_handle(int method_index, cons
// A losing writer waits on the lock until the winner writes the method and leaves
// the lock, so that when the losing writer returns, he can use the linked
// cache entry.

// Lock fields to write
Bytecodes::Code invoke_code = Bytecodes::_invokehandle;
MutexLocker ml(constant_pool()->pool_holder()->init_monitor());
ResolvedMethodEntry* method_entry = resolved_method_entry_at(method_index);

JavaThread* current = JavaThread::current();
objArrayHandle resolved_references(current, constant_pool()->resolved_references());
// Use the resolved_references() lock for this cpCache entry.
// resolved_references are created for all classes with Invokedynamic, MethodHandle
// or MethodType constant pool cache entries.
assert(resolved_references() != nullptr,
"a resolved_references array should have been created for this class");
ObjectLocker ol(resolved_references, current);

ResolvedMethodEntry* method_entry = resolved_method_entry_at(method_index);
if (method_entry->is_resolved(invoke_code)) {
return method_entry;
}
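The cpCache changes above replace the per-class init_monitor with an ObjectLocker on the resolved_references array while keeping the same "losing writer" protocol: take the lock, re-check whether a winner already resolved the entry, and publish at most once. A compact sketch of that protocol, with a std::mutex standing in for the ObjectLocker and all types illustrative:

#include <mutex>
#include <optional>

struct Entry {
  std::optional<int> resolved;  // stands in for the resolved method entry
};

static std::mutex entry_lock;   // stands in for ObjectLocker on resolved_references

// A losing writer re-reads under the lock and reuses the winner's result
// instead of resolving twice; the winner publishes exactly once.
int publish_once(Entry& e, int candidate) {
  std::lock_guard<std::mutex> guard(entry_lock);
  if (e.resolved.has_value()) {
    return *e.resolved;         // another thread won the race
  }
  e.resolved = candidate;       // we won: publish exactly once
  return candidate;
}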
@@ -310,7 +320,6 @@
// Store appendix, if any.
if (has_appendix) {
const int appendix_index = method_entry->resolved_references_index();
objArrayOop resolved_references = constant_pool()->resolved_references();
assert(appendix_index >= 0 && appendix_index < resolved_references->length(), "oob");
assert(resolved_references->obj_at(appendix_index) == nullptr, "init just once");
resolved_references->obj_at_put(appendix_index, appendix());
@@ -555,7 +564,14 @@ bool ConstantPoolCache::save_and_throw_indy_exc(
assert(PENDING_EXCEPTION->is_a(vmClasses::LinkageError_klass()),
"No LinkageError exception");

MutexLocker ml(THREAD, cpool->pool_holder()->init_monitor());
// Use the resolved_references() lock for this cpCache entry.
// resolved_references are created for all classes with Invokedynamic, MethodHandle
// or MethodType constant pool cache entries.
JavaThread* current = THREAD;
objArrayHandle resolved_references(current, cpool->resolved_references());
assert(resolved_references() != nullptr,
"a resolved_references array should have been created for this class");
ObjectLocker ol(resolved_references, current);

// if the indy_info is resolved or the indy_resolution_failed flag is set then another
// thread either succeeded in resolving the method or got a LinkageError
@@ -578,11 +594,21 @@ bool ConstantPoolCache::save_and_throw_indy_exc(

oop ConstantPoolCache::set_dynamic_call(const CallInfo &call_info, int index) {
ResourceMark rm;
MutexLocker ml(constant_pool()->pool_holder()->init_monitor());

// Use the resolved_references() lock for this cpCache entry.
// resolved_references are created for all classes with Invokedynamic, MethodHandle
// or MethodType constant pool cache entries.
JavaThread* current = JavaThread::current();
constantPoolHandle cp(current, constant_pool());

objArrayHandle resolved_references(current, cp->resolved_references());
assert(resolved_references() != nullptr,
"a resolved_references array should have been created for this class");
ObjectLocker ol(resolved_references, current);
assert(index >= 0, "Indy index must be positive at this point");

if (resolved_indy_entry_at(index)->method() != nullptr) {
return constant_pool()->resolved_reference_from_indy(index);
return cp->resolved_reference_from_indy(index);
}

if (resolved_indy_entry_at(index)->resolution_failed()) {
@@ -590,9 +616,7 @@ oop ConstantPoolCache::set_dynamic_call(const CallInfo &call_info, int index) {
// resolution. Ignore our success and throw their exception.
guarantee(index >= 0, "Invalid indy index");
int encoded_index = ResolutionErrorTable::encode_indy_index(index);
JavaThread* THREAD = JavaThread::current(); // For exception macros.
constantPoolHandle cp(THREAD, constant_pool());
ConstantPool::throw_resolution_error(cp, encoded_index, THREAD);
ConstantPool::throw_resolution_error(cp, encoded_index, current);
return nullptr;
}

@@ -616,7 +640,6 @@ oop ConstantPoolCache::set_dynamic_call(const CallInfo &call_info, int index) {

if (has_appendix) {
const int appendix_index = resolved_indy_entry_at(index)->resolved_references_index();
objArrayOop resolved_references = constant_pool()->resolved_references();
assert(appendix_index >= 0 && appendix_index < resolved_references->length(), "oob");
assert(resolved_references->obj_at(appendix_index) == nullptr, "init just once");
resolved_references->obj_at_put(appendix_index, appendix());

@@ -86,6 +86,7 @@
#include "runtime/orderAccess.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/reflection.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/threads.hpp"
#include "services/classLoadingService.hpp"
#include "services/finalizerService.hpp"
@@ -497,9 +498,6 @@ Array<int>* InstanceKlass::create_new_default_vtable_indices(int len, TRAPS) {
return vtable_indices;
}

static Monitor* create_init_monitor(const char* name) {
return new Monitor(Mutex::safepoint, name);
}

InstanceKlass::InstanceKlass() {
assert(CDSConfig::is_dumping_static_archive() || CDSConfig::is_using_archive(), "only for CDS");
@@ -517,7 +515,6 @@ InstanceKlass::InstanceKlass(const ClassFileParser& parser, KlassKind kind, Refe
_nest_host_index(0),
_init_state(allocated),
_reference_type(reference_type),
_init_monitor(create_init_monitor("InstanceKlassInitMonitor_lock")),
_init_thread(nullptr)
{
set_vtable_length(parser.vtable_size());
@@ -745,6 +742,28 @@ objArrayOop InstanceKlass::signers() const {
return java_lang_Class::signers(java_mirror());
}

oop InstanceKlass::init_lock() const {
// return the init lock from the mirror
oop lock = java_lang_Class::init_lock(java_mirror());
// Prevent reordering with any access of initialization state
OrderAccess::loadload();
assert(lock != nullptr || !is_not_initialized(), // initialized or in_error state
"only fully initialized state can have a null lock");
return lock;
}

// Set the initialization lock to null so the object can be GC'ed. Any racing
// threads to get this lock will see a null lock and will not lock.
// That's okay because they all check for initialized state after getting
// the lock and return.
void InstanceKlass::fence_and_clear_init_lock() {
// make sure previous stores are all done, notably the init_state.
OrderAccess::storestore();
java_lang_Class::clear_init_lock(java_mirror());
assert(!is_not_initialized(), "class must be initialized now");
}


// See "The Virtual Machine Specification" section 2.16.5 for a detailed explanation of the class initialization
// process. The step comments refers to the procedure described in that section.
// Note: implementation moved to static method to expose the this pointer.
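The reinstated init_lock() pairs an OrderAccess::loadload() with the storestore() in fence_and_clear_init_lock(): the initialization state must be published before the lock oop is cleared, and readers must not let their state access be reordered around the lock read. A rough standalone analogue with C++ atomics, where release/acquire plays the role of the explicit fences (simplified; the real lock is an oop stored in the class mirror):

#include <atomic>

struct InitState {
  std::atomic<int>  state{0};            // 0 = being_initialized, 2 = fully_initialized
  std::atomic<bool> lock_present{true};  // stands in for the init-lock oop

  void fence_and_clear_init_lock() {
    // Publish the final state before dropping the lock, mirroring
    // OrderAccess::storestore() followed by clearing the oop.
    state.store(2, std::memory_order_release);
    lock_present.store(false, std::memory_order_release);
  }

  bool is_initialized() const {
    // Acquire keeps subsequent reads (e.g. of lock_present) from being
    // reordered ahead of the state read, the role loadload() plays above.
    return state.load(std::memory_order_acquire) == 2;
  }
};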
@@ -772,49 +791,6 @@ void InstanceKlass::link_class(TRAPS) {
}
}

void InstanceKlass::check_link_state_and_wait(JavaThread* current) {
MonitorLocker ml(current, _init_monitor);

bool debug_logging_enabled = log_is_enabled(Debug, class, init);

// Another thread is linking this class, wait.
while (is_being_linked() && !is_init_thread(current)) {
if (debug_logging_enabled) {
ResourceMark rm(current);
log_debug(class, init)("Thread \"%s\" waiting for linking of %s by thread \"%s\"",
current->name(), external_name(), init_thread_name());
}
ml.wait();
}

// This thread is recursively linking this class, continue
if (is_being_linked() && is_init_thread(current)) {
if (debug_logging_enabled) {
ResourceMark rm(current);
log_debug(class, init)("Thread \"%s\" recursively linking %s",
current->name(), external_name());
}
return;
}

// If this class wasn't linked already, set state to being_linked
if (!is_linked()) {
if (debug_logging_enabled) {
ResourceMark rm(current);
log_debug(class, init)("Thread \"%s\" linking %s",
current->name(), external_name());
}
set_init_state(being_linked);
set_init_thread(current);
} else {
if (debug_logging_enabled) {
ResourceMark rm(current);
log_debug(class, init)("Thread \"%s\" found %s already linked",
current->name(), external_name());
}
}
}

// Called to verify that a class can link during initialization, without
// throwing a VerifyError.
bool InstanceKlass::link_class_or_fail(TRAPS) {
@@ -893,8 +869,9 @@ bool InstanceKlass::link_class_impl(TRAPS) {

// verification & rewriting
{
LockLinkState init_lock(this, jt);

HandleMark hm(THREAD);
Handle h_init_lock(THREAD, init_lock());
ObjectLocker ol(h_init_lock, jt);
// rewritten will have been set if loader constraint error found
// on an earlier link attempt
// don't verify or rewrite if already rewritten
@@ -952,7 +929,21 @@ bool InstanceKlass::link_class_impl(TRAPS) {
// In case itable verification is ever added.
// itable().verify(tty, true);
#endif
set_initialization_state_and_notify(linked, THREAD);
if (UseVtableBasedCHA && Universe::is_fully_initialized()) {
DeoptimizationScope deopt_scope;
{
// Now mark all code that assumes the class is not linked.
// Set state under the Compile_lock also.
MutexLocker ml(THREAD, Compile_lock);

set_init_state(linked);
CodeCache::mark_dependents_on(&deopt_scope, this);
}
// Perform the deopt handshake outside Compile_lock.
deopt_scope.deoptimize_marked();
} else {
set_init_state(linked);
}
if (JvmtiExport::should_post_class_prepare()) {
JvmtiExport::post_class_prepare(THREAD, this);
}
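link_class_impl() now flips the state and marks dependents while holding Compile_lock, but runs the deoptimization handshake only after the lock is released. The shape of that two-phase pattern, reduced to standard C++ (the mutex and callbacks are stand-ins for Compile_lock and the dependent nmethods; all names are illustrative):

#include <functional>
#include <mutex>
#include <vector>

static std::mutex compile_lock;

void publish_linked(bool& linked,
                    std::vector<std::function<void()>>& dependents) {
  std::vector<std::function<void()>> marked;
  {
    std::lock_guard<std::mutex> guard(compile_lock);
    linked = true;            // state change and marking are atomic together
    marked.swap(dependents);  // "mark_dependents_on" while still locked
  }
  for (auto& deopt : marked) {
    deopt();                  // "deoptimize_marked" runs outside the lock
  }
}

Doing the expensive handshake outside the lock keeps Compile_lock hold times short without losing the invariant that marking and the state change are observed together.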
@@ -1082,7 +1073,6 @@ void InstanceKlass::initialize_impl(TRAPS) {
DTRACE_CLASSINIT_PROBE(required, -1);

bool wait = false;
bool throw_error = false;

JavaThread* jt = THREAD;

@@ -1091,24 +1081,27 @@ void InstanceKlass::initialize_impl(TRAPS) {
// refer to the JVM book page 47 for description of steps
// Step 1
{
MonitorLocker ml(jt, _init_monitor);
Handle h_init_lock(THREAD, init_lock());
ObjectLocker ol(h_init_lock, jt);

// Step 2
while (is_being_initialized() && !is_init_thread(jt)) {
// If we were to use wait() instead of waitInterruptibly() then
// we might end up throwing IE from link/symbol resolution sites
// that aren't expected to throw. This would wreak havoc. See 6320309.
while (is_being_initialized() && !is_reentrant_initialization(jt)) {
if (debug_logging_enabled) {
ResourceMark rm(jt);
log_debug(class, init)("Thread \"%s\" waiting for initialization of %s by thread \"%s\"",
jt->name(), external_name(), init_thread_name());
}

wait = true;
jt->set_class_to_be_initialized(this);
ml.wait();
ol.wait_uninterruptibly(jt);
jt->set_class_to_be_initialized(nullptr);
}

// Step 3
if (is_being_initialized() && is_init_thread(jt)) {
if (is_being_initialized() && is_reentrant_initialization(jt)) {
if (debug_logging_enabled) {
ResourceMark rm(jt);
log_debug(class, init)("Thread \"%s\" recursively initializing %s",
@@ -1136,7 +1129,19 @@ void InstanceKlass::initialize_impl(TRAPS) {
log_debug(class, init)("Thread \"%s\" found %s is in error state",
jt->name(), external_name());
}
throw_error = true;

DTRACE_CLASSINIT_PROBE_WAIT(erroneous, -1, wait);
ResourceMark rm(THREAD);
Handle cause(THREAD, get_initialization_error(THREAD));

stringStream ss;
ss.print("Could not initialize class %s", external_name());
if (cause.is_null()) {
THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), ss.as_string());
} else {
THROW_MSG_CAUSE(vmSymbols::java_lang_NoClassDefFoundError(),
ss.as_string(), cause);
}
} else {

// Step 6
@@ -1150,22 +1155,6 @@
}
}

// Throw error outside lock
if (throw_error) {
DTRACE_CLASSINIT_PROBE_WAIT(erroneous, -1, wait);
ResourceMark rm(THREAD);
Handle cause(THREAD, get_initialization_error(THREAD));

stringStream ss;
ss.print("Could not initialize class %s", external_name());
if (cause.is_null()) {
THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), ss.as_string());
} else {
THROW_MSG_CAUSE(vmSymbols::java_lang_NoClassDefFoundError(),
ss.as_string(), cause);
}
}

// Step 7
// Next, if C is a class rather than an interface, initialize it's super class and super
// interfaces.
@@ -1223,7 +1212,7 @@ void InstanceKlass::initialize_impl(TRAPS) {

// Step 9
if (!HAS_PENDING_EXCEPTION) {
set_initialization_state_and_notify(fully_initialized, THREAD);
set_initialization_state_and_notify(fully_initialized, CHECK);
debug_only(vtable().verify(tty, true);)
}
else {
@@ -1256,43 +1245,26 @@
}


void InstanceKlass::set_initialization_state_and_notify(ClassState state, JavaThread* current) {
MonitorLocker ml(current, _init_monitor);

if (state == linked && UseVtableBasedCHA && Universe::is_fully_initialized()) {
DeoptimizationScope deopt_scope;
{
// Now mark all code that assumes the class is not linked.
// Set state under the Compile_lock also.
MutexLocker ml(current, Compile_lock);

set_init_thread(nullptr); // reset _init_thread before changing _init_state
set_init_state(state);

CodeCache::mark_dependents_on(&deopt_scope, this);
}
// Perform the deopt handshake outside Compile_lock.
deopt_scope.deoptimize_marked();
void InstanceKlass::set_initialization_state_and_notify(ClassState state, TRAPS) {
Handle h_init_lock(THREAD, init_lock());
if (h_init_lock() != nullptr) {
ObjectLocker ol(h_init_lock, THREAD);
set_init_thread(nullptr); // reset _init_thread before changing _init_state
set_init_state(state);
fence_and_clear_init_lock();
ol.notify_all(CHECK);
} else {
assert(h_init_lock() != nullptr, "The initialization state should never be set twice");
set_init_thread(nullptr); // reset _init_thread before changing _init_state
set_init_state(state);
}
ml.notify_all();
}

// Update hierarchy. This is done before the new klass has been added to the SystemDictionary. The Compile_lock
// is grabbed, to ensure that the compiler is not using the class hierarchy.
void InstanceKlass::add_to_hierarchy(JavaThread* current) {
void InstanceKlass::add_to_hierarchy_impl(JavaThread* current) {
assert(!SafepointSynchronize::is_at_safepoint(), "must NOT be at safepoint");

// In case we are not using CHA based vtables we need to make sure the loaded
// deopt is completed before anyone links this class.
// Linking is done with _init_monitor held, by loading and deopting with it
// held we make sure the deopt is completed before linking.
if (!UseVtableBasedCHA) {
init_monitor()->lock();
}

DeoptimizationScope deopt_scope;
{
MutexLocker ml(current, Compile_lock);
@@ -1314,12 +1286,26 @@
}
// Perform the deopt handshake outside Compile_lock.
deopt_scope.deoptimize_marked();
}

if (!UseVtableBasedCHA) {
init_monitor()->unlock();
void InstanceKlass::add_to_hierarchy(JavaThread* current) {

if (UseVtableBasedCHA || !Universe::is_fully_initialized()) {
add_to_hierarchy_impl(current);
} else {
// In case we are not using CHA based vtables we need to make sure the loaded
// deopt is completed before anyone links this class.
// Linking is done with init_lock held, by loading and deopting with it
// held we make sure the deopt is completed before linking.
Handle h_init_lock(current, init_lock());
ObjectLocker ol(h_init_lock, current);
add_to_hierarchy_impl(current);

// This doesn't need a notify because the wait is only on the class initialization path.
}
}


InstanceKlass* InstanceKlass::implementor() const {
InstanceKlass* volatile* ik = adr_implementor();
if (ik == nullptr) {
@@ -2586,7 +2572,6 @@ void InstanceKlass::remove_unshareable_info() {
_nest_host = nullptr;
init_shared_package_entry();
_dep_context_last_cleaned = 0;
_init_monitor = nullptr;

remove_unshareable_flags();
}
@@ -2690,9 +2675,6 @@ void InstanceKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handl
if (DiagnoseSyncOnValueBasedClasses && has_value_based_class_annotation()) {
set_is_value_based();
}

// restore the monitor
_init_monitor = create_init_monitor("InstanceKlassInitMonitorRestored_lock");
}

// Check if a class or any of its supertypes has a version older than 50.
@@ -2788,9 +2770,6 @@ void InstanceKlass::release_C_heap_structures(bool release_sub_metadata) {
methods_do(method_release_C_heap_structures);
}

// Destroy the init_monitor
delete _init_monitor;

// Deallocate oop map cache
if (_oop_map_cache != nullptr) {
delete _oop_map_cache;
@@ -3482,7 +3461,7 @@ nmethod* InstanceKlass::lookup_osr_nmethod(const Method* m, int bci, int comp_le
#define BULLET " - "

static const char* state_names[] = {
"allocated", "loaded", "being_linked", "linked", "being_initialized", "fully_initialized", "initialization_error"
"allocated", "loaded", "linked", "being_initialized", "fully_initialized", "initialization_error"
};

static void print_vtable(intptr_t* start, int len, outputStream* st) {
@@ -4132,17 +4111,13 @@ void JNIid::verify(Klass* holder) {
}

void InstanceKlass::set_init_state(ClassState state) {
if (state > loaded) {
assert_lock_strong(_init_monitor);
}
#ifdef ASSERT
bool good_state = is_shared() ? (_init_state <= state)
: (_init_state < state);
bool link_failed = _init_state == being_linked && state == loaded;
assert(good_state || state == allocated || link_failed, "illegal state transition");
assert(good_state || state == allocated, "illegal state transition");
#endif
assert(_init_thread == nullptr, "should be cleared before state change");
Atomic::store(&_init_state, state);
_init_state = state;
}

#if INCLUDE_JVMTI

@@ -152,7 +152,6 @@ class InstanceKlass: public Klass {
enum ClassState : u1 {
allocated, // allocated (but not yet linked)
loaded, // loaded and inserted in class hierarchy (but not linked yet)
being_linked, // currently running verifier and rewriter
linked, // successfully linked/verified (but not initialized yet)
being_initialized, // currently running class initializer
fully_initialized, // initialized (successful final state)
@@ -226,14 +225,20 @@

volatile u2 _idnum_allocated_count; // JNI/JVMTI: increments with the addition of methods, old ids don't change

// _is_marked_dependent can be set concurrently, thus cannot be part of the
// _misc_flags.
bool _is_marked_dependent; // used for marking during flushing and deoptimization

// Class states are defined as ClassState (see above).
// Place the _init_state here to utilize the unused 2-byte after
// _idnum_allocated_count.
volatile ClassState _init_state; // state of class

u1 _reference_type; // reference type
u1 _reference_type; // reference type

// State is set either at parse time or while executing, atomically to not disturb other state
InstanceKlassFlags _misc_flags;

Monitor* _init_monitor; // mutual exclusion to _init_state and _init_thread.
JavaThread* volatile _init_thread; // Pointer to current thread doing initialization (to handle recursive initialization)

OopMapCache* volatile _oop_map_cache; // OopMapCache for all methods in the klass (allocated lazily)
@@ -497,41 +502,23 @@ public:
TRAPS);

JavaThread* init_thread() { return Atomic::load(&_init_thread); }
// We can safely access the name as long as we hold the _init_monitor.
const char* init_thread_name() {
assert(_init_monitor->owned_by_self(), "Must hold _init_monitor here");
return init_thread()->name_raw();
}

public:
// initialization state
bool is_loaded() const { return init_state() >= loaded; }
bool is_linked() const { return init_state() >= linked; }
bool is_being_linked() const { return init_state() == being_linked; }
bool is_initialized() const { return init_state() == fully_initialized; }
bool is_not_initialized() const { return init_state() < being_initialized; }
bool is_being_initialized() const { return init_state() == being_initialized; }
bool is_in_error_state() const { return init_state() == initialization_error; }
bool is_init_thread(JavaThread *thread) { return thread == init_thread(); }
ClassState init_state() const { return Atomic::load(&_init_state); }
bool is_loaded() const { return _init_state >= loaded; }
bool is_linked() const { return _init_state >= linked; }
bool is_initialized() const { return _init_state == fully_initialized; }
bool is_not_initialized() const { return _init_state < being_initialized; }
bool is_being_initialized() const { return _init_state == being_initialized; }
bool is_in_error_state() const { return _init_state == initialization_error; }
bool is_reentrant_initialization(Thread *thread) { return thread == _init_thread; }
ClassState init_state() const { return _init_state; }
const char* init_state_name() const;
bool is_rewritten() const { return _misc_flags.rewritten(); }

class LockLinkState : public StackObj {
InstanceKlass* _ik;
JavaThread* _current;
public:
LockLinkState(InstanceKlass* ik, JavaThread* current) : _ik(ik), _current(current) {
ik->check_link_state_and_wait(current);
}
~LockLinkState() {
if (!_ik->is_linked()) {
// Reset to loaded if linking failed.
_ik->set_initialization_state_and_notify(loaded, _current);
}
}
};

// is this a sealed class
bool is_sealed() const;

@@ -829,7 +816,7 @@ public:

// initialization
void call_class_initializer(TRAPS);
void set_initialization_state_and_notify(ClassState state, JavaThread* current);
void set_initialization_state_and_notify(ClassState state, TRAPS);

// OopMapCache support
OopMapCache* oop_map_cache() { return _oop_map_cache; }
@@ -841,6 +828,10 @@ public:
void set_jni_ids(JNIid* ids) { _jni_ids = ids; }
JNIid* jni_id_for(int offset);

private:
void add_to_hierarchy_impl(JavaThread* current);

public:
// maintenance of deoptimization dependencies
inline DependencyContext dependencies();
void mark_dependent_nmethods(DeoptimizationScope* deopt_scope, KlassDepChange& changes);
@@ -1055,7 +1046,7 @@ public:
public:
u2 idnum_allocated_count() const { return _idnum_allocated_count; }

private:
private:
// initialization state
void set_init_state(ClassState state);
void set_rewritten() { _misc_flags.set_rewritten(true); }
@@ -1072,6 +1063,12 @@ public:
jmethodID update_jmethod_id(jmethodID* jmeths, Method* method, int idnum);

public:
// Lock for (1) initialization; (2) access to the ConstantPool of this class.
// Must be one per class and it has to be a VM internal object so java code
// cannot lock it (like the mirror).
// It has to be an object not a Mutex because it's held through java calls.
oop init_lock() const;

// Returns the array class for the n'th dimension
virtual ArrayKlass* array_klass(int n, TRAPS);
virtual ArrayKlass* array_klass_or_null(int n);
@@ -1081,10 +1078,9 @@ public:
virtual ArrayKlass* array_klass_or_null();

static void clean_initialization_error_table();

Monitor* init_monitor() const { return _init_monitor; }
private:
void check_link_state_and_wait(JavaThread* current);
void fence_and_clear_init_lock();

bool link_class_impl (TRAPS);
bool verify_code (TRAPS);
void initialize_impl (TRAPS);

@@ -1950,6 +1950,22 @@ bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNod

}

// Check that all locks/unlocks associated with object come from balanced regions.
bool AbstractLockNode::is_balanced() {
Node* obj = obj_node();
for (uint j = 0; j < obj->outcnt(); j++) {
Node* n = obj->raw_out(j);
if (n->is_AbstractLock() &&
n->as_AbstractLock()->obj_node()->eqv_uncast(obj)) {
BoxLockNode* n_box = n->as_AbstractLock()->box_node()->as_BoxLock();
if (n_box->is_unbalanced()) {
return false;
}
}
}
return true;
}

const char* AbstractLockNode::_kind_names[] = {"Regular", "NonEscObj", "Coarsened", "Nested"};

const char * AbstractLockNode::kind_as_string() const {
@@ -2056,6 +2072,8 @@ Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
int unlocks = 0;
if (Verbose) {
tty->print_cr("=== Locks coarsening ===");
tty->print("Obj: ");
obj_node()->dump();
}
for (int i = 0; i < lock_ops.length(); i++) {
AbstractLockNode* lock = lock_ops.at(i);
@@ -2064,6 +2082,8 @@
else
unlocks++;
if (Verbose) {
tty->print("Box %d: ", i);
box_node()->dump();
tty->print(" %d: ", i);
lock->dump();
}

@@ -1154,6 +1154,10 @@ public:
void set_coarsened() { _kind = Coarsened; set_eliminated_lock_counter(); }
void set_nested() { _kind = Nested; set_eliminated_lock_counter(); }

// Check that all locks/unlocks associated with object come from balanced regions.
// They can become unbalanced after coarsening optimization or on OSR entry.
bool is_balanced();

// locking does not modify its arguments
virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase){ return false; }


@@ -574,18 +574,23 @@ bool ConnectionGraph::can_reduce_check_users(Node* n, uint nesting) const {
// CmpP/N used by the If controlling the cast.
if (use->in(0)->is_IfTrue() || use->in(0)->is_IfFalse()) {
Node* iff = use->in(0)->in(0);
if (iff->Opcode() == Op_If && iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
// We may have Opaque4 node between If and Bool nodes.
// Bail out in such case - we need to preserve Opaque4 for correct
// processing predicates after loop opts.
bool can_reduce = (iff->Opcode() == Op_If) && iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp();
if (can_reduce) {
Node* iff_cmp = iff->in(1)->in(1);
int opc = iff_cmp->Opcode();
if ((opc == Op_CmpP || opc == Op_CmpN) && !can_reduce_cmp(n, iff_cmp)) {
can_reduce = (opc == Op_CmpP || opc == Op_CmpN) && can_reduce_cmp(n, iff_cmp);
}
if (!can_reduce) {
#ifndef PRODUCT
if (TraceReduceAllocationMerges) {
tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP %d doesn't have simple control.", n->_idx, _invocation, use->_idx);
n->dump(5);
}
#endif
return false;
if (TraceReduceAllocationMerges) {
tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP %d doesn't have simple control.", n->_idx, _invocation, use->_idx);
n->dump(5);
}
#endif
return false;
}
}
}
@@ -651,7 +656,12 @@ Node* ConnectionGraph::specialize_cmp(Node* base, Node* curr_ctrl) {
if (curr_ctrl == nullptr || curr_ctrl->is_Region()) {
con = _igvn->zerocon(t->basic_type());
} else {
Node* curr_cmp = curr_ctrl->in(0)->in(1)->in(1); // true/false -> if -> bool -> cmp
// can_reduce_check_users() verified graph: true/false -> if -> bool -> cmp
assert(curr_ctrl->in(0)->Opcode() == Op_If, "unexpected node %s", curr_ctrl->in(0)->Name());
Node* bol = curr_ctrl->in(0)->in(1);
assert(bol->is_Bool(), "unexpected node %s", bol->Name());
Node* curr_cmp = bol->in(1);
assert(curr_cmp->Opcode() == Op_CmpP || curr_cmp->Opcode() == Op_CmpN, "unexpected node %s", curr_cmp->Name());
con = curr_cmp->in(1)->is_Con() ? curr_cmp->in(1) : curr_cmp->in(2);
}

@@ -3501,12 +3511,11 @@ bool ConnectionGraph::not_global_escape(Node *n) {
// and locked code region (identified by BoxLockNode) is balanced:
// all compiled code paths have corresponding Lock/Unlock pairs.
bool ConnectionGraph::can_eliminate_lock(AbstractLockNode* alock) {
BoxLockNode* box = alock->box_node()->as_BoxLock();
if (!box->is_unbalanced() && not_global_escape(alock->obj_node())) {
if (alock->is_balanced() && not_global_escape(alock->obj_node())) {
if (EliminateNestedLocks) {
// We can mark whole locking region as Local only when only
// one object is used for locking.
box->set_local();
alock->box_node()->as_BoxLock()->set_local();
}
return true;
}

@@ -143,21 +143,24 @@ void IdealGraphPrinter::init(const char* file_name, bool use_multiple_files, boo
_depth = 0;
_current_method = nullptr;
_network_stream = nullptr;
_append = append;

if (file_name != nullptr) {
init_file_stream(file_name, use_multiple_files, append);
init_file_stream(file_name, use_multiple_files);
} else {
init_network_stream();
}
_xml = new (mtCompiler) xmlStream(_output);
if (!append) {
if (!_append) {
head(TOP_ELEMENT);
}
}

// Destructor, close file or network stream
IdealGraphPrinter::~IdealGraphPrinter() {
tail(TOP_ELEMENT);
if (!_append) {
tail(TOP_ELEMENT);
}

// tty->print_cr("Walk time: %d", (int)_walk_time.milliseconds());
// tty->print_cr("Output time: %d", (int)_output_time.milliseconds());
@@ -860,10 +863,10 @@ void IdealGraphPrinter::print(const char *name, Node *node) {
_xml->flush();
}

void IdealGraphPrinter::init_file_stream(const char* file_name, bool use_multiple_files, bool append) {
void IdealGraphPrinter::init_file_stream(const char* file_name, bool use_multiple_files) {
ThreadCritical tc;
if (use_multiple_files && _file_count != 0) {
assert(!append, "append should only be used for debugging with a single file");
assert(!_append, "append should only be used for debugging with a single file");
ResourceMark rm;
stringStream st;
const char* dot = strrchr(file_name, '.');
@@ -875,10 +878,10 @@
}
_output = new (mtCompiler) fileStream(st.as_string(), "w");
} else {
_output = new (mtCompiler) fileStream(file_name, append ? "a" : "w");
_output = new (mtCompiler) fileStream(file_name, _append ? "a" : "w");
}
if (use_multiple_files) {
assert(!append, "append should only be used for debugging with a single file");
assert(!_append, "append should only be used for debugging with a single file");
_file_count++;
}
}
|
||||
@@ -909,9 +912,16 @@ void IdealGraphPrinter::update_compiled_method(ciMethod* current_method) {
|
||||
assert(C != nullptr, "must already be set");
|
||||
if (current_method != _current_method) {
|
||||
// If a different method, end the old and begin with the new one.
|
||||
end_method();
|
||||
_current_method = nullptr;
|
||||
begin_method();
|
||||
if (_append) {
|
||||
// Do not call `end_method` if we are appending, just update `_current_method`,
|
||||
// because `begin_method` is not called in the constructor in append mode.
|
||||
_current_method = current_method;
|
||||
} else {
|
||||
// End the old method and begin a new one.
|
||||
// Don't worry about `_current_method`, `end_method` will clear it.
|
||||
end_method();
|
||||
begin_method();
|
||||
}
|
||||
}
|
||||
}
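The append-mode plumbing in this file ultimately comes down to the open mode passed to fileStream: "a" keeps earlier graphs in the file, "w" truncates it. A minimal standalone sketch of that choice with plain C I/O (the file name and flag variable here are hypothetical, not taken from the diff):

#include <cstdio>

int main() {
  bool append = true; // stands in for the printer's _append flag
  // "a" preserves previously written graphs, "w" starts the file over.
  FILE* f = std::fopen("ideal_graph.xml", append ? "a" : "w");
  if (f != nullptr) {
    std::fputs("<!-- one more compilation -->\n", f);
    std::fclose(f);
  }
  return 0;
}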

@@ -96,6 +96,7 @@ class IdealGraphPrinter : public CHeapObj<mtCompiler> {
bool _traverse_outs;
Compile *C;
double _max_freq;
bool _append;

void print_method(ciMethod* method, int bci, InlineTree* tree);
void print_inline_tree(InlineTree* tree);
@@ -118,7 +119,7 @@ class IdealGraphPrinter : public CHeapObj<mtCompiler> {
void head(const char *name);
void text(const char *s);
void init(const char* file_name, bool use_multiple_files, bool append);
void init_file_stream(const char* file_name, bool use_multiple_files, bool append);
void init_file_stream(const char* file_name, bool use_multiple_files);
void init_network_stream();
IdealGraphPrinter();
~IdealGraphPrinter();

@@ -5463,42 +5463,72 @@ void LibraryCallKit::create_new_uncommon_trap(CallStaticJavaNode* uncommon_trap_
uncommon_trap_call->set_req(0, top()); // not used anymore, kill it
}

// Common checks for array sorting intrinsics arguments.
// Returns `true` if checks passed.
bool LibraryCallKit::check_array_sort_arguments(Node* elementType, Node* obj, BasicType& bt) {
// check address of the class
if (elementType == nullptr || elementType->is_top()) {
return false; // dead path
}
const TypeInstPtr* elem_klass = gvn().type(elementType)->isa_instptr();
if (elem_klass == nullptr) {
return false; // dead path
}
// java_mirror_type() returns non-null for compile-time Class constants only
ciType* elem_type = elem_klass->java_mirror_type();
if (elem_type == nullptr) {
return false;
}
bt = elem_type->basic_type();
// Disable the intrinsic if the CPU does not support SIMD sort
if (!Matcher::supports_simd_sort(bt)) {
return false;
}
// check address of the array
if (obj == nullptr || obj->is_top()) {
return false; // dead path
}
const TypeAryPtr* obj_t = _gvn.type(obj)->isa_aryptr();
if (obj_t == nullptr || obj_t->elem() == Type::BOTTOM) {
return false; // failed input validation
}
return true;
}

//------------------------------inline_array_partition-----------------------
bool LibraryCallKit::inline_array_partition() {
address stubAddr = StubRoutines::select_array_partition_function();
if (stubAddr == nullptr) {
return false; // Intrinsic's stub is not implemented on this platform
}
assert(callee()->signature()->size() == 9, "arrayPartition has 8 parameters (one long)");

Node* elementType = null_check(argument(0));
// no receiver because it is a static method
Node* elementType = argument(0);
Node* obj = argument(1);
Node* offset = argument(2);
Node* offset = argument(2); // long
Node* fromIndex = argument(4);
Node* toIndex = argument(5);
Node* indexPivot1 = argument(6);
Node* indexPivot2 = argument(7);
// PartitionOperation: argument(8) is ignored

Node* pivotIndices = nullptr;
BasicType bt = T_ILLEGAL;

if (!check_array_sort_arguments(elementType, obj, bt)) {
return false;
}
null_check(obj);
// If obj is dead, only null-path is taken.
if (stopped()) {
return true;
}
// Set the original stack and the reexecute bit for the interpreter to reexecute
// the bytecode that invokes DualPivotQuicksort.partition() if deoptimization happens.
{ PreserveReexecuteState preexecs(this);
jvms()->set_should_reexecute(true);

const TypeInstPtr* elem_klass = gvn().type(elementType)->isa_instptr();
ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type();
BasicType bt = elem_type->basic_type();
// Disable the intrinsic if the CPU does not support SIMD sort
if (!Matcher::supports_simd_sort(bt)) {
return false;
}
address stubAddr = nullptr;
stubAddr = StubRoutines::select_array_partition_function();
// stub not loaded
if (stubAddr == nullptr) {
return false;
}
// get the address of the array
const TypeAryPtr* obj_t = _gvn.type(obj)->isa_aryptr();
if (obj_t == nullptr || obj_t->elem() == Type::BOTTOM ) {
return false; // failed input validation
}
Node* obj_adr = make_unsafe_address(obj, offset);

// create the pivotIndices array of type int and size = 2
@@ -5531,31 +5561,29 @@ bool LibraryCallKit::inline_array_partition() {

//------------------------------inline_array_sort-----------------------
bool LibraryCallKit::inline_array_sort() {
address stubAddr = StubRoutines::select_arraysort_function();
if (stubAddr == nullptr) {
return false; // Intrinsic's stub is not implemented on this platform
}
assert(callee()->signature()->size() == 7, "arraySort has 6 parameters (one long)");

Node* elementType = null_check(argument(0));
// no receiver because it is a static method
Node* elementType = argument(0);
Node* obj = argument(1);
Node* offset = argument(2);
Node* offset = argument(2); // long
Node* fromIndex = argument(4);
Node* toIndex = argument(5);
// SortOperation: argument(6) is ignored

const TypeInstPtr* elem_klass = gvn().type(elementType)->isa_instptr();
ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type();
BasicType bt = elem_type->basic_type();
// Disable the intrinsic if the CPU does not support SIMD sort
if (!Matcher::supports_simd_sort(bt)) {
BasicType bt = T_ILLEGAL;

if (!check_array_sort_arguments(elementType, obj, bt)) {
return false;
}
address stubAddr = nullptr;
stubAddr = StubRoutines::select_arraysort_function();
//stub not loaded
if (stubAddr == nullptr) {
return false;
}

// get address of the array
const TypeAryPtr* obj_t = _gvn.type(obj)->isa_aryptr();
if (obj_t == nullptr || obj_t->elem() == Type::BOTTOM ) {
return false; // failed input validation
null_check(obj);
// If obj is dead, only null-path is taken.
if (stopped()) {
return true;
}
Node* obj_adr = make_unsafe_address(obj, offset);

@@ -7554,8 +7582,6 @@ bool LibraryCallKit::inline_intpoly_montgomeryMult_P256() {
OptoRuntime::intpoly_montgomeryMult_P256_Type(),
stubAddr, stubName, TypePtr::BOTTOM,
a_start, b_start, r_start);
Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
set_result(result);
return true;
}

@@ -279,6 +279,7 @@ class LibraryCallKit : public GraphKit {
JVMState* arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp);
void arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards, int saved_reexecute_sp,
uint new_idx);
bool check_array_sort_arguments(Node* elementType, Node* obj, BasicType& bt);
bool inline_array_sort();
bool inline_array_partition();
typedef enum { LS_get_add, LS_get_set, LS_cmp_swap, LS_cmp_swap_weak, LS_cmp_exchange } LoadStoreKind;

@@ -46,7 +46,7 @@ private:
Eliminated // All lock/unlock in region were eliminated
} _kind;

#ifdef ASSERT
#ifndef PRODUCT
const char* _kind_name[6] = {
"Regular",
"Local",
@@ -124,7 +124,9 @@ public:

#ifndef PRODUCT
virtual void format( PhaseRegAlloc *, outputStream *st ) const;
virtual void dump_spec(outputStream *st) const { st->print(" Lock %d",_slot); }
virtual void dump_spec(outputStream *st) const {
st->print(" Lock slot: %d, Kind: %s", _slot, _kind_name[(int)_kind]);
}
#endif
};

@@ -2571,7 +2571,7 @@ Node *LoopLimitNode::Ideal(PhaseGVN *phase, bool can_reshape) {

const TypeInt* init_t = phase->type(in(Init) )->is_int();
const TypeInt* limit_t = phase->type(in(Limit))->is_int();
int stride_p;
jlong stride_p;
jlong lim, ini;
julong max;
if (stride_con > 0) {
@@ -2580,10 +2580,10 @@ Node *LoopLimitNode::Ideal(PhaseGVN *phase, bool can_reshape) {
ini = init_t->_lo;
max = (julong)max_jint;
} else {
stride_p = -stride_con;
stride_p = -(jlong)stride_con;
lim = init_t->_hi;
ini = limit_t->_lo;
max = (julong)min_jint;
max = (julong)(juint)min_jint; // double cast to get 0x0000000080000000, not 0xffffffff80000000
}
julong range = lim - ini + stride_p;
if (range <= max) {

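The double cast in the hunk above is the whole point of the fix: converting min_jint straight to a 64-bit unsigned value sign-extends, while going through an unsigned 32-bit type first zero-extends. A standalone sketch using fixed-width standard types in place of HotSpot's jlong/julong:

#include <cstdint>
#include <cstdio>

int main() {
  int32_t min_jint = INT32_MIN;
  uint64_t single_cast = (uint64_t)min_jint;           // sign-extends: 0xffffffff80000000
  uint64_t double_cast = (uint64_t)(uint32_t)min_jint; // zero-extends: 0x0000000080000000
  printf("%016llx\n%016llx\n",
         (unsigned long long)single_cast, (unsigned long long)double_cast);
  return 0;
}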
@@ -795,18 +795,25 @@ Node *PhaseIdealLoop::conditional_move( Node *region ) {
// Avoid duplicated float compare.
if (phis > 1 && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) return nullptr;

// Ignore cost if CMOVE can be moved outside the loop.
if (used_inside_loop && cost >= ConditionalMoveLimit) {
return nullptr;
float infrequent_prob = PROB_UNLIKELY_MAG(3);
// Ignore cost and blocks frequency if CMOVE can be moved outside the loop.
if (used_inside_loop) {
if (cost >= ConditionalMoveLimit) return nullptr; // Too much goo

// BlockLayoutByFrequency optimization moves infrequent branch
// from hot path. No point in CMOV'ing in such case (110 is used
// instead of 100 to take into account not exactness of float value).
if (BlockLayoutByFrequency) {
infrequent_prob = MAX2(infrequent_prob, (float)BlockLayoutMinDiamondPercentage/110.0f);
}
}
// Check for highly predictable branch. No point in CMOV'ing if
// we are going to predict accurately all the time.
constexpr float infrequent_prob = PROB_UNLIKELY_MAG(2);
if (C->use_cmove() && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) {
//keep going
} else if (iff->_prob < infrequent_prob || iff->_prob > (1.0f - infrequent_prob)) {
} else if (iff->_prob < infrequent_prob ||
iff->_prob > (1.0f - infrequent_prob))
return nullptr;
}

// --------------
// Now replace all Phis with CMOV's
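For the probability cut-off restored above: assuming PROB_UNLIKELY_MAG(3) evaluates to 1e-3 and the BlockLayoutMinDiamondPercentage flag defaults to 20 (both values are assumptions here, not taken from this diff), the effective threshold works out as follows:

#include <algorithm>
#include <cstdio>

int main() {
  float infrequent_prob = 1e-3f;                     // assumed PROB_UNLIKELY_MAG(3)
  float block_layout_min_diamond_percentage = 20.0f; // assumed default flag value
  // Mirrors: infrequent_prob = MAX2(infrequent_prob, percentage / 110.0f);
  infrequent_prob = std::max(infrequent_prob,
                             block_layout_min_diamond_percentage / 110.0f);
  // Branches more skewed than roughly 18% / 82% are left to block layout
  // rather than converted to CMOV.
  printf("threshold = %.4f\n", infrequent_prob); // 0.1818
  return 0;
}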
@@ -2987,52 +2994,101 @@ RegionNode* PhaseIdealLoop::insert_region_before_proj(ProjNode* proj) {
return reg;
}

//------------------------------ insert_cmpi_loop_exit -------------------------------------
// Clone a signed compare loop exit from an unsigned compare and
// insert it before the unsigned cmp on the stay-in-loop path.
// All new nodes inserted in the dominator tree between the original
// if and its projections. The original if test is replaced with
// a constant to force the stay-in-loop path.
// Idea
// ----
// Partial Peeling tries to rotate the loop in such a way that it can later be turned into a counted loop. Counted loops
// require a signed loop exit test. When calling this method, we've only found a suitable unsigned test to partial peel
// with. Therefore, we try to split off a signed loop exit test from the unsigned test such that it can be used as new
// loop exit while keeping the unsigned test unchanged and preserving the same behavior as if we've used the unsigned
// test alone instead:
//
// This is done to make sure that the original if and its projections
// still dominate the same set of control nodes, that the ctrl() relation
// from data nodes to them is preserved, and that their loop nesting is
// preserved.
// Before Partial Peeling:
// Loop:
// <peeled section>
// Split off signed loop exit test
// <-- CUT HERE -->
// Unchanged unsigned loop exit test
// <rest of unpeeled section>
// goto Loop
//
// before
// if(i <u limit) unsigned compare loop exit
// After Partial Peeling:
// <cloned peeled section>
// Cloned split off signed loop exit test
// Loop:
// Unchanged unsigned loop exit test
// <rest of unpeeled section>
// <peeled section>
// Split off signed loop exit test
// goto Loop
//
// Details
// -------
// Before:
// if (i <u limit) Unsigned loop exit condition
// / |
// v v
// exit-proj stay-in-loop-proj
//
// after
// if(stay-in-loop-const) original if
// / |
// / v
// / if(i < limit) new signed test
// Split off a signed loop exit test (i.e. with CmpI) from an unsigned loop exit test (i.e. with CmpU) and insert it
// before the CmpU on the stay-in-loop path and keep both tests:
//
// if (i < limit) Signed loop exit test
// / |
// / if (i <u limit) Unsigned loop exit test
// / / |
// / / v
// / / if(i <u limit) new cloned unsigned test
// / / / |
// v v v |
// region |
// | |
// dum-if |
// / | |
// ether | |
// v v
// v v v
// exit-region stay-in-loop-proj
//
// Implementation
// --------------
// We need to make sure that the new signed loop exit test is properly inserted into the graph such that the unsigned
// loop exit test still dominates the same set of control nodes, the ctrl() relation from data nodes to both loop
// exit tests is preserved, and their loop nesting is correct.
//
// To achieve that, we clone the unsigned loop exit test completely (leave it unchanged), insert the signed loop exit
// test above it and kill the original unsigned loop exit test by setting its condition to a constant
// (i.e. stay-in-loop-const in graph below) such that IGVN can fold it later:
//
// if (stay-in-loop-const) Killed original unsigned loop exit test
// / |
// / v
// / if (i < limit) Split off signed loop exit test
// / / |
// / / v
// / / if (i <u limit) Cloned unsigned loop exit test
// / / / |
// v v v |
// exit-region |
// | |
// dummy-if |
// / | |
// dead | |
// v v
// exit-proj stay-in-loop-proj
//
IfNode* PhaseIdealLoop::insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree *loop) {
// Note: The dummy-if is inserted to create a region to merge the loop exits between the original to be killed unsigned
// loop exit test and its exit projection while keeping the exit projection (also see insert_region_before_proj()).
//
// Requirements
// ------------
// Note that we can only split off a signed loop exit test from the unsigned loop exit test when the behavior is exactly
// the same as before with only a single unsigned test. This is only possible if certain requirements are met.
// Otherwise, we need to bail out (see comments in the code below).
IfNode* PhaseIdealLoop::insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree* loop) {
const bool Signed = true;
const bool Unsigned = false;

BoolNode* bol = if_cmpu->in(1)->as_Bool();
if (bol->_test._test != BoolTest::lt) return nullptr;
if (bol->_test._test != BoolTest::lt) {
return nullptr;
}
CmpNode* cmpu = bol->in(1)->as_Cmp();
if (cmpu->Opcode() != Op_CmpU) return nullptr;
assert(cmpu->Opcode() == Op_CmpU, "must be unsigned comparison");

int stride = stride_of_possible_iv(if_cmpu);
if (stride == 0) return nullptr;
if (stride == 0) {
return nullptr;
}

Node* lp_proj = stay_in_loop(if_cmpu, loop);
guarantee(lp_proj != nullptr, "null loop node");
@@ -3044,14 +3100,93 @@ IfNode* PhaseIdealLoop::insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree *lo
// We therefore can't add a single exit condition.
return nullptr;
}
// The loop exit condition is !(i <u limit) ==> (i < 0 || i >= limit).
// Split out the exit condition (i < 0) for stride < 0 or (i >= limit) for stride > 0.
Node* limit = nullptr;
// The unsigned loop exit condition is
// !(i <u limit)
// = i >=u limit
//
// First, we note that for any x for which
// 0 <= x <= INT_MAX
// we can convert x to an unsigned int and still get the same guarantee:
// 0 <= (uint) x <= INT_MAX = (uint) INT_MAX
// 0 <=u (uint) x <=u INT_MAX = (uint) INT_MAX (LEMMA)
//
// With that in mind, if
// limit >= 0 (COND)
// then the unsigned loop exit condition
// i >=u limit (ULE)
// is equivalent to
// i < 0 || i >= limit (SLE-full)
// because either i is negative and therefore always greater than MAX_INT when converting to unsigned
// (uint) i >=u MAX_INT >= limit >= 0
// or otherwise
// i >= limit >= 0
// holds due to (LEMMA).
//
// For completeness, a counterexample with limit < 0:
// Assume i = -3 and limit = -2:
// i < 0
// -3 < 0
// is true and thus also "i < 0 || i >= limit". But
// i >=u limit
// -3 >=u -2
// is false.
Node* limit = cmpu->in(2);
const TypeInt* type_limit = _igvn.type(limit)->is_int();
if (type_limit->_lo < 0) {
return nullptr;
}

// We prove below that we can extract a single signed loop exit condition from (SLE-full), depending on the stride:
// stride < 0:
// i < 0 (SLE = SLE-negative)
// stride > 0:
// i >= limit (SLE = SLE-positive)
// such that we have the following graph before Partial Peeling with stride > 0 (similar for stride < 0):
//
// Loop:
// <peeled section>
// i >= limit (SLE-positive)
// <-- CUT HERE -->
// i >=u limit (ULE)
// <rest of unpeeled section>
// goto Loop
//
// We exit the loop if:
// (SLE) is true OR (ULE) is true
// However, if (SLE) is true then (ULE) also needs to be true to ensure the exact same behavior. Otherwise, we wrongly
// exit a loop that should not have been exited if we did not apply Partial Peeling. More formally, we need to ensure:
// (SLE) IMPLIES (ULE)
// This indeed holds when (COND) is given:
// - stride > 0:
// i >= limit // (SLE = SLE-positive)
// i >= limit >= 0 // (COND)
// i >=u limit >= 0 // (LEMMA)
// which is the unsigned loop exit condition (ULE).
// - stride < 0:
// i < 0 // (SLE = SLE-negative)
// (uint) i >u MAX_INT // (NEG) all negative values are greater than MAX_INT when converted to unsigned
// MAX_INT >= limit >= 0 // (COND)
// MAX_INT >=u limit >= 0 // (LEMMA)
// and thus from (NEG) and (LEMMA):
// i >=u limit
// which is the unsigned loop exit condition (ULE).
//
//
// After Partial Peeling, we have the following structure for stride > 0 (similar for stride < 0):
// <cloned peeled section>
// i >= limit (SLE-positive)
// Loop:
// i >=u limit (ULE)
// <rest of unpeeled section>
// <peeled section>
// i >= limit (SLE-positive)
// goto Loop
Node* rhs_cmpi;
if (stride > 0) {
limit = cmpu->in(2);
rhs_cmpi = limit; // For i >= limit
} else {
limit = _igvn.makecon(TypeInt::ZERO);
set_ctrl(limit, C->root());
rhs_cmpi = _igvn.makecon(TypeInt::ZERO); // For i < 0
set_ctrl(rhs_cmpi, C->root());
}
// Create a new region on the exit path
RegionNode* reg = insert_region_before_proj(lp_exit);
@@ -3059,7 +3194,7 @@ IfNode* PhaseIdealLoop::insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree *lo

// Clone the if-cmpu-true-false using a signed compare
BoolTest::mask rel_i = stride > 0 ? bol->_test._test : BoolTest::ge;
ProjNode* cmpi_exit = insert_if_before_proj(cmpu->in(1), Signed, rel_i, limit, lp_continue);
ProjNode* cmpi_exit = insert_if_before_proj(cmpu->in(1), Signed, rel_i, rhs_cmpi, lp_continue);
reg->add_req(cmpi_exit);

// Clone the if-cmpu-true-false

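The (COND)/(LEMMA) argument above can be checked mechanically: for limit >= 0 the unsigned exit test (ULE) agrees with the split form (SLE-full), and the quoted counterexample shows why limit < 0 must be rejected. A small self-contained check, with standard C++ types standing in for HotSpot's jint/uint:

#include <cassert>
#include <cstdint>

static bool ule(int32_t i, int32_t limit) {      // i >=u limit
  return (uint32_t)i >= (uint32_t)limit;
}
static bool sle_full(int32_t i, int32_t limit) { // i < 0 || i >= limit
  return i < 0 || i >= limit;
}

int main() {
  const int32_t samples[] = { INT32_MIN, -3, -2, -1, 0, 1, 2, 1000, INT32_MAX };
  for (int32_t limit : samples) {
    if (limit < 0) continue;                     // (COND) is required
    for (int32_t i : samples) {
      assert(ule(i, limit) == sle_full(i, limit));
    }
  }
  // The counterexample from the comment: limit = -2 breaks the equivalence.
  assert(sle_full(-3, -2) && !ule(-3, -2));
  return 0;
}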
@@ -2045,7 +2045,7 @@ void PhaseMacroExpand::mark_eliminated_box(Node* box, Node* obj) {

//-----------------------mark_eliminated_locking_nodes-----------------------
void PhaseMacroExpand::mark_eliminated_locking_nodes(AbstractLockNode *alock) {
if (alock->box_node()->as_BoxLock()->is_unbalanced()) {
if (!alock->is_balanced()) {
return; // Can't do any more elimination for this locking region
}
if (EliminateNestedLocks) {

@@ -2984,6 +2984,9 @@ StoreNode* MergePrimitiveArrayStores::run() {
type2aelembytes(bt) != _store->memory_size()) {
return nullptr;
}
if (_store->is_unsafe_access()) {
return nullptr;
}

// The _store must be the "last" store in a chain. If we find a use we could merge with
// then that use or a store further down is the "last" store.
@@ -3017,11 +3020,13 @@ bool MergePrimitiveArrayStores::is_compatible_store(const StoreNode* other_store
int opc = _store->Opcode();
assert(opc == Op_StoreB || opc == Op_StoreC || opc == Op_StoreI, "precondition");
assert(_store->adr_type()->isa_aryptr() != nullptr, "must be array store");
assert(!_store->is_unsafe_access(), "no unsafe accesses");

if (other_store == nullptr ||
_store->Opcode() != other_store->Opcode() ||
other_store->adr_type() == nullptr ||
other_store->adr_type()->isa_aryptr() == nullptr) {
other_store->adr_type()->isa_aryptr() == nullptr ||
other_store->is_unsafe_access()) {
return false;
}
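The transformation these guards protect, in miniature: adjacent small array stores can be replaced by one wider store of the combined value. A sketch of the equivalence on a little-endian machine (the endianness assumption is mine, not the diff's):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  uint8_t merged[4], byte_by_byte[4];
  uint32_t v = 0x04030201;
  byte_by_byte[0] = (uint8_t)v;          // four StoreB-style stores
  byte_by_byte[1] = (uint8_t)(v >> 8);
  byte_by_byte[2] = (uint8_t)(v >> 16);
  byte_by_byte[3] = (uint8_t)(v >> 24);
  std::memcpy(merged, &v, sizeof v);     // one StoreI-style store
  assert(std::memcmp(merged, byte_by_byte, 4) == 0); // holds on little-endian
  return 0;
}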

@@ -1414,8 +1414,8 @@ const TypeFunc* OptoRuntime::intpoly_montgomeryMult_P256_Type() {

// result type needed
fields = TypeTuple::fields(1);
fields[TypeFunc::Parms + 0] = TypeInt::INT; // carry bits in output
const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
fields[TypeFunc::Parms + 0] = nullptr; // void
const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
return TypeFunc::make(domain, range);
}

@@ -1434,7 +1434,7 @@ const TypeFunc* OptoRuntime::intpoly_assign_Type() {

// result type needed
fields = TypeTuple::fields(1);
fields[TypeFunc::Parms + 0] = NULL; // void
fields[TypeFunc::Parms + 0] = nullptr; // void
const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
return TypeFunc::make(domain, range);
}

@@ -417,6 +417,10 @@ VPointer::VPointer(const MemNode* mem, const VLoop& vloop,
#ifdef ASSERT
_debug_invar(nullptr), _debug_negate_invar(false), _debug_invar_scale(nullptr),
#endif
_has_int_index_after_convI2L(false),
_int_index_after_convI2L_offset(0),
_int_index_after_convI2L_invar(nullptr),
_int_index_after_convI2L_scale(0),
_nstack(nstack), _analyze_only(analyze_only), _stack_idx(0)
#ifndef PRODUCT
, _tracer(vloop.is_trace_pointer_analysis())
@@ -496,6 +500,11 @@ VPointer::VPointer(const MemNode* mem, const VLoop& vloop,
return;
}

if (!is_safe_to_use_as_simple_form(base, adr)) {
assert(!valid(), "does not have simple form");
return;
}

_base = base;
_adr = adr;
assert(valid(), "Usable");
@@ -509,6 +518,10 @@ VPointer::VPointer(VPointer* p) :
#ifdef ASSERT
_debug_invar(nullptr), _debug_negate_invar(false), _debug_invar_scale(nullptr),
#endif
_has_int_index_after_convI2L(false),
_int_index_after_convI2L_offset(0),
_int_index_after_convI2L_invar(nullptr),
_int_index_after_convI2L_scale(0),
_nstack(p->_nstack), _analyze_only(p->_analyze_only), _stack_idx(p->_stack_idx)
#ifndef PRODUCT
, _tracer(p->_tracer._is_trace_alignment)
@@ -531,6 +544,354 @@ int VPointer::invar_factor() const {
return 1;
}

// We would like to make decisions about aliasing (i.e. removing memory edges) and adjacency
// (i.e. which loads/stores can be packed) based on the simple form:
//
// s_pointer = adr + offset + invar + scale * ConvI2L(iv)
//
// However, we parse the compound-long-int form:
//
// c_pointer = adr + long_offset + long_invar + long_scale * ConvI2L(int_index)
// int_index = int_offset + int_invar + int_scale * iv
//
// In general, the simple and the compound-long-int form do not always compute the same pointer
// at runtime. For example, the simple form would give a different result due to an overflow
// in the int_index.
//
// Example:
// For both forms, we have:
// iv = 0
// scale = 1
//
// We now account the offset and invar once to the long part and once to the int part:
// Pointer 1 (long offset and long invar):
// long_offset = min_int
// long_invar = min_int
// int_offset = 0
// int_invar = 0
//
// Pointer 2 (int offset and int invar):
// long_offset = 0
// long_invar = 0
// int_offset = min_int
// int_invar = min_int
//
// This gives us the following pointers:
// Compound-long-int form pointers:
// Form:
// c_pointer = adr + long_offset + long_invar + long_scale * ConvI2L(int_offset + int_invar + int_scale * iv)
//
// Pointers:
// c_pointer1 = adr + min_int + min_int + 1 * ConvI2L(0 + 0 + 1 * 0)
// = adr + min_int + min_int
// = adr - 2^32
//
// c_pointer2 = adr + 0 + 0 + 1 * ConvI2L(min_int + min_int + 1 * 0)
// = adr + ConvI2L(min_int + min_int)
// = adr + 0
// = adr
//
// Simple form pointers:
// Form:
// s_pointer = adr + offset + invar + scale * ConvI2L(iv)
// s_pointer = adr + (long_offset + int_offset) + (long_invar + int_invar) + (long_scale * int_scale) * ConvI2L(iv)
//
// Pointers:
// s_pointer1 = adr + (min_int + 0 ) + (min_int + 0 ) + 1 * 0
// = adr + min_int + min_int
// = adr - 2^32
// s_pointer2 = adr + (0 + min_int ) + (0 + min_int ) + 1 * 0
// = adr + min_int + min_int
// = adr - 2^32
//
// We see that the two addresses are actually 2^32 bytes apart (derived from the c_pointers), but their simple forms look identical.
//
// Hence, we need to determine in which cases it is safe to make decisions based on the simple
// form, rather than the compound-long-int form. If we cannot prove that using the simple form
// is safe (i.e. equivalent to the compound-long-int form), then we do not get a valid VPointer,
// and the associated memop cannot be vectorized.
bool VPointer::is_safe_to_use_as_simple_form(Node* base, Node* adr) const {
#ifndef _LP64
// On 32-bit platforms, there is never an explicit int_index with ConvI2L for the iv. Thus, the
// parsed pointer form is always the simple form, with int operations:
//
// pointer = adr + offset + invar + scale * iv
//
assert(!_has_int_index_after_convI2L, "32-bit never has an int_index with ConvI2L for the iv");
return true;
#else

// Array accesses that are not Unsafe always have a RangeCheck which ensures that there is no
// int_index overflow. This implies that the conversion to long can be done separately:
//
// ConvI2L(int_index) = ConvI2L(int_offset) + ConvI2L(int_invar) + ConvI2L(scale) * ConvI2L(iv)
//
// And hence, the simple form is guaranteed to be identical to the compound-long-int form at
// runtime and the VPointer is safe/valid to be used.
const TypeAryPtr* ary_ptr_t = _mem->adr_type()->isa_aryptr();
if (ary_ptr_t != nullptr) {
if (!_mem->is_unsafe_access()) {
return true;
}
}

// We did not find the int_index. Just to be safe, reject this VPointer.
if (!_has_int_index_after_convI2L) {
return false;
}

int int_offset = _int_index_after_convI2L_offset;
Node* int_invar = _int_index_after_convI2L_invar;
int int_scale = _int_index_after_convI2L_scale;
int long_scale = _scale / int_scale;

// If "int_index = iv", then the simple form is identical to the compound-long-int form.
//
// int_index = int_offset + int_invar + int_scale * iv
// = 0 0 1 * iv
// = iv
if (int_offset == 0 && int_invar == nullptr && int_scale == 1) {
return true;
}

// Intuition: What happens if the int_index overflows? Let us look at two pointers on the "overflow edge":
//
// pointer1 = adr + ConvI2L(int_index1)
// pointer2 = adr + ConvI2L(int_index2)
//
// int_index1 = max_int + 0 = max_int -> very close to but before the overflow
// int_index2 = max_int + 1 = min_int -> just enough to get the overflow
//
// When looking at the difference of pointer1 and pointer2, we notice that it is very large
// (almost 2^32). Since arrays have at most 2^31 elements, chances are high that pointer2 is
// an actual out-of-bounds access at runtime. These would normally be prevented by range checks
// at runtime. However, if the access was done by using Unsafe, where range checks are omitted,
// then an out-of-bounds access constitutes undefined behavior. This means that we are allowed to
// do anything, including changing the behavior.
//
// If we can set the right conditions, we have a guarantee that an overflow is either impossible
// (no overflow or range checks preventing that) or undefined behavior. In both cases, we are
// safe to do a vectorization.
//
// Approach: We want to prove a lower bound for the distance between these two pointers, and an
// upper bound for the size of a memory object. We can derive such an upper bound for
// arrays. We know they have at most 2^31 elements. If we know the size of the elements
// in bytes, we have:
//
// array_element_size_in_bytes * 2^31 >= max_possible_array_size_in_bytes
// >= array_size_in_bytes (ARR)
//
// If some small difference "delta" leads to an int_index overflow, we know that the
// int_index1 before overflow must have been close to max_int, and the int_index2 after
// the overflow must be close to min_int:
//
// pointer1 = adr + long_offset + long_invar + long_scale * ConvI2L(int_index1)
// =approx adr + long_offset + long_invar + long_scale * max_int
//
// pointer2 = adr + long_offset + long_invar + long_scale * ConvI2L(int_index2)
// =approx adr + long_offset + long_invar + long_scale * min_int
//
// We realize that the pointer difference is very large:
//
// difference =approx long_scale * 2^32
//
// Hence, if we set the right condition for long_scale and array_element_size_in_bytes,
// we can prove that an overflow is impossible (or would imply undefined behaviour).
//
// We must now take this intuition, and develop a rigorous proof. We start by stating the problem
// more precisely, with the help of some definitions and the Statement we are going to prove.
//
// Definition:
// Two VPointers are "comparable" (i.e. VPointer::comparable is true, set with VPointer::cmp()),
// iff all of these conditions apply for the simple form:
// 1) Both VPointers are valid.
// 2) The adr are identical, or both are array bases of different arrays.
// 3) They have identical scale.
// 4) They have identical invar.
// 5) The difference in offsets is limited: abs(offset1 - offset2) < 2^31. (DIFF)
//
// For the Vectorization Optimization, we pair-wise compare VPointers and determine if they are:
// 1) "not comparable":
// We do not optimize them (assume they alias, not assume adjacency).
//
// Whenever we chose this option based on the simple form, it is also correct based on the
// compound-long-int form, since we make no optimizations based on it.
//
// 2) "comparable" with different array bases at runtime:
// We assume they do not alias (remove memory edges), but not assume adjacency.
//
// Whenever we have two different array bases for the simple form, we also have different
// array bases for the compound-long-form. Since VPointers provably point to different
// memory objects, they can never alias.
//
// 3) "comparable" with the same base address:
// We compute the relative pointer difference, and based on the load/store size we can
// compute aliasing and adjacency.
//
// We must find a condition under which the pointer difference of the simple form is
// identical to the pointer difference of the compound-long-form. We do this with the
// Statement below, which we then proceed to prove.
//
// Statement:
// If two VPointers satisfy these 3 conditions:
// 1) They are "comparable".
// 2) They have the same base address.
// 3) Their long_scale is a multiple of the array element size in bytes:
//
// abs(long_scale) % array_element_size_in_bytes = 0 (A)
//
// Then their pointer difference of the simple form is identical to the pointer difference
// of the compound-long-int form.
//
// More precisely:
// Such two VPointers by definition have identical adr, invar, and scale.
// Their simple form is:
//
// s_pointer1 = adr + offset1 + invar + scale * ConvI2L(iv) (B1)
// s_pointer2 = adr + offset2 + invar + scale * ConvI2L(iv) (B2)
//
// Thus, the pointer difference of the simple forms collapses to the difference in offsets:
//
// s_difference = s_pointer1 - s_pointer2 = offset1 - offset2 (C)
//
// Their compound-long-int form for these VPointer is:
//
// c_pointer1 = adr + long_offset1 + long_invar1 + long_scale1 * ConvI2L(int_index1) (D1)
// int_index1 = int_offset1 + int_invar1 + int_scale1 * iv (D2)
//
// c_pointer2 = adr + long_offset2 + long_invar2 + long_scale2 * ConvI2L(int_index2) (D3)
// int_index2 = int_offset2 + int_invar2 + int_scale2 * iv (D4)
//
// And these are the offset1, offset2, invar and scale from the simple form (B1) and (B2):
//
// offset1 = long_offset1 + long_scale1 * ConvI2L(int_offset1) (D5)
// offset2 = long_offset2 + long_scale2 * ConvI2L(int_offset2) (D6)
//
// invar = long_invar1 + long_scale1 * ConvI2L(int_invar1)
// = long_invar2 + long_scale2 * ConvI2L(int_invar2) (D7)
//
// scale = long_scale1 * ConvI2L(int_scale1)
// = long_scale2 * ConvI2L(int_scale2) (D8)
//
// The pointer difference of the compound-long-int form is defined as:
//
// c_difference = c_pointer1 - c_pointer2
//
// Thus, the statement claims that for the two VPointer we have:
//
// s_difference = c_difference (Statement)
//
// We prove the Statement with the help of a Lemma:
//
// Lemma:
// There is some integer x, such that:
//
// c_difference = s_difference + array_element_size_in_bytes * x * 2^32 (Lemma)
//
// From condition (DIFF), we can derive:
//
// abs(s_difference) < 2^31 (E)
//
// Assuming the Lemma, we prove the Statement:
// If "x = 0" (intuitively: the int_index does not overflow), then:
// c_difference = s_difference
// and hence the simple form computes the same pointer difference as the compound-long-int form.
// If "x != 0" (intuitively: the int_index overflows), then:
// abs(c_difference) >= abs(s_difference + array_element_size_in_bytes * x * 2^32)
// >= array_element_size_in_bytes * 2^32 - abs(s_difference)
// -- apply (E) --
// > array_element_size_in_bytes * 2^32 - 2^31
// >= array_element_size_in_bytes * 2^31
// -- apply (ARR) --
// >= max_possible_array_size_in_bytes
// >= array_size_in_bytes
//
// This shows that c_pointer1 and c_pointer2 have a distance that exceeds the maximum array size.
// Thus, at least one of the two pointers must be outside of the array bounds. But we can assume
// that out-of-bounds accesses do not happen. If they still do, it is undefined behavior. Hence,
// we are allowed to do anything. We can also "safely" use the simple form in this case even though
// it might not match the compound-long-int form at runtime.
// QED Statement.
//
// We must now prove the Lemma.
//
// ConvI2L always truncates by some multiple of 2^32, i.e. there is some integer y such that:
//
// ConvI2L(y1 + y2) = ConvI2L(y1) + ConvI2L(y2) + 2^32 * y (F)
//
// It follows, that there is an integer y1 such that:
//
// ConvI2L(int_index1) = ConvI2L(int_offset1 + int_invar1 + int_scale1 * iv)
// -- apply (F) --
// = ConvI2L(int_offset1)
// + ConvI2L(int_invar1)
// + ConvI2L(int_scale1) * ConvI2L(iv)
// + y1 * 2^32 (G)
//
// Thus, we can write the compound-long-int form (D1) as:
//
// c_pointer1 = adr + long_offset1 + long_invar1 + long_scale1 * ConvI2L(int_index1)
// -- apply (G) --
// = adr
// + long_offset1
// + long_invar1
// + long_scale1 * ConvI2L(int_offset1)
// + long_scale1 * ConvI2L(int_invar1)
// + long_scale1 * ConvI2L(int_scale1) * ConvI2L(iv)
// + long_scale1 * y1 * 2^32 (H)
//
// And we can write the simple form as:
//
// s_pointer1 = adr + offset1 + invar + scale * ConvI2L(iv)
// -- apply (D5, D7, D8) --
// = adr
// + long_offset1
// + long_scale1 * ConvI2L(int_offset1)
// + long_invar1
// + long_scale1 * ConvI2L(int_invar1)
// + long_scale1 * ConvI2L(int_scale1) * ConvI2L(iv) (K)
//
// We now compute the pointer difference between the simple (K) and compound-long-int form (H).
// Most terms cancel out immediately:
//
// sc_difference1 = c_pointer1 - s_pointer1 = long_scale1 * y1 * 2^32 (L)
//
// Rearranging the equation (L), we get:
//
// c_pointer1 = s_pointer1 + long_scale1 * y1 * 2^32 (M)
//
// And since long_scale1 is a multiple of array_element_size_in_bytes, there is some integer
// x1, such that (M) implies:
//
// c_pointer1 = s_pointer1 + array_element_size_in_bytes * x1 * 2^32 (N)
//
// With an analogue equation for c_pointer2, we can now compute the pointer difference for
// the compound-long-int form:
//
// c_difference = c_pointer1 - c_pointer2
// -- apply (N) --
// = s_pointer1 + array_element_size_in_bytes * x1 * 2^32
// -(s_pointer2 + array_element_size_in_bytes * x2 * 2^32)
// -- where "x = x1 - x2" --
// = s_pointer1 - s_pointer2 + array_element_size_in_bytes * x * 2^32
// -- apply (C) --
// = s_difference + array_element_size_in_bytes * x * 2^32
// QED Lemma.
if (ary_ptr_t != nullptr) {
BasicType array_element_bt = ary_ptr_t->elem()->array_element_basic_type();
if (is_java_primitive(array_element_bt)) {
int array_element_size_in_bytes = type2aelembytes(array_element_bt);
if (abs(long_scale) % array_element_size_in_bytes == 0) {
return true;
}
}
}

// General case: we do not know if it is safe to use the simple form.
return false;
#endif
}
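The min_int example from the comment block can be replayed directly: with Java-style wrapping 32-bit arithmetic, the two compound-long-int pointers come out 2^32 apart while both simple forms collapse to the same value. A standalone sketch, with standard integer types in place of jint/jlong:

#include <cstdint>
#include <cstdio>

// Java-style wrapping 32-bit add (unsigned arithmetic avoids C++ UB).
static int32_t wrap_add(int32_t a, int32_t b) {
  return (int32_t)((uint32_t)a + (uint32_t)b);
}

int main() {
  const int64_t adr = 0;
  const int32_t iv = 0, min_int = INT32_MIN;
  // Pointer 1: offset and invar accounted in the long part.
  int64_t c1 = adr + (int64_t)min_int + (int64_t)min_int
                   + 1 * (int64_t)wrap_add(wrap_add(0, 0), 1 * iv);
  // Pointer 2: offset and invar accounted in the int part; the int_index wraps to 0.
  int64_t c2 = adr + 0 + 0
                   + 1 * (int64_t)wrap_add(wrap_add(min_int, min_int), 1 * iv);
  printf("c1 = %lld  c2 = %lld  diff = %lld\n",
         (long long)c1, (long long)c2, (long long)(c1 - c2)); // diff = -2^32
  // Both simple forms evaluate to adr + min_int + min_int = adr - 2^32.
  return 0;
}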

bool VPointer::is_loop_member(Node* n) const {
Node* n_c = phase()->get_ctrl(n);
return lpt()->is_member(phase()->get_loop(n_c));
@@ -633,6 +994,37 @@ bool VPointer::scaled_iv(Node* n) {
NOT_PRODUCT(_tracer.scaled_iv_6(n, _scale);)
return true;
}
} else if (opc == Op_ConvI2L && !has_iv()) {
// So far we have not found the iv yet, and are about to enter a ConvI2L subgraph,
// which may be the int index (that might overflow) for the memory access, of the form:
//
// int_index = int_offset + int_invar + int_scale * iv
//
// If we simply continue parsing with the current VPointer, then the int_offset and
// int_invar simply get added to the long offset and invar. But for the checks in
// VPointer::is_safe_to_use_as_simple_form() we need to have explicit access to the
// int_index. Thus, we must parse it explicitly here. For this, we use a temporary
// VPointer, to pattern match the int_index sub-expression of the address.

NOT_PRODUCT(Tracer::Depth dddd;)
VPointer tmp(this);
NOT_PRODUCT(_tracer.scaled_iv_8(n, &tmp);)

if (tmp.scaled_iv_plus_offset(n->in(1)) && tmp.has_iv()) {
// We successfully matched an integer index, of the form:
// int_index = int_offset + int_invar + int_scale * iv
_has_int_index_after_convI2L = true;
_int_index_after_convI2L_offset = tmp._offset;
_int_index_after_convI2L_invar = tmp._invar;
_int_index_after_convI2L_scale = tmp._scale;
}

// Now parse it again for the real VPointer. This makes sure that the int_offset, int_invar,
// and int_scale are properly added to the final VPointer's offset, invar, and scale.
if (scaled_iv_plus_offset(n->in(1))) {
NOT_PRODUCT(_tracer.scaled_iv_7(n);)
return true;
}
} else if (opc == Op_ConvI2L || opc == Op_CastII) {
if (scaled_iv_plus_offset(n->in(1))) {
NOT_PRODUCT(_tracer.scaled_iv_7(n);)
@@ -649,8 +1041,17 @@ bool VPointer::scaled_iv(Node* n) {

if (tmp.scaled_iv_plus_offset(n->in(1))) {
int scale = n->in(2)->get_int();
// Accumulate scale.
_scale = tmp._scale << scale;
_offset += tmp._offset << scale;
// Accumulate offset.
int shifted_offset = 0;
if (!try_LShiftI_no_overflow(tmp._offset, scale, shifted_offset)) {
return false; // shift overflow.
}
if (!try_AddI_no_overflow(_offset, shifted_offset, _offset)) {
return false; // add overflow.
}
// Accumulate invar.
if (tmp._invar != nullptr) {
BasicType bt = tmp._invar->bottom_type()->basic_type();
assert(bt == T_INT || bt == T_LONG, "");
@@ -659,6 +1060,13 @@ bool VPointer::scaled_iv(Node* n) {
_debug_invar_scale = n->in(2);
#endif
}

// Forward info about the int_index:
_has_int_index_after_convI2L = tmp._has_int_index_after_convI2L;
_int_index_after_convI2L_offset = tmp._int_index_after_convI2L_offset;
_int_index_after_convI2L_invar = tmp._int_index_after_convI2L_invar;
_int_index_after_convI2L_scale = tmp._int_index_after_convI2L_scale;

NOT_PRODUCT(_tracer.scaled_iv_9(n, _scale, _offset, _invar);)
return true;
}
@@ -676,7 +1084,9 @@ bool VPointer::offset_plus_k(Node* n, bool negate) {

int opc = n->Opcode();
if (opc == Op_ConI) {
_offset += negate ? -(n->get_int()) : n->get_int();
if (!try_AddSubI_no_overflow(_offset, n->get_int(), negate, _offset)) {
return false; // add/sub overflow.
}
NOT_PRODUCT(_tracer.offset_plus_k_2(n, _offset);)
return true;
} else if (opc == Op_ConL) {
@@ -685,7 +1095,9 @@ bool VPointer::offset_plus_k(Node* n, bool negate) {
if (t->higher_equal(TypeLong::INT)) {
jlong loff = n->get_long();
jint off = (jint)loff;
_offset += negate ? -off : loff;
if (!try_AddSubI_no_overflow(_offset, off, negate, _offset)) {
return false; // add/sub overflow.
}
NOT_PRODUCT(_tracer.offset_plus_k_3(n, _offset);)
return true;
}
@@ -700,11 +1112,15 @@ bool VPointer::offset_plus_k(Node* n, bool negate) {
if (opc == Op_AddI) {
if (n->in(2)->is_Con() && invariant(n->in(1))) {
maybe_add_to_invar(n->in(1), negate);
_offset += negate ? -(n->in(2)->get_int()) : n->in(2)->get_int();
if (!try_AddSubI_no_overflow(_offset, n->in(2)->get_int(), negate, _offset)) {
return false; // add/sub overflow.
}
NOT_PRODUCT(_tracer.offset_plus_k_6(n, _invar, negate, _offset);)
return true;
} else if (n->in(1)->is_Con() && invariant(n->in(2))) {
_offset += negate ? -(n->in(1)->get_int()) : n->in(1)->get_int();
if (!try_AddSubI_no_overflow(_offset, n->in(1)->get_int(), negate, _offset)) {
return false; // add/sub overflow.
}
maybe_add_to_invar(n->in(2), negate);
NOT_PRODUCT(_tracer.offset_plus_k_7(n, _invar, negate, _offset);)
return true;
@@ -713,11 +1129,15 @@ bool VPointer::offset_plus_k(Node* n, bool negate) {
if (opc == Op_SubI) {
if (n->in(2)->is_Con() && invariant(n->in(1))) {
maybe_add_to_invar(n->in(1), negate);
_offset += !negate ? -(n->in(2)->get_int()) : n->in(2)->get_int();
if (!try_AddSubI_no_overflow(_offset, n->in(2)->get_int(), !negate, _offset)) {
return false; // add/sub overflow.
}
NOT_PRODUCT(_tracer.offset_plus_k_8(n, _invar, negate, _offset);)
return true;
} else if (n->in(1)->is_Con() && invariant(n->in(2))) {
_offset += negate ? -(n->in(1)->get_int()) : n->in(1)->get_int();
if (!try_AddSubI_no_overflow(_offset, n->in(1)->get_int(), negate, _offset)) {
return false; // add/sub overflow.
}
maybe_add_to_invar(n->in(2), !negate);
NOT_PRODUCT(_tracer.offset_plus_k_9(n, _invar, !negate, _offset);)
return true;
@@ -807,6 +1227,44 @@ void VPointer::maybe_add_to_invar(Node* new_invar, bool negate) {
_invar = register_if_new(add);
}

bool VPointer::try_AddI_no_overflow(int offset1, int offset2, int& result) {
jlong long_offset = java_add((jlong)(offset1), (jlong)(offset2));
jint int_offset = java_add( offset1, offset2);
if (long_offset != int_offset) {
return false;
}
result = int_offset;
return true;
}

bool VPointer::try_SubI_no_overflow(int offset1, int offset2, int& result) {
jlong long_offset = java_subtract((jlong)(offset1), (jlong)(offset2));
jint int_offset = java_subtract( offset1, offset2);
if (long_offset != int_offset) {
return false;
}
result = int_offset;
return true;
}

bool VPointer::try_AddSubI_no_overflow(int offset1, int offset2, bool is_sub, int& result) {
if (is_sub) {
return try_SubI_no_overflow(offset1, offset2, result);
} else {
return try_AddI_no_overflow(offset1, offset2, result);
}
}

bool VPointer::try_LShiftI_no_overflow(int offset, int shift, int& result) {
jlong long_offset = java_shift_left((jlong)(offset), shift);
jint int_offset = java_shift_left( offset, shift);
if (long_offset != int_offset) {
return false;
}
result = int_offset;
return true;
}
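All four helpers above use the same widen-and-compare idiom: perform the operation in 64 bits, repeat it with wrapping 32-bit semantics, and accept the result only if both agree. A standalone sketch of the add case (HotSpot's java_add wraps; unsigned arithmetic stands in for it here):

#include <cassert>
#include <cstdint>

static bool try_add_i_no_overflow(int32_t a, int32_t b, int32_t& result) {
  int64_t wide   = (int64_t)a + (int64_t)b;              // exact 64-bit sum
  int32_t narrow = (int32_t)((uint32_t)a + (uint32_t)b); // wrapping 32-bit sum
  if (wide != narrow) {
    return false; // the 32-bit result would have overflowed
  }
  result = narrow;
  return true;
}

int main() {
  int32_t r = 0;
  assert(try_add_i_no_overflow(40, 2, r) && r == 42);
  assert(!try_add_i_no_overflow(INT32_MAX, 1, r)); // rejected, r left untouched
  return 0;
}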

#ifndef PRODUCT
// Function for printing the fields of a VPointer
void VPointer::print() const {

@@ -667,13 +667,51 @@ private:
// A vectorization pointer (VPointer) has information about an address for
// dependence checking and vector alignment. It's usually bound to a memory
// operation in a counted loop for vectorizable analysis.
//
// We parse and represent pointers of the simple form:
//
// pointer = adr + offset + invar + scale * ConvI2L(iv)
//
// Where:
//
// adr: the base address of an array (base = adr)
// OR
// some address to off-heap memory (base = TOP)
//
// offset: a constant offset
// invar: a runtime variable, which is invariant during the loop
// scale: scaling factor
// iv: loop induction variable
//
// But more precisely, we parse the composite-long-int form:
//
// pointer = adr + long_offset + long_invar + long_scale * ConvI2L(int_offset + inv_invar + int_scale * iv)
//
// pointer = adr + long_offset + long_invar + long_scale * ConvI2L(int_index)
// int_index = int_offset + int_invar + int_scale * iv
//
// However, for aliasing and adjacency checks (e.g. VPointer::cmp()) we always use the simple form to make
// decisions. Hence, we must make sure to only create a "valid" VPointer if the optimisations based on the
// simple form produce the same result as the compound-long-int form would. Intuitively, this depends on
// if the int_index overflows, but the precise conditions are given in VPointer::is_safe_to_use_as_simple_form().
//
// ConvI2L(int_index) = ConvI2L(int_offset + int_invar + int_scale * iv)
// = ConvI2L(int_offset) + ConvI2L(int_invar) + ConvI2L(int_scale) * ConvI2L(iv)
//
|
||||
// scale = long_scale * ConvI2L(int_scale)
|
||||
// offset = long_offset + long_scale * ConvI2L(int_offset)
|
||||
// invar = long_invar + long_scale * ConvI2L(int_invar)
|
||||
//
|
||||
// pointer = adr + offset + invar + scale * ConvI2L(iv)
|
||||
//
|
||||
class VPointer : public ArenaObj {
|
||||
protected:
|
||||
const MemNode* _mem; // My memory reference node
|
||||
const VLoop& _vloop;
|
||||
|
||||
Node* _base; // null if unsafe nonheap reference
|
||||
Node* _adr; // address pointer
|
||||
// Components of the simple form:
|
||||
Node* _base; // Base address of an array OR null if some off-heap memory.
|
||||
Node* _adr; // Same as _base if an array pointer OR some off-heap memory pointer.
|
||||
int _scale; // multiplier for iv (in bytes), 0 if no loop iv
|
||||
int _offset; // constant offset (in bytes)
|
||||
|
||||
@@ -684,6 +722,13 @@ class VPointer : public ArenaObj {
|
||||
Node* _debug_invar_scale; // multiplier for invariant
|
||||
#endif
|
||||
|
||||
// The int_index components of the compound-long-int form. Used to decide if it is safe to use the
|
||||
// simple form rather than the compound-long-int form that was parsed.
|
||||
bool _has_int_index_after_convI2L;
|
||||
int _int_index_after_convI2L_offset;
|
||||
Node* _int_index_after_convI2L_invar;
|
||||
int _int_index_after_convI2L_scale;
|
||||
|
||||
Node_Stack* _nstack; // stack used to record a vpointer trace of variants
|
||||
bool _analyze_only; // Used in loop unrolling only for vpointer trace
|
||||
uint _stack_idx; // Used in loop unrolling only for vpointer trace
|
||||
@@ -723,6 +768,8 @@ class VPointer : public ArenaObj {
|
||||
VPointer(VPointer* p);
|
||||
NONCOPYABLE(VPointer);
|
||||
|
||||
bool is_safe_to_use_as_simple_form(Node* base, Node* adr) const;
|
||||
|
||||
public:
|
||||
bool valid() const { return _adr != nullptr; }
|
||||
bool has_iv() const { return _scale != 0; }
|
||||
@@ -748,10 +795,43 @@ class VPointer : public ArenaObj {
|
||||
return _invar == q._invar;
|
||||
}
|
||||
|
||||
  // We compute if and how two VPointers can alias at runtime, i.e. if the two addressed regions of memory can
  // ever overlap. There are essentially 3 relevant return states:
  // - NotComparable:  Synonymous to "unknown aliasing".
  //                   We have no information about how the two VPointers can alias. They could overlap, refer
  //                   to another location in the same memory object, or point to a completely different object.
  //                   -> Memory edge required. Aliasing unlikely but possible.
  //
  // - Less / Greater: Synonymous to "never aliasing".
  //                   The two VPointers may point into the same memory object, but be non-aliasing (i.e. we
  //                   know both address regions inside the same memory object, but these regions are non-
  //                   overlapping), or the VPointers point to entirely different objects.
  //                   -> No memory edge required. Aliasing impossible.
  //
  // - Equal:          Synonymous to "overlap, or point to different memory objects".
  //                   The two VPointers either overlap on the same memory object, or point to two different
  //                   memory objects.
  //                   -> Memory edge required. Aliasing likely.
  //
  // In a future refactoring, we can simplify to two states:
  // - NeverAlias:     instead of Less / Greater
  // - MayAlias:       instead of Equal / NotComparable
  //
  // Two VPointers are "comparable" (Less / Greater / Equal), iff all of these conditions apply:
  //   1) Both are valid, i.e. expressible in the compound-long-int or simple form.
  //   2) The adr are identical, or both are array bases of different arrays.
  //   3) They have identical scale.
  //   4) They have identical invar.
  //   5) The difference in offsets is limited: abs(offset0 - offset1) < 2^31.
  int cmp(const VPointer& q) const {
    if (valid() && q.valid() &&
        (_adr == q._adr || (_base == _adr && q._base == q._adr)) &&
        _scale == q._scale && invar_equals(q)) {
      jlong difference = abs(java_subtract((jlong)_offset, (jlong)q._offset));
      jlong max_diff = (jlong)1 << 31;
      if (difference >= max_diff) {
        return NotComparable;
      }
      bool overlap = q._offset <   _offset + memory_size() &&
                       _offset < q._offset + q.memory_size();
      return overlap ? Equal : (_offset < q._offset ? Less : Greater);
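A worked instance of cmp(), as a short Java sketch (hypothetical values that satisfy the comparability conditions above):

    // two VPointers into the same array: identical adr, scale and invar;
    // offsets 0 and 16, each access 8 bytes wide
    int offset0 = 0,  size0 = 8;   // region [0, 8)
    int offset1 = 16, size1 = 8;   // region [16, 24)

    boolean overlap = offset1 < offset0 + size0    // 16 < 8  -> false
                   && offset0 < offset1 + size1;   // short-circuited
    // no overlap and offset0 < offset1, so cmp() returns Less:
    // the two accesses can never alias
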
@@ -851,6 +931,11 @@ class VPointer : public ArenaObj {

  void maybe_add_to_invar(Node* new_invar, bool negate);

  static bool try_AddI_no_overflow(int offset1, int offset2, int& result);
  static bool try_SubI_no_overflow(int offset1, int offset2, int& result);
  static bool try_AddSubI_no_overflow(int offset1, int offset2, bool is_sub, int& result);
  static bool try_LShiftI_no_overflow(int offset1, int offset2, int& result);

  Node* register_if_new(Node* n) const;
};
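The try_*_no_overflow helpers declared above share one pattern: evaluate the operation in 64 bits and commit the result only if it fits in 32. A minimal Java sketch of that pattern (hypothetical method; an array slot stands in for the C++ out-parameter):

    static boolean tryAddINoOverflow(int offset1, int offset2, int[] result) {
        long sum = (long) offset1 + (long) offset2; // exact in 64 bits
        if (sum != (int) sum) {
            return false;                           // would overflow a jint
        }
        result[0] = (int) sum;
        return true;
    }
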

@@ -2339,7 +2339,7 @@ JvmtiModuleClosure::get_all_modules(JvmtiEnv* env, jint* module_count_ptr, jobje
  }

  // Iterate over all the modules loaded to the system.
  ClassLoaderDataGraph::modules_do(&do_module);
  ClassLoaderDataGraph::modules_do_keepalive(&do_module);

  jint len = _tbl->length();
  guarantee(len > 0, "at least one module must be present");

@@ -105,7 +105,7 @@ JvmtiGetLoadedClasses::getLoadedClasses(JvmtiEnv *env, jint* classCountPtr, jcla
    // Iterate through all classes in ClassLoaderDataGraph
    // and collect them using the LoadedClassesClosure
    MutexLocker mcld(ClassLoaderDataGraph_lock);
    ClassLoaderDataGraph::loaded_classes_do(&closure);
    ClassLoaderDataGraph::loaded_classes_do_keepalive(&closure);
  }

  return closure.get_result(env, classCountPtr, classesPtr);

@@ -55,6 +55,7 @@
#include "runtime/threadSMR.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vm_version.hpp"
#include "sanitizers/ub.hpp"
#include "services/threadService.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"

@@ -242,6 +243,9 @@ public:
    return normalize_for_read(*addr());
  }

  // we use this method at some places for writing to 0 e.g. to cause a crash;
  // ubsan does not know that this is the desired behavior
  ATTRIBUTE_NO_UBSAN
  void put(T x) {
    GuardUnsafeAccess guard(_thread);
    *addr() = normalize_for_write(x);

@@ -1442,7 +1442,7 @@ bool ObjectMonitor::check_owner(TRAPS) {
static inline bool is_excluded(const Klass* monitor_klass) {
  assert(monitor_klass != nullptr, "invariant");
  NOT_JFR_RETURN_(false);
  JFR_ONLY(return vmSymbols::jfr_chunk_rotation_monitor() == monitor_klass->name();)
  JFR_ONLY(return vmSymbols::jdk_jfr_internal_HiddenWait() == monitor_klass->name();)
}

static void post_monitor_wait_event(EventJavaMonitorWait* event,

@@ -1333,7 +1333,7 @@ methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, T

  if (invoke_code == Bytecodes::_invokestatic) {
    assert(callee_method->method_holder()->is_initialized() ||
           callee_method->method_holder()->is_init_thread(current),
           callee_method->method_holder()->is_reentrant_initialization(current),
           "invalid class initialization state for invoke_static");
    if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
      // In order to keep class initialization check, do not patch call

@@ -821,6 +821,16 @@ int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  return ret_code;
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (millis < 0) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_wait)->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  JavaThread* current = THREAD;

@@ -119,6 +119,11 @@ public:
  static bool quick_notify(oopDesc* obj, JavaThread* current, bool All);
  static bool quick_enter(oop obj, JavaThread* current, BasicLock* Lock);

  // Special internal-use-only method for use by JVM infrastructure
  // that needs to wait() on a java-level object but that can't risk
  // throwing unexpected InterruptedExecutionExceptions.
  static void waitUninterruptibly(Handle obj, jlong Millis, TRAPS);

  // Inflate light weight monitor to heavy weight monitor
  static ObjectMonitor* inflate(Thread* current, oop obj, const InflateCause cause);
  // Used to inflate a monitor as if it was done from the thread JavaThread.
@@ -225,6 +230,7 @@ class ObjectLocker : public StackObj {

  // Monitor behavior
  void wait(TRAPS)                 { ObjectSynchronizer::wait(_obj, 0, CHECK); } // wait forever
  void wait_uninterruptibly(TRAPS) { ObjectSynchronizer::waitUninterruptibly(_obj, 0, CHECK); } // wait forever
  void notify_all(TRAPS)           { ObjectSynchronizer::notifyall(_obj, CHECK); }
};

@@ -205,8 +205,9 @@ void javaVFrame::print_lock_info_on(outputStream* st, int frame_count) {
      Klass* k = obj->klass();
      st->print_cr("\t- %s <" INTPTR_FORMAT "> (a %s)", "parking to wait for ", p2i(obj), k->external_name());
    }
    else if (thread()->osthread()->get_state() == CONDVAR_WAIT) {
      // We are waiting on the native class initialization monitor.
    else if (thread()->osthread()->get_state() == OBJECT_WAIT) {
      // We are waiting on an Object monitor but Object.wait() isn't the
      // top-frame, so we should be waiting on a Class initialization monitor.
      InstanceKlass* k = thread()->class_to_be_initialized();
      if (k != nullptr) {
        st->print_cr("\t- waiting on the Class initialization monitor for %s", k->external_name());

@@ -246,6 +246,7 @@
  nonstatic_field(InstanceKlass, _nonstatic_oop_map_size, int) \
  volatile_nonstatic_field(InstanceKlass, _init_state, InstanceKlass::ClassState) \
  volatile_nonstatic_field(InstanceKlass, _init_thread, JavaThread*) \
  nonstatic_field(InstanceKlass, _is_marked_dependent, bool) \
  nonstatic_field(InstanceKlass, _itable_len, int) \
  nonstatic_field(InstanceKlass, _nest_host_index, u2) \
  nonstatic_field(InstanceKlass, _reference_type, u1) \
@@ -2163,7 +2164,6 @@
  \
  declare_constant(InstanceKlass::allocated) \
  declare_constant(InstanceKlass::loaded) \
  declare_constant(InstanceKlass::being_linked) \
  declare_constant(InstanceKlass::linked) \
  declare_constant(InstanceKlass::being_initialized) \
  declare_constant(InstanceKlass::fully_initialized) \

src/hotspot/share/sanitizers/ub.hpp (new file, 43 lines)
@@ -0,0 +1,43 @@
/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2024 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_SANITIZERS_UB_HPP
#define SHARE_SANITIZERS_UB_HPP

// ATTRIBUTE_NO_UBSAN
//
// Function attribute which informs the compiler to disable UBSan checks in the
// following function or method.
// Useful if the function or method is known to do something special or even 'dangerous', for
// example causing desired signals/crashes.
#if defined(__clang__) || defined(__GNUC__)
#define ATTRIBUTE_NO_UBSAN __attribute__((no_sanitize("undefined")))
#endif

#ifndef ATTRIBUTE_NO_UBSAN
#define ATTRIBUTE_NO_UBSAN
#endif

#endif // SHARE_SANITIZERS_UB_HPP
@@ -1089,6 +1089,14 @@ u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
    }
  }

  // Also provide a pointer to the init_lock if present, so there aren't unreferenced int[0]
  // arrays.
  oop init_lock = ik->init_lock();
  if (init_lock != nullptr) {
    field_count++;
    size += sizeof(address);
  }

  // We write the value itself plus a name and a one byte type tag per field.
  return checked_cast<u4>(size + field_count * (sizeof(address) + 1));
}
@@ -1126,6 +1134,14 @@ void DumperSupport::dump_static_fields(AbstractDumpWriter* writer, Klass* k) {
      prev = prev->previous_versions();
    }
  }

  // Add init lock to the end if the class is not yet initialized
  oop init_lock = ik->init_lock();
  if (init_lock != nullptr) {
    writer->write_symbolID(vmSymbols::init_lock_name());         // name
    writer->write_u1(sig2tag(vmSymbols::int_array_signature())); // type
    writer->write_objectID(init_lock);
  }
}

// dump the raw values of the instance fields of the given object

@@ -60,6 +60,7 @@
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_version.hpp"
#include "sanitizers/ub.hpp"
#include "utilities/debug.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
@@ -2086,9 +2087,7 @@ typedef void (*voidfun_t)();
// compared to one generated with raise (asynchronous vs synchronous). See JDK-8065895.
volatile int sigfpe_int = 0;

#if defined(__clang__) || defined(__GNUC__)
__attribute__((no_sanitize("undefined")))
#endif
ATTRIBUTE_NO_UBSAN
static void ALWAYSINLINE crash_with_sigfpe() {

  // generate a native synchronous SIGFPE where possible;

@@ -222,7 +222,7 @@ import java.util.stream.Stream;
 * <pre>
 * MemoryLayout.structLayout(
 *     ValueLayout.JAVA_INT.withName("x"),
 *     MemoryLayout.paddingLayout(32),
 *     MemoryLayout.paddingLayout(4),
 *     ValueLayout.JAVA_LONG.withName("y")
 * );
 * </pre>

@@ -369,7 +369,7 @@ import jdk.internal.foreign.layout.UnionLayoutImpl;
 * int size = ...
 * MemorySegment points = ...
 * for (int i = 0 ; i < size ; i++) {
 *     ... POINT_ARR_X.get(segment, 0L, (long)i) ...
 *     ... POINT_ARR_X.get(points, 0L, (long)i) ...
 * }
 * }
 *

@@ -630,6 +630,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl {
 * {@snippet lang=java :
 * asSlice(offset, newSize, 1);
 * }
 * <p>
 * The returned memory segment shares a region of backing memory with this segment.
 * Hence, no memory will be allocated or freed by this method.
 *
 * @see #asSlice(long, long, long)
 *
@@ -646,6 +649,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl {
 * Returns a slice of this memory segment, at the given offset, with the provided
 * alignment constraint. The returned segment's address is the address of this
 * segment plus the given offset; its size is specified by the given argument.
 * <p>
 * The returned memory segment shares a region of backing memory with this segment.
 * Hence, no memory will be allocated or freed by this method.
 *
 * @param offset The new segment base offset (relative to the address of this segment),
 *               specified in bytes
@@ -670,6 +676,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl {
 * {@snippet lang=java :
 * asSlice(offset, layout.byteSize(), layout.byteAlignment());
 * }
 * <p>
 * The returned memory segment shares a region of backing memory with this segment.
 * Hence, no memory will be allocated or freed by this method.
 *
 * @see #asSlice(long, long, long)
 *
@@ -693,6 +702,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl {
 * {@snippet lang=java :
 * asSlice(offset, byteSize() - offset);
 * }
 * <p>
 * The returned memory segment shares a region of backing memory with this segment.
 * Hence, no memory will be allocated or freed by this method.
 *
 * @see #asSlice(long, long)
 *
@@ -706,6 +718,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl {
/**
 * Returns a new memory segment that has the same address and scope as this segment,
 * but with the provided size.
 * <p>
 * The returned memory segment shares a region of backing memory with this segment.
 * Hence, no memory will be allocated or freed by this method.
 *
 * @param newSize the size of the returned segment
 * @return a new memory segment that has the same address and scope as
@@ -741,6 +756,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl {
 * That is, the cleanup action receives a segment that is associated with the global
 * scope, and is accessible from any thread. The size of the segment accepted by the
 * cleanup action is {@link #byteSize()}.
 * <p>
 * The returned memory segment shares a region of backing memory with this segment.
 * Hence, no memory will be allocated or freed by this method.
 *
 * @apiNote The cleanup action (if present) should take care not to leak the received
 *          segment to external clients that might access the segment after its
@@ -786,6 +804,9 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl {
 * That is, the cleanup action receives a segment that is associated with the global
 * scope, and is accessible from any thread. The size of the segment accepted by the
 * cleanup action is {@code newSize}.
 * <p>
 * The returned memory segment shares a region of backing memory with this segment.
 * Hence, no memory will be allocated or freed by this method.
 *
 * @apiNote The cleanup action (if present) should take care not to leak the received
 *          segment to external clients that might access the segment after its

@@ -39,7 +39,6 @@ import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import java.util.Objects;
import jdk.internal.constant.ConstantUtils;
import jdk.internal.constant.MethodTypeDescImpl;
import jdk.internal.constant.ReferenceClassDescImpl;
import sun.security.action.GetBooleanAction;
@@ -47,10 +46,7 @@ import sun.security.action.GetBooleanAction;
import static java.lang.classfile.ClassFile.*;
import java.lang.classfile.attribute.StackMapFrameInfo;
import java.lang.classfile.attribute.StackMapTableAttribute;
import java.lang.constant.ConstantDescs;
import static java.lang.constant.ConstantDescs.*;
import java.lang.constant.DirectMethodHandleDesc;
import java.lang.constant.DynamicConstantDesc;

/**
 * ProxyGenerator contains the code to generate a dynamic proxy class
@@ -65,7 +61,10 @@ final class ProxyGenerator {
            ClassFile.of(ClassFile.StackMapsOption.DROP_STACK_MAPS);

    private static final ClassDesc
            CD_ClassLoader = ReferenceClassDescImpl.ofValidated("Ljava/lang/ClassLoader;"),
            CD_Class_array = ReferenceClassDescImpl.ofValidated("[Ljava/lang/Class;"),
            CD_ClassNotFoundException = ReferenceClassDescImpl.ofValidated("Ljava/lang/ClassNotFoundException;"),
            CD_NoClassDefFoundError = ReferenceClassDescImpl.ofValidated("Ljava/lang/NoClassDefFoundError;"),
            CD_IllegalAccessException = ReferenceClassDescImpl.ofValidated("Ljava/lang/IllegalAccessException;"),
            CD_InvocationHandler = ReferenceClassDescImpl.ofValidated("Ljava/lang/reflect/InvocationHandler;"),
            CD_Method = ReferenceClassDescImpl.ofValidated("Ljava/lang/reflect/Method;"),
@@ -81,8 +80,9 @@ final class ProxyGenerator {
            MTD_void_String = MethodTypeDescImpl.ofValidated(CD_void, CD_String),
            MTD_void_Throwable = MethodTypeDescImpl.ofValidated(CD_void, CD_Throwable),
            MTD_Class = MethodTypeDescImpl.ofValidated(CD_Class),
            MTD_Class_array = MethodTypeDescImpl.ofValidated(CD_Class_array),
            MTD_Method_String_Class_array = MethodTypeDescImpl.ofValidated(CD_Method, ConstantDescs.CD_String, CD_Class_array),
            MTD_Class_String_boolean_ClassLoader = MethodTypeDescImpl.ofValidated(CD_Class, CD_String, CD_boolean, CD_ClassLoader),
            MTD_ClassLoader = MethodTypeDescImpl.ofValidated(CD_ClassLoader),
            MTD_Method_String_Class_array = MethodTypeDescImpl.ofValidated(CD_Method, CD_String, CD_Class_array),
            MTD_MethodHandles$Lookup = MethodTypeDescImpl.ofValidated(CD_MethodHandles_Lookup),
            MTD_MethodHandles$Lookup_MethodHandles$Lookup = MethodTypeDescImpl.ofValidated(CD_MethodHandles_Lookup, CD_MethodHandles_Lookup),
            MTD_Object_Object_Method_ObjectArray = MethodTypeDescImpl.ofValidated(CD_Object, CD_Object, CD_Method, CD_Object_array),
@@ -107,34 +107,33 @@ final class ProxyGenerator {
            "jdk.proxy.ProxyGenerator.saveGeneratedFiles"));

    /* Preloaded ProxyMethod objects for methods in java.lang.Object */
    private static final ProxyMethod HASH_CODE_METHOD;
    private static final ProxyMethod EQUALS_METHOD;
    private static final ProxyMethod TO_STRING_METHOD;
    private static final Method OBJECT_HASH_CODE_METHOD;
    private static final Method OBJECT_EQUALS_METHOD;
    private static final Method OBJECT_TO_STRING_METHOD;

    static {
        try {
            HASH_CODE_METHOD = new ProxyMethod(Object.class.getMethod("hashCode"));
            EQUALS_METHOD = new ProxyMethod(Object.class.getMethod("equals", Object.class));
            TO_STRING_METHOD = new ProxyMethod(Object.class.getMethod("toString"));
            OBJECT_HASH_CODE_METHOD = Object.class.getMethod("hashCode");
            OBJECT_EQUALS_METHOD = Object.class.getMethod("equals", Object.class);
            OBJECT_TO_STRING_METHOD = Object.class.getMethod("toString");
        } catch (NoSuchMethodException e) {
            throw new NoSuchMethodError(e.getMessage());
        }
    }

    private final ConstantPoolBuilder cp;
    private final List<StackMapFrameInfo.VerificationTypeInfo> throwableStack;
    private final List<StackMapFrameInfo.VerificationTypeInfo> classLoaderLocal, throwableStack;
    private final NameAndTypeEntry exInit;
    private final ClassEntry object, proxy, ute;
    private final ClassEntry objectCE, proxyCE, uteCE, classCE;
    private final FieldRefEntry handlerField;
    private final InterfaceMethodRefEntry invoke;
    private final MethodRefEntry uteInit;
    private final DirectMethodHandleDesc bsm;
    private final InterfaceMethodRefEntry invocationHandlerInvoke;
    private final MethodRefEntry uteInit, classGetMethod, classForName, throwableGetMessage;


    /**
     * Name of proxy class
     * ClassEntry for this proxy class
     */
    private ClassEntry classEntry;
    private final ClassEntry thisClassCE;

    /**
     * Proxy interfaces
@@ -153,6 +152,12 @@ final class ProxyGenerator {
     */
    private final Map<String, List<ProxyMethod>> proxyMethods = new LinkedHashMap<>();

    /**
     * Ordinal of next ProxyMethod object added to proxyMethods.
     * Indexes are reserved for hashcode(0), equals(1), toString(2).
     */
    private int proxyMethodCount = 3;

    /**
     * Construct a ProxyGenerator to generate a proxy class with the
     * specified name and for the given interfaces.
@@ -163,18 +168,23 @@ final class ProxyGenerator {
    private ProxyGenerator(ClassLoader loader, String className, List<Class<?>> interfaces,
                           int accessFlags) {
        this.cp = ConstantPoolBuilder.of();
        this.classEntry = cp.classEntry(ReferenceClassDescImpl.ofValidatedBinaryName(className));
        this.thisClassCE = cp.classEntry(ReferenceClassDescImpl.ofValidatedBinaryName(className));
        this.interfaces = interfaces;
        this.accessFlags = accessFlags;
        this.throwableStack = List.of(StackMapFrameInfo.ObjectVerificationTypeInfo.of(cp.classEntry(CD_Throwable)));
        var throwable = cp.classEntry(CD_Throwable);
        this.classLoaderLocal = List.of(StackMapFrameInfo.ObjectVerificationTypeInfo.of(cp.classEntry(CD_ClassLoader)));
        this.throwableStack = List.of(StackMapFrameInfo.ObjectVerificationTypeInfo.of(throwable));
        this.exInit = cp.nameAndTypeEntry(INIT_NAME, MTD_void_String);
        this.object = cp.classEntry(CD_Object);
        this.proxy = cp.classEntry(CD_Proxy);
        this.handlerField = cp.fieldRefEntry(proxy, cp.nameAndTypeEntry(NAME_HANDLER_FIELD, CD_InvocationHandler));
        this.invoke = cp.interfaceMethodRefEntry(CD_InvocationHandler, "invoke", MTD_Object_Object_Method_ObjectArray);
        this.ute = cp.classEntry(CD_UndeclaredThrowableException);
        this.uteInit = cp.methodRefEntry(ute, cp.nameAndTypeEntry(INIT_NAME, MTD_void_Throwable));
        this.bsm = ConstantDescs.ofConstantBootstrap(classEntry.asSymbol(), "$getMethod", CD_Method, CD_Class, CD_String, CD_MethodType);
        this.objectCE = cp.classEntry(CD_Object);
        this.proxyCE = cp.classEntry(CD_Proxy);
        this.classCE = cp.classEntry(CD_Class);
        this.handlerField = cp.fieldRefEntry(proxyCE, cp.nameAndTypeEntry(NAME_HANDLER_FIELD, CD_InvocationHandler));
        this.invocationHandlerInvoke = cp.interfaceMethodRefEntry(CD_InvocationHandler, "invoke", MTD_Object_Object_Method_ObjectArray);
        this.uteCE = cp.classEntry(CD_UndeclaredThrowableException);
        this.uteInit = cp.methodRefEntry(uteCE, cp.nameAndTypeEntry(INIT_NAME, MTD_void_Throwable));
        this.classGetMethod = cp.methodRefEntry(classCE, cp.nameAndTypeEntry("getMethod", MTD_Method_String_Class_array));
        this.classForName = cp.methodRefEntry(classCE, cp.nameAndTypeEntry("forName", MTD_Class_String_boolean_ClassLoader));
        this.throwableGetMessage = cp.methodRefEntry(throwable, cp.nameAndTypeEntry("getMessage", MTD_String));
    }

    /**
@@ -441,9 +451,9 @@ final class ProxyGenerator {
         * java.lang.Object take precedence over duplicate methods in the
         * proxy interfaces.
         */
        addProxyMethod(HASH_CODE_METHOD);
        addProxyMethod(EQUALS_METHOD);
        addProxyMethod(TO_STRING_METHOD);
        addProxyMethod(new ProxyMethod(OBJECT_HASH_CODE_METHOD, "m0"));
        addProxyMethod(new ProxyMethod(OBJECT_EQUALS_METHOD, "m1"));
        addProxyMethod(new ProxyMethod(OBJECT_TO_STRING_METHOD, "m2"));

        /*
         * Accumulate all of the methods from the proxy interfaces.
@@ -464,20 +474,23 @@ final class ProxyGenerator {
            checkReturnTypes(sigmethods);
        }

        return CF_CONTEXT.build(classEntry, cp, clb -> {
            clb.withSuperclass(proxy);
        return CF_CONTEXT.build(thisClassCE, cp, clb -> {
            clb.withSuperclass(proxyCE);
            clb.withFlags(accessFlags);
            clb.withInterfaces(toClassEntries(cp, interfaces));
            generateConstructor(clb);

            for (List<ProxyMethod> sigmethods : proxyMethods.values()) {
                for (ProxyMethod pm : sigmethods) {
                    // add static field for the Method object
                    clb.withField(pm.methodFieldName, CD_Method, ACC_PRIVATE | ACC_STATIC | ACC_FINAL);

                    // Generate code for proxy method
                    pm.generateMethod(this, clb);
                    pm.generateMethod(clb);
                }
            }

            generateBootstrapMethod(clb);
            generateStaticInitializer(clb);
            generateLookupAccessor(clb);
        });
    }
@@ -520,7 +533,7 @@ final class ProxyGenerator {
            }
        }
        sigmethods.add(new ProxyMethod(m, sig, m.getSharedParameterTypes(), returnType,
                exceptionTypes, fromClass));
                exceptionTypes, fromClass, "m" + proxyMethodCount++));
    }

    /**
@@ -542,32 +555,56 @@ final class ProxyGenerator {
        clb.withMethodBody(INIT_NAME, MTD_void_InvocationHandler, ACC_PUBLIC, cob -> cob
                .aload(0)
                .aload(1)
                .invokespecial(cp.methodRefEntry(proxy, cp.nameAndTypeEntry(INIT_NAME, MTD_void_InvocationHandler)))
                .invokespecial(cp.methodRefEntry(proxyCE,
                        cp.nameAndTypeEntry(INIT_NAME, MTD_void_InvocationHandler)))
                .return_());
    }

    /**
     * Generate CONDY bootstrap method for the proxy class to retrieve {@link Method} instances.
     * Generate the class initializer.
     * Discussion: Currently, for Proxy to work with SecurityManager,
     * we rely on the parameter classes of the methods to be computed
     * from Proxy instead of via user code paths like bootstrap method
     * lazy evaluation. That might change if we can pass in the live
     * Method objects directly.
     */
    private void generateBootstrapMethod(ClassBuilder clb) {
        clb.withMethodBody(bsm.methodName(), bsm.invocationType(), ClassFile.ACC_PRIVATE | ClassFile.ACC_STATIC, cob -> {
            cob.aload(3) //interface Class
               .aload(4) //interface method name String
               .aload(5) //interface MethodType
               .invokevirtual(CD_MethodType, "parameterArray", MTD_Class_array)
               .invokevirtual(ConstantDescs.CD_Class, "getMethod", MTD_Method_String_Class_array)
               .areturn();
            Label failLabel = cob.newBoundLabel();
            ClassEntry nsme = cp.classEntry(CD_NoSuchMethodError);
            cob.exceptionCatch(cob.startLabel(), failLabel, failLabel, CD_NoSuchMethodException)
               .new_(nsme)
    private void generateStaticInitializer(ClassBuilder clb) {
        clb.withMethodBody(CLASS_INIT_NAME, MTD_void, ACC_STATIC, cob -> {
            // Put ClassLoader at local variable index 0, used by
            // Class.forName(String, boolean, ClassLoader) calls
            cob.ldc(thisClassCE)
               .invokevirtual(cp.methodRefEntry(classCE,
                       cp.nameAndTypeEntry("getClassLoader", MTD_ClassLoader)))
               .astore(0);
            var ts = cob.newBoundLabel();
            for (List<ProxyMethod> sigmethods : proxyMethods.values()) {
                for (ProxyMethod pm : sigmethods) {
                    pm.codeFieldInitialization(cob);
                }
            }
            cob.return_();
            var c1 = cob.newBoundLabel();
            var nsmError = cp.classEntry(CD_NoSuchMethodError);
            cob.exceptionCatch(ts, c1, c1, CD_NoSuchMethodException)
               .new_(nsmError)
               .dup_x1()
               .swap()
               .invokevirtual(cp.methodRefEntry(CD_Throwable, "getMessage", MTD_String))
               .invokespecial(cp.methodRefEntry(nsme, exInit))
               .athrow()
               .with(StackMapTableAttribute.of(List.of(
                       StackMapFrameInfo.of(failLabel, List.of(), throwableStack))));
               .invokevirtual(throwableGetMessage)
               .invokespecial(cp.methodRefEntry(nsmError, exInit))
               .athrow();
            var c2 = cob.newBoundLabel();
            var ncdfError = cp.classEntry(CD_NoClassDefFoundError);
            cob.exceptionCatch(ts, c1, c2, CD_ClassNotFoundException)
               .new_(ncdfError)
               .dup_x1()
               .swap()
               .invokevirtual(throwableGetMessage)
               .invokespecial(cp.methodRefEntry(ncdfError, exInit))
               .athrow();
            cob.with(StackMapTableAttribute.of(List.of(
                    StackMapFrameInfo.of(c1, classLoaderLocal, throwableStack),
                    StackMapFrameInfo.of(c2, classLoaderLocal, throwableStack))));

        });
    }
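For orientation, the <clinit> emitted by generateStaticInitializer() together with codeFieldInitialization() corresponds roughly to this Java source (a sketch; $Proxy0 stands for the generated class name and m0 for the first reserved Method field):

    static {
        try {
            ClassLoader cl = $Proxy0.class.getClassLoader();  // astore(0)
            m0 = Class.forName("java.lang.Object", false, cl)
                      .getMethod("hashCode");
            // ... one such assignment per proxy method ...
        } catch (NoSuchMethodException e) {
            throw new NoSuchMethodError(e.getMessage());
        } catch (ClassNotFoundException e) {
            throw new NoClassDefFoundError(e.getMessage());
        }
    }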

@@ -587,7 +624,7 @@ final class ProxyGenerator {
        ClassEntry iae = cp.classEntry(CD_IllegalAccessException);
        cob.aload(cob.parameterSlot(0))
           .invokevirtual(cp.methodRefEntry(mhl, cp.nameAndTypeEntry("lookupClass", MTD_Class)))
           .ldc(proxy)
           .ldc(proxyCE)
           .if_acmpne(failLabel)
           .aload(cob.parameterSlot(0))
           .invokevirtual(cp.methodRefEntry(mhl, cp.nameAndTypeEntry("hasFullPrivilegeAccess", MTD_boolean)))
@@ -613,24 +650,29 @@ final class ProxyGenerator {
     * being generated: a method whose implementation will encode and
     * dispatch invocations to the proxy instance's invocation handler.
     */
    private static class ProxyMethod {
    private class ProxyMethod {

        private final Method method;
        private final String shortSignature;
        private final Class<?> fromClass;
        private final Class<?>[] parameterTypes;
        private final Class<?> returnType;
        private final String methodFieldName;
        private Class<?>[] exceptionTypes;
        private final FieldRefEntry methodField;

        private ProxyMethod(Method method, String sig, Class<?>[] parameterTypes,
                            Class<?> returnType, Class<?>[] exceptionTypes,
                            Class<?> fromClass) {
                            Class<?> fromClass, String methodFieldName) {
            this.method = method;
            this.shortSignature = sig;
            this.parameterTypes = parameterTypes;
            this.returnType = returnType;
            this.exceptionTypes = exceptionTypes;
            this.fromClass = fromClass;
            this.methodFieldName = methodFieldName;
            this.methodField = cp.fieldRefEntry(thisClassCE,
                    cp.nameAndTypeEntry(methodFieldName, CD_Method));
        }

        /**
@@ -639,17 +681,16 @@ final class ProxyGenerator {
         * @param method The method for which to create a proxy
         * @param methodFieldName the fieldName to generate
         */
        private ProxyMethod(Method method) {
        private ProxyMethod(Method method, String methodFieldName) {
            this(method, method.toShortSignature(),
                    method.getSharedParameterTypes(), method.getReturnType(),
                    method.getSharedExceptionTypes(), method.getDeclaringClass());
                    method.getSharedExceptionTypes(), method.getDeclaringClass(), methodFieldName);
        }

        /**
         * Generate this method, including the code and exception table entry.
         */
        private void generateMethod(ProxyGenerator pg, ClassBuilder clb) {
            var cp = pg.cp;
        private void generateMethod(ClassBuilder clb) {
            var pTypes = new ClassDesc[parameterTypes.length];
            for (int i = 0; i < pTypes.length; i++) {
                pTypes[i] = toClassDesc(parameterTypes[i]);
@@ -661,17 +702,14 @@ final class ProxyGenerator {
            clb.withMethod(method.getName(), desc, accessFlags, mb ->
                    mb.with(ExceptionsAttribute.of(toClassEntries(cp, List.of(exceptionTypes))))
                      .withCode(cob -> {
                          cob.aload(0)
                             .getfield(pg.handlerField)
                             .aload(0)
                             .ldc(DynamicConstantDesc.of(pg.bsm,
                                     toClassDesc(fromClass),
                                     method.getName(),
                                     desc));
                          cob.aload(cob.receiverSlot())
                             .getfield(handlerField)
                             .aload(cob.receiverSlot())
                             .getstatic(methodField);
                          if (parameterTypes.length > 0) {
                              // Create an array and fill with the parameters converting primitives to wrappers
                              cob.loadConstant(parameterTypes.length)
                                 .anewarray(pg.object);
                                 .anewarray(objectCE);
                              for (int i = 0; i < parameterTypes.length; i++) {
                                  cob.dup()
                                     .loadConstant(i);
@@ -682,7 +720,7 @@ final class ProxyGenerator {
                              cob.aconst_null();
                          }

                          cob.invokeinterface(pg.invoke);
                          cob.invokeinterface(invocationHandlerInvoke);

                          if (returnType == void.class) {
                              cob.pop()
@@ -698,14 +736,14 @@ final class ProxyGenerator {
                          cob.athrow();   // just rethrow the exception
                          var c2 = cob.newBoundLabel();
                          cob.exceptionCatchAll(cob.startLabel(), c1, c2)
                             .new_(pg.ute)
                             .new_(uteCE)
                             .dup_x1()
                             .swap()
                             .invokespecial(pg.uteInit)
                             .invokespecial(uteInit)
                             .athrow()
                             .with(StackMapTableAttribute.of(List.of(
                                     StackMapFrameInfo.of(c1, List.of(), pg.throwableStack),
                                     StackMapFrameInfo.of(c2, List.of(), pg.throwableStack))));
                                     StackMapFrameInfo.of(c1, List.of(), throwableStack),
                                     StackMapFrameInfo.of(c2, List.of(), throwableStack))));
                          }
                      }));
        }
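The body emitted above amounts, for hashCode for example, to roughly this Java source (a sketch; h is the InvocationHandler field inherited from java.lang.reflect.Proxy, m0 the static Method field generated above):

    public int hashCode() {
        try {
            return (Integer) h.invoke(this, m0, null);
        } catch (RuntimeException | Error e) {
            throw e;                                    // rethrown unchanged
        } catch (Throwable t) {
            throw new UndeclaredThrowableException(t);  // wrap checked throwables
        }
    }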
@@ -720,7 +758,7 @@ final class ProxyGenerator {
            if (type.isPrimitive()) {
                cob.loadLocal(TypeKind.from(type).asLoadable(), slot);
                PrimitiveTypeInfo prim = PrimitiveTypeInfo.get(type);
                cob.invokestatic(prim.wrapperMethodRef(cob.constantPool()));
                cob.invokestatic(prim.wrapperMethodRef(cp));
            } else {
                cob.aload(slot);
            }
@@ -736,7 +774,7 @@ final class ProxyGenerator {
                PrimitiveTypeInfo prim = PrimitiveTypeInfo.get(type);

                cob.checkcast(prim.wrapperClass)
                   .invokevirtual(prim.unwrapMethodRef(cob.constantPool()))
                   .invokevirtual(prim.unwrapMethodRef(cp))
                   .return_(TypeKind.from(type).asLoadable());
            } else {
                cob.checkcast(toClassDesc(type))
@@ -744,6 +782,57 @@ final class ProxyGenerator {
            }
        }

        /**
         * Generate code for initializing the static field that stores
         * the Method object for this proxy method. A class loader is
         * anticipated at local variable index 0.
         * The generated code must be run in an AccessController.doPrivileged
         * block if a SecurityManager is present, as otherwise the code
         * cannot pass {@code null} ClassLoader to forName.
         */
        private void codeFieldInitialization(CodeBuilder cob) {
            var cp = cob.constantPool();
            codeClassForName(cob, fromClass);

            cob.ldc(method.getName())
               .loadConstant(parameterTypes.length)
               .anewarray(classCE);

            // Construct an array with the parameter types mapping primitives to Wrapper types
            for (int i = 0; i < parameterTypes.length; i++) {
                cob.dup()
                   .loadConstant(i);
                if (parameterTypes[i].isPrimitive()) {
                    PrimitiveTypeInfo prim = PrimitiveTypeInfo.get(parameterTypes[i]);
                    cob.getstatic(prim.typeFieldRef(cp));
                } else {
                    codeClassForName(cob, parameterTypes[i]);
                }
                cob.aastore();
            }
            // lookup the method
            cob.invokevirtual(classGetMethod)
               .putstatic(methodField);
        }

        /*
         * =============== Code Generation Utility Methods ===============
         */

        /**
         * Generate code to invoke the Class.forName with the name of the given
         * class to get its Class object at runtime. The code is written to
         * the supplied stream. Note that the code generated by this method
         * may cause the checked ClassNotFoundException to be thrown. A class
         * loader is anticipated at local variable index 0.
         */
        private void codeClassForName(CodeBuilder cob, Class<?> cl) {
            cob.ldc(cl.getName())
               .iconst_0() // false
               .aload(0)   // classLoader
               .invokestatic(classForName);
        }

        @Override
        public String toString() {
            return method.toShortString();
@@ -810,5 +899,9 @@ final class ProxyGenerator {
        public MethodRefEntry unwrapMethodRef(ConstantPoolBuilder cp) {
            return cp.methodRefEntry(wrapperClass, unwrapMethodName, unwrapMethodType);
        }

        public FieldRefEntry typeFieldRef(ConstantPoolBuilder cp) {
            return cp.fieldRefEntry(wrapperClass, "TYPE", CD_Class);
        }
    }
}

@@ -253,6 +253,15 @@ to determine the proxy that should be used for connecting to a given URI.</P>
</OL>
<P>The channel binding tokens generated are of the type "tls-server-end-point" as defined in
RFC 5929.</P>

<LI><P><B>{@systemProperty jdk.http.maxHeaderSize}</B> (default: 393216 or 384kB)<BR>
This is the maximum header field section size that a client is prepared to accept.
This is computed as the sum of the size of the uncompressed header name, plus
the size of the uncompressed header value, plus an overhead of 32 bytes for
each field section line. If a peer sends a field section that exceeds this
size, a {@link java.net.ProtocolException ProtocolException} will be raised.
This applies to all versions of the HTTP protocol. A value of zero or a negative
value means no limit. If left unspecified, the default value is 393216 bytes.
</UL>
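A small Java sketch of the size accounting described above for jdk.http.maxHeaderSize (hypothetical header values; ASCII, so chars == bytes):

    // per field line: uncompressed name + uncompressed value + 32 bytes overhead
    String name  = "content-type";      // 12 bytes
    String value = "application/json";  // 16 bytes
    long fieldLineSize = name.length() + value.length() + 32;  // 60

    // the field section size is the sum over all field lines; once it
    // exceeds jdk.http.maxHeaderSize (default 393216), the client fails
    // the exchange with a java.net.ProtocolException
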
<P>All these properties are checked only once at startup.</P>
<a id="AddressCache"></a>

@@ -41,6 +41,7 @@ package java.text;
import java.io.IOException;
import java.io.InvalidObjectException;
import java.io.ObjectInputStream;
import java.io.ObjectStreamException;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Arrays;
@@ -1172,6 +1173,8 @@ public class MessageFormat extends Format {
                maximumArgumentNumber = argumentNumbers[i];
            }
        }

        // Constructors/applyPattern ensure that resultArray.length < MAX_ARGUMENT_INDEX
        Object[] resultArray = new Object[maximumArgumentNumber + 1];

        int patternOffset = 0;
@@ -1450,6 +1453,9 @@ public class MessageFormat extends Format {
     * @serial
     */
    private int[] argumentNumbers = new int[INITIAL_FORMATS];
    // Implementation limit for ArgumentIndex pattern element. Valid indices must
    // be less than this value
    private static final int MAX_ARGUMENT_INDEX = 10000;

    /**
     * One less than the number of entries in {@code offsets}. Can also be thought of
@@ -1630,6 +1636,11 @@ public class MessageFormat extends Format {
                    + argumentNumber);
        }

        if (argumentNumber >= MAX_ARGUMENT_INDEX) {
            throw new IllegalArgumentException(
                    argumentNumber + " exceeds the ArgumentIndex implementation limit");
        }

        // resize format information arrays if necessary
        if (offsetNumber >= formats.length) {
            int newLength = formats.length * 2;
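The observable effect of the new MAX_ARGUMENT_INDEX check, as a short usage sketch:

    import java.text.MessageFormat;

    new MessageFormat("{9999}");   // still accepted: 9999 < 10000
    new MessageFormat("{10000}");  // throws IllegalArgumentException:
                                   // "10000 exceeds the ArgumentIndex implementation limit"
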
@@ -1997,24 +2008,53 @@ public class MessageFormat extends Format {
     */
    @java.io.Serial
    private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
        in.defaultReadObject();
        boolean isValid = maxOffset >= -1
                && formats.length > maxOffset
                && offsets.length > maxOffset
                && argumentNumbers.length > maxOffset;
        ObjectInputStream.GetField fields = in.readFields();
        if (fields.defaulted("argumentNumbers") || fields.defaulted("offsets")
                || fields.defaulted("formats") || fields.defaulted("locale")
                || fields.defaulted("pattern") || fields.defaulted("maxOffset")) {
            throw new InvalidObjectException("Stream has missing data");
        }

        locale = (Locale) fields.get("locale", null);
        String patt = (String) fields.get("pattern", null);
        int maxOff = fields.get("maxOffset", -2);
        int[] argNums = ((int[]) fields.get("argumentNumbers", null)).clone();
        int[] offs = ((int[]) fields.get("offsets", null)).clone();
        Format[] fmts = ((Format[]) fields.get("formats", null)).clone();

        // Check arrays/maxOffset have correct value/length
        boolean isValid = maxOff >= -1 && argNums.length > maxOff
                && offs.length > maxOff && fmts.length > maxOff;

        // Check the correctness of arguments and offsets
        if (isValid) {
            int lastOffset = pattern.length() + 1;
            for (int i = maxOffset; i >= 0; --i) {
                if ((offsets[i] < 0) || (offsets[i] > lastOffset)) {
            int lastOffset = patt.length() + 1;
            for (int i = maxOff; i >= 0; --i) {
                if (argNums[i] < 0 || argNums[i] >= MAX_ARGUMENT_INDEX
                        || offs[i] < 0 || offs[i] > lastOffset) {
                    isValid = false;
                    break;
                } else {
                    lastOffset = offsets[i];
                    lastOffset = offs[i];
                }
            }
        }

        if (!isValid) {
            throw new InvalidObjectException("Could not reconstruct MessageFormat from corrupt stream.");
            throw new InvalidObjectException("Stream has invalid data");
        }
        maxOffset = maxOff;
        pattern = patt;
        offsets = offs;
        formats = fmts;
        argumentNumbers = argNums;
    }

    /**
     * Serialization without data not supported for this class.
     */
    @java.io.Serial
    private void readObjectNoData() throws ObjectStreamException {
        throw new InvalidObjectException("Deserialized MessageFormat objects need data");
    }
}

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -293,7 +293,7 @@ public interface Gatherer<T, A, R> {
 *
 * @implSpec This method always returns the same instance.
 *
 * @see Gatherer#finisher()
 * @see Gatherer#combiner()
 * @return the instance of the default combiner
 * @param <A> the type of the state of the returned combiner
 */

@@ -1047,7 +1047,7 @@ public final class StackMapGenerator {
    void setLocalsFromArg(String name, MethodTypeDesc methodDesc, boolean isStatic, Type thisKlass) {
        int localsSize = 0;
        // Pre-emptively create a locals array that encompasses all parameter slots
        checkLocal(methodDesc.parameterCount() + (isStatic ? 0 : -1));
        checkLocal(methodDesc.parameterCount() + (isStatic ? -1 : 0));
        if (!isStatic) {
            localsSize++;
            if (OBJECT_INITIALIZER_NAME.equals(name) && !CD_Object.equals(thisKlass.sym)) {

@@ -165,6 +165,7 @@ module java.base {
        java.desktop,
        java.logging,
        java.management,
        java.management.rmi,
        java.naming,
        java.rmi,
        jdk.charsets,

@@ -1,5 +1,5 @@
#
# Copyright (c) 2007, 2023, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2007, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,7 @@ native Agent Library <libname>. Beispiel: -agentlib:jdwp\n sieh

# Translators please note do not translate the options themselves
java.launcher.X.usage=\n -Xbatch Deaktiviert die Hintergrundkompilierung\n -Xbootclasspath/a:<durch {0} getrennte Verzeichnisse und ZIP-/JAR-Dateien>\n An das Ende des Bootstrap Classpaths anhängen\n -Xcheck:jni Führt zusätzliche Prüfungen für JNI-Funktionen aus\n -Xcomp Erzwingt die Kompilierung von Methoden beim ersten Aufruf\n -Xdebug Führt keine Aktion aus. Ist veraltet und wird in einem zukünftigen Release entfernt.\n -Xdiag Zeigt zusätzliche Diagnosemeldungen an\n -Xfuture Aktiviert strengste Prüfungen, als möglicher zukünftiger Standardwert erwartet.\n Diese Option ist veraltet und kann in einem\n zukünftigen Release entfernt werden.\n -Xint Nur Ausführung im interpretierten Modus\n -Xinternalversion\n Zeigt detailliertere JVM-Versionsinformationen an als die\n Option -version\n -Xlog:<Optionen> Konfiguriert oder aktiviert Logging mit dem einheitlichen Java Virtual\n Machine-(JVM-)Logging-Framework. Verwenden Sie -Xlog:help\n für weitere Einzelheiten.\n -Xloggc:<Datei> Protokolliert den GC-Status in einer Datei mit Zeitstempeln.\n Diese Option ist veraltet und kann in einem\n zukünftigen Release entfernt werden. Wird durch -Xlog:gc:<Datei> ersetzt.\n -Xmixed Ausführung im gemischten Modus (Standard)\n -Xmn<Größe> Legt die anfängliche und maximale Größe (in Byte) des Heaps\n für die Young Generation (Nursery) fest\n -Xms<Größe> Legt die anfängliche Java-Heap-Größe fest\n -Xmx<Größe> Legt die maximale Java-Heap-Größe fest\n -Xnoclassgc Deaktiviert die Klassen-Garbage Collection\n -Xrs Reduziert die Verwendung von BS-Signalen durch Java/VM (siehe Dokumentation)\n -Xshare:auto Verwendet freigegebene Klassendaten, wenn möglich (Standard)\n -Xshare:off Versucht nicht, freigegebene Klassendaten zu verwenden\n -Xshare:on Erfordert die Verwendung freigegebener Klassendaten, verläuft sonst nicht erfolgreich.\n Diese Testoption kann zeitweise zu\n Fehlern führen. Sie darf nicht in Produktionsumgebungen verwendet werden.\n -XshowSettings Zeigt alle Einstellungen an und fährt fort\n -XshowSettings:all\n Zeigt alle Einstellungen als Verbose-Ausgabe an und fährt fort\n -XshowSettings:locale\n Zeigt alle gebietsschemabezogenen Einstellungen an und fährt fort\n -XshowSettings:properties\n Zeigt alle Eigenschaftseinstellungen an und fährt fort\n -XshowSettings:vm\n Zeigt alle VM-bezogenen Einstellungen an und fährt fort\n -XshowSettings:security\n Zeigt alle Sicherheitseinstellungen an und fährt fort\n -XshowSettings:security:all\n Zeigt alle Sicherheitseinstellungen an und fährt fort\n -XshowSettings:security:properties\n Zeigt Sicherheitseigenschaften an und fährt fort\n -XshowSettings:security:providers\n Zeigt statische Sicherheitsprovidereinstellungen an und fährt fort\n -XshowSettings:security:tls\n Zeigt TLS-bezogene Sicherheitseinstellungen an und fährt fort\n -XshowSettings:system\n (Nur Linux) Zeigt die Konfiguration des Hostsystems oder Containers an\n und fährt fort\n -Xss<Größe> Legt die Stackgröße des Java-Threads fest\n Die tatsächliche \
Größe kann auf ein Vielfaches der\n Systemseitengröße aufgerundet werden, wenn für das Betriebssystem erforderlich.\n -Xverify Legt den Modus der Bytecodeverifizierung fest\n Beachten Sie, dass die Option -Xverify:none veraltet ist und\n in einem zukünftigen Release entfernt werden kann.\n --add-reads <Modul>=<Zielmodul>(,<Zielmodul>)*\n Aktualisiert <Modul>, damit <Zielmodul> gelesen wird, ungeachtet\n der Moduldeklaration. \n <Zielmodul> kann ALL-UNNAMED sein, um alle unbenannten\n Module zu lesen.\n --add-exports <Modul>/<Package>=<Zielmodul>(,<Zielmodul>)*\n Aktualisiert <Modul>, um <Package> in <Zielmodul> zu exportieren,\n ungeachtet der Moduldeklaration.\n <Zielmodul> kann ALL-UNNAMED sein, um in alle\n unbenannten Module zu exportieren.\n --add-opens <Modul>/<Package>=<Zielmodul>(,<Zielmodul>)*\n Aktualisiert <Modul>, um <Package> in\n <Zielmodul> zu öffnen, ungeachtet der Moduldeklaration.\n --limit-modules <Modulname>[,<Modulname>...]\n Grenzt die Gesamtmenge der beobachtbaren Module ein\n --patch-module <Modul>=<Datei>({0}<Datei>)*\n Überschreibt oder erweitert ein Modul mit Klassen und Ressourcen\n in JAR-Dateien oder Verzeichnissen.\n --source <Version>\n Legt die Version der Quelle im Quelldateimodus fest.\n --finalization=<Wert>\n Steuert, ob die JVM Objekte finalisiert.\n Dabei ist <Wert> entweder "enabled" oder "disabled".\n Die Finalisierung ist standardmäßig aktiviert.\n\nDiese zusätzlichen Optionen können jederzeit ohne vorherige Ankündigung geändert werden.\n
Größe kann auf ein Vielfaches der\n Systemseitengröße aufgerundet werden, wenn für das Betriebssystem erforderlich.\n -Xverify Legt den Modus der Bytecodeverifizierung fest\n Beachten Sie, dass die Option -Xverify:none veraltet ist und\n in einem zukünftigen Release entfernt werden kann.\n --add-reads <Modul>=<Zielmodul>(,<Zielmodul>)*\n Aktualisiert <Modul>, damit <Zielmodul> gelesen wird, ungeachtet\n der Moduldeklaration. \n <Zielmodul> kann ALL-UNNAMED sein, um alle unbenannten\n Module zu lesen.\n --add-exports <Modul>/<Package>=<Zielmodul>(,<Zielmodul>)*\n Aktualisiert <Modul>, um <Package> in <Zielmodul> zu exportieren,\n ungeachtet der Moduldeklaration.\n <Zielmodul> kann ALL-UNNAMED sein, um in alle\n unbenannten Module zu exportieren.\n --add-opens <Modul>/<Package>=<Zielmodul>(,<Zielmodul>)*\n Aktualisiert <Modul>, um <Package> in\n <Zielmodul> zu öffnen, ungeachtet der Moduldeklaration.\n --limit-modules <Modulname>[,<Modulname>...]\n Grenzt die Gesamtmenge der beobachtbaren Module ein\n --patch-module <Modul>=<Datei>({0}<Datei>)*\n Überschreibt oder erweitert ein Modul mit Klassen und Ressourcen\n in JAR-Dateien oder Verzeichnissen.\n --source <Version>\n Legt die Version der Quelle im Quelldateimodus fest.\n --finalization=<Wert>\n Steuert, ob die JVM Objekte finalisiert.\n Dabei ist <Wert> entweder "enabled" oder "disabled".\n Die Finalisierung ist standardmäßig aktiviert.\n --sun-misc-unsafe-memory-access=<value>\n Verwendung der nicht unterstützten API sun.misc.Unsafe zulassen oder verweigern\n <value> ist "allow", "warn", "debug" oder "deny".\n Der Standardwert ist "allow".\n\nDiese zusätzlichen Optionen können jederzeit ohne vorherige Ankündigung geändert werden.\n

# Translators please note do not translate the options themselves
java.launcher.X.macosx.usage=\nDie folgenden Optionen sind für macOS spezifisch:\n -XstartOnFirstThread\n Führt die main()-Methode für den ersten (AppKit-)Thread aus\n -Xdock:name=<Anwendungsname>\n Setzt den im Dock angezeigten Standardanwendungsnamen außer Kraft\n -Xdock:icon=<Pfad zu Symboldatei>\n Setzt das im Dock angezeigte Standardsymbol außer Kraft\n\n
@@ -52,6 +52,7 @@ java.launcher.jar.error1=Fehler: Beim Versuch, Datei {0} zu öffnen, ist ein une
java.launcher.jar.error2=Manifest in {0} nicht gefunden
java.launcher.jar.error3=kein Hauptmanifestattribut, in {0}
java.launcher.jar.error4=Fehler beim Laden des Java-Agents in {0}
java.launcher.jar.error5=Fehler: Beim Versuch, Datei {0} zu schließen, ist ein unerwarteter Fehler aufgetreten
java.launcher.jar.error.illegal.ena.value=Fehler: Ungültiger Wert "{0}" für das Manifestattribut "Enable-Native-Access". Nur ''ALL-UNNAMED'' ist zulässig
java.launcher.init.error=Initialisierungsfehler
java.launcher.javafx.error1=Fehler: Die JavaFX-Methode launchApplication hat die falsche Signatur, sie\nmuss als statisch deklariert werden und einen Wert vom Typ VOID zurückgeben

@@ -1,5 +1,5 @@
#
# Copyright (c) 2007, 2023, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2007, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,7 @@ java.launcher.opt.footer = \ -cp <ディレクトリおよびzip/jarファイ
# Translators please note do not translate the options themselves
java.launcher.X.usage=\n -Xbatch バックグラウンド・コンパイルを無効にします\n -Xbootclasspath/a:<directories and zip/jar files separated by {0}>\n ブートストラップ・クラス・パスの最後に追加します\n -Xcheck:jni JNI関数に対する追加のチェックを実行します\n -Xcomp 初回呼出し時にメソッドのコンパイルを強制します\n -Xdebug 何も実行されません。将来のリリースで削除されるため、非推奨になりました。\n -Xdiag 追加の診断メッセージを表示します\n -Xfuture 将来のデフォルトを見越して、最も厳密なチェックを有効にします\n このオプションは非推奨であり、将来のリリースで削除される\n 可能性があります。\n -Xint インタプリタ・モードの実行のみ\n -Xinternalversion\n -versionオプションより詳細なJVMバージョン情報を\n 表示します\n -Xlog:<opts> Java Virtual Machine (JVM)統合ロギング・フレームワークでの\n ロギングを構成または有効化します。詳細は、-Xlog:helpを\n 使用してください。\n -Xloggc:<file> タイムスタンプが付いたファイルにGCステータスのログを記録します\n このオプションは非推奨であり、将来のリリースで削除される\n 可能性があります。-Xlog:gc:<file>で置換されています。\n -Xmixed 混合モードの実行(デフォルト)\n -Xmn<size> 若い世代(ナーサリ)のヒープの初期サイズおよび最大サイズ\n (バイト単位)を設定します\n -Xms<size> Javaの初期ヒープ・サイズを設定します\n -Xmx<size> Javaの最大ヒープ・サイズを設定します\n -Xnoclassgc クラスのガベージ・コレクションを無効にします\n -Xrs Java/VMによるOSシグナルの使用を削減します(ドキュメントを参照)\n -Xshare:auto 可能であれば共有クラス・データを使用します(デフォルト)\n -Xshare:off \
共有クラス・データの使用を試みません\n -Xshare:on 共有クラス・データの使用を必須にし、できなければ失敗します。\n これはテスト・オプションであり、断続的な失敗につながる\n 可能性があります。本番環境では使用しないでください。\n -XshowSettings すべての設定を表示して続行します\n -XshowSettings:all\n すべての設定を詳細に表示して続行します\n -XshowSettings:locale\n すべてのロケール関連の設定を表示して続行します\n -XshowSettings:properties\n すべてのプロパティ設定を表示して続行します\n -XshowSettings:vm\n すべてのVM関連の設定を表示して続行します\n -XshowSettings:security\n すべてのセキュリティ設定を表示して続行します\n -XshowSettings:security:all\n すべてのセキュリティ設定を表示して続行します\n -XshowSettings:security:properties\n セキュリティ・プロパティを表示して続行します\n -XshowSettings:security:providers\n 静的セキュリティ・プロバイダ設定を表示して続行します\n -XshowSettings:security:tls\n TLS関連のセキュリティ設定を表示して続行します\n -XshowSettings:system\n (Linuxのみ)ホスト・システムまたはコンテナを表示します\n 構成して続行します\n -Xss<size> javaスレッドのスタック・サイズを設定します\n 実際のサイズは、次の倍数に切り上げられる場合があります: \n オペレーティング・システムの要件に応じたシステム・ページ・サイズ。\n -Xverify バイトコード・ベリファイアのモードを設定します\n オプション-Xverify:noneは非推奨になり、\n 将来のリリースで削除される可能性があります。\n --add-reads <module>=<target-module>(,<target-module>)*\n モジュール宣言に関係なく、<module>を更新して<target-module>を\n \
読み取ります。 \n <target-module>をALL-UNNAMEDに設定すると、すべての名前のないモジュールを\n 読み取ることができます。\n --add-exports <module>/<package>=<target-module>(,<target-module>)*\n モジュール宣言に関係なく、<module>を更新して<package>を<target-module>に\n エクスポートします。\n <target-module>をALL-UNNAMEDに設定すると、すべての名前のないモジュールに\n エクスポートできます。\n --add-opens <module>/<package>=<target-module>(,<target-module>)*\n モジュール宣言に関係なく、<module>を更新して<package>を\n <target-module>に開きます。\n --limit-modules <module name>[,<module name>...]\n 参照可能なモジュールの領域を制限します\n --patch-module <module>=<file>({0}<file>)*\n JARファイルまたはディレクトリのクラスおよびリソースで\n モジュールをオーバーライドまたは拡張します。\n --source <version>\n ソースファイル・モードでソースのバージョンを設定します。\n --finalization=<value>\n JVMがオブジェクトのファイナライズを実行するかどうかを制御します\n <value>は"enabled"または"disabled"のいずれかです。\n ファイナライズはデフォルトで有効になっています。\n\nこの追加オプションは予告なしに変更されることがあります。\n
読み取ります。 \n <target-module>をALL-UNNAMEDに設定すると、すべての名前のないモジュールを\n 読み取ることができます。\n --add-exports <module>/<package>=<target-module>(,<target-module>)*\n モジュール宣言に関係なく、<module>を更新して<package>を<target-module>に\n エクスポートします。\n <target-module>をALL-UNNAMEDに設定すると、すべての名前のないモジュールに\n エクスポートできます。\n --add-opens <module>/<package>=<target-module>(,<target-module>)*\n モジュール宣言に関係なく、<module>を更新して<package>を\n <target-module>に開きます。\n --limit-modules <module name>[,<module name>...]\n 参照可能なモジュールの領域を制限します\n --patch-module <module>=<file>({0}<file>)*\n JARファイルまたはディレクトリのクラスおよびリソースで\n モジュールをオーバーライドまたは拡張します。\n --source <version>\n ソースファイル・モードでソースのバージョンを設定します。\n --finalization=<value>\n JVMがオブジェクトのファイナライズを実行するかどうかを制御します\n <value>は"enabled"または"disabled"のいずれかです。\n ファイナライズはデフォルトで有効になっています。\n --sun-misc-unsafe-memory-access=<value>\n サポートされていないAPI sun.misc.Unsafeの使用を許可または拒否します\n <value>は"allow"、"warn"、"debug"または"deny"のいずれかです。\n デフォルト値は、"allow"です。\n\nこの追加オプションは予告なしに変更されることがあります。\n

# Translators please note do not translate the options themselves
java.launcher.X.macosx.usage=\n次のオプションはmacOS固有です:\n -XstartOnFirstThread\n main()メソッドを最初(AppKit)のスレッドで実行する\n -Xdock:name=<application name>\n Dockに表示されるデフォルト・アプリケーション名をオーバーライドする\n -Xdock:icon=<path to icon file>\n Dockに表示されるデフォルト・アイコンをオーバーライドする\n\n
@@ -54,6 +54,7 @@ java.launcher.jar.error1=エラー: ファイル{0}を開こうとしている
java.launcher.jar.error2={0}にマニフェストが見つかりません
java.launcher.jar.error3={0}にメイン・マニフェスト属性がありません
java.launcher.jar.error4={0}内のJavaエージェントのロード中にエラーが発生しました
java.launcher.jar.error5=エラー: ファイル{0}を閉じるときに、予期しないエラーが発生しました
java.launcher.jar.error.illegal.ena.value=エラー: Enable-Native-Accessマニフェスト属性の値"{0}"が不正です。''ALL-UNNAMED''のみ許可されます
java.launcher.init.error=初期化エラー
java.launcher.javafx.error1=エラー: JavaFX launchApplicationメソッドに誤ったシグネチャがあり、\nstaticを宣言してvoid型の値を返す必要があります

@@ -1,5 +1,5 @@
#
# Copyright (c) 2007, 2023, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2007, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,7 @@ java.launcher.opt.footer = \ -cp <目录和 zip/jar 文件的类搜索路径>

# Translators please note do not translate the options themselves
java.launcher.X.usage=\n -Xbatch 禁用后台编译\n -Xbootclasspath/a:<以 {0} 分隔的目录和 zip/jar 文件>\n 附加在引导类路径末尾\n -Xcheck:jni 对 JNI 函数执行其他检查\n -Xcomp 强制在首次调用时编译方法\n -Xdebug 不执行任何操作;已过时,将在未来发行版中删除。\n -Xdiag 显示附加诊断消息\n -Xfuture 启用最严格的检查,预期将来的默认值。\n 此选项已过时,可能会在\n 未来发行版中删除。\n -Xint 仅解释模式执行\n -Xinternalversion\n 显示比 -version 选项更详细的\n JVM 版本信息\n -Xlog:<opts> 配置或启用采用 Java 虚拟\n 机 (Java Virtual Machine, JVM) 统一记录框架进行事件记录。使用 -Xlog:help\n 可了解详细信息。\n -Xloggc:<file> 将 GC 状态记录在文件中(带时间戳)。\n 此选项已过时,可能会在\n 将来的发行版中删除。它将替换为 -Xlog:gc:<file>。\n -Xmixed 混合模式执行(默认值)\n -Xmn<size> 为年轻代(新生代)设置初始和最大堆大小\n (以字节为单位)\n -Xms<size> 设置初始 Java 堆大小\n -Xmx<size> 设置最大 Java 堆大小\n -Xnoclassgc 禁用类垃圾收集\n -Xrs 减少 Java/VM 对操作系统信号的使用(请参见文档)\n -Xshare:auto 在可能的情况下使用共享类数据(默认值)\n -Xshare:off 不尝试使用共享类数据\n -Xshare:on 要求使用共享类数据,否则将失败。\n 这是一个测试选项,可能导致间歇性\n 故障。不应在生产环境中使用它。\n -XshowSettings 显示所有设置并继续\n -XshowSettings:all\n 详细显示所有设置并继续\n -XshowSettings:locale\n 显示所有与区域设置相关的设置并继续\n -XshowSettings:properties\n 显示所有属性设置并继续\n -XshowSettings:vm\n 显示所有与 vm 相关的设置并继续\n -XshowSettings:security\n 显示所有安全设置并继续\n -XshowSettings:security:all\n 显示所有安全设置并继续\n -XshowSettings:security:properties\n \
显示安全属性并继续\n -XshowSettings:security:providers\n 显示静态安全提供方设置并继续\n -XshowSettings:security:tls\n 显示与 TLS 相关的安全设置并继续\n -XshowSettings:system\n (仅 Linux)显示主机系统或容器\n 配置并继续\n -Xss<size> 设置 Java 线程堆栈大小\n 实际大小可以舍入到\n 操作系统要求的系统页面大小的倍数。\n -Xverify 设置字节码验证器的模式\n 请注意,选项 -Xverify:none 已过时,\n 可能会在未来发行版中删除。\n --add-reads <module>=<target-module>(,<target-module>)*\n 更新 <module> 以读取 <target-module>,而无论\n 模块如何声明。 \n <target-module> 可以是 ALL-UNNAMED,将读取所有未命名\n 模块。\n --add-exports <module>/<package>=<target-module>(,<target-module>)*\n 更新 <module> 以将 <package> 导出到 <target-module>,\n 而无论模块如何声明。\n <target-module> 可以是 ALL-UNNAMED,将导出到所有\n 未命名模块。\n --add-opens <module>/<package>=<target-module>(,<target-module>)*\n 更新 <module> 以在 <target-module> 中打开\n <package>,而无论模块如何声明。\n --limit-modules <module name>[,<module name>...]\n 限制可观察模块的领域\n --patch-module <module>=<file>({0}<file>)*\n 使用 JAR 文件或目录中的类和资源\n 覆盖或增强模块。\n --source <version>\n 设置源文件模式中源的版本。\n --finalization=<value>\n 控制 JVM 是否执行对象最终处理,\n 其中 <value> 为 "enabled" 或 "disabled" 之一。\n 默认情况下,最终处理处于启用状态。\n\n这些额外选项如有更改, 恕不另行通知。\n
显示安全属性并继续\n -XshowSettings:security:providers\n 显示静态安全提供方设置并继续\n -XshowSettings:security:tls\n 显示与 TLS 相关的安全设置并继续\n -XshowSettings:system\n (仅 Linux)显示主机系统或容器\n 配置并继续\n -Xss<size> 设置 Java 线程堆栈大小\n 实际大小可以舍入到\n 操作系统要求的系统页面大小的倍数。\n -Xverify 设置字节码验证器的模式\n 请注意,选项 -Xverify:none 已过时,\n 可能会在未来发行版中删除。\n --add-reads <module>=<target-module>(,<target-module>)*\n 更新 <module> 以读取 <target-module>,而无论\n 模块如何声明。 \n <target-module> 可以是 ALL-UNNAMED,将读取所有未命名\n 模块。\n --add-exports <module>/<package>=<target-module>(,<target-module>)*\n 更新 <module> 以将 <package> 导出到 <target-module>,\n 而无论模块如何声明。\n <target-module> 可以是 ALL-UNNAMED,将导出到所有\n 未命名模块。\n --add-opens <module>/<package>=<target-module>(,<target-module>)*\n 更新 <module> 以在 <target-module> 中打开\n <package>,而无论模块如何声明。\n --limit-modules <module name>[,<module name>...]\n 限制可观察模块的领域\n --patch-module <module>=<file>({0}<file>)*\n 使用 JAR 文件或目录中的类和资源\n 覆盖或增强模块。\n --source <version>\n 设置源文件模式中源的版本。\n --finalization=<value>\n 控制 JVM 是否执行对象最终处理,\n 其中 <value> 为 "enabled" 或 "disabled" 之一。\n 默认情况下,最终处理处于启用状态。\n --sun-misc-unsafe-memory-access=<value>\n 允许或拒绝使用不受支持的 API sun.misc.Unsafe\n <value> 为 "allow"、"warn"、"debug" 或 "deny" 之一。\n 默认值为 "allow"。\n\n这些额外选项如有更改, 恕不另行通知。\n

# Translators please note do not translate the options themselves
java.launcher.X.macosx.usage=\n以下选项是特定于 macOS 的选项:\n -XstartOnFirstThread\n 在第一个 (AppKit) 线程上运行 main() 方法\n -Xdock:name=<application name>\n 覆盖停靠栏中显示的默认应用程序名称\n -Xdock:icon=<path to icon file>\n 覆盖停靠栏中显示的默认图标\n\n
@@ -52,6 +52,7 @@ java.launcher.jar.error1=错误: 尝试打开文件{0}时出现意外错误
java.launcher.jar.error2=在{0}中找不到清单
java.launcher.jar.error3={0}中没有主清单属性
java.launcher.jar.error4=在 {0} 中加载 Java 代理时出错
java.launcher.jar.error5=错误:尝试关闭文件 {0} 时出现意外错误
java.launcher.jar.error.illegal.ena.value=错误:Enable-Native-Access 清单属性的值 "{0}" 非法。仅允许使用 ''ALL-UNNAMED''
java.launcher.init.error=初始化错误
java.launcher.javafx.error1=错误: JavaFX launchApplication 方法具有错误的签名, 必须\n将方法声明为静态方法并返回空类型的值

@@ -30,6 +30,8 @@
package sun.net.www;

import java.io.*;
import java.lang.reflect.Array;
import java.net.ProtocolException;
import java.util.Collections;
import java.util.*;

@@ -45,11 +47,32 @@ public final class MessageHeader {
    private String[] values;
    private int nkeys;

    // max number of bytes for headers, <=0 means unlimited;
    // this corresponds to the length of the names, plus the length
    // of the values, plus an overhead of 32 bytes per name: value
    // pair.
    // Note: we use the same definition as HTTP/2 SETTINGS_MAX_HEADER_LIST_SIZE
    // see RFC 9113, section 6.5.2.
    // https://www.rfc-editor.org/rfc/rfc9113.html#SETTINGS_MAX_HEADER_LIST_SIZE
    private final int maxHeaderSize;

    // Aggregate size of the field lines (name + value + 32) x N
    // that have been parsed and accepted so far.
    // This is defined as a long to force promotion to long
    // and avoid overflows; see checkNewSize;
    private long size;

    public MessageHeader () {
        this(0);
    }

    public MessageHeader (int maxHeaderSize) {
        this.maxHeaderSize = maxHeaderSize;
        grow();
    }

    public MessageHeader (InputStream is) throws java.io.IOException {
        maxHeaderSize = 0;
        parseHeader(is);
    }

@@ -476,10 +499,28 @@ public final class MessageHeader {
    public void parseHeader(InputStream is) throws java.io.IOException {
        synchronized (this) {
            nkeys = 0;
            size = 0;
        }
        mergeHeader(is);
    }

    private void checkMaxHeaderSize(int sz) throws ProtocolException {
        if (maxHeaderSize > 0) checkNewSize(size, sz, 0);
    }

    private long checkNewSize(long size, int name, int value) throws ProtocolException {
        // See SETTINGS_MAX_HEADER_LIST_SIZE, RFC 9113, section 6.5.2.
        long newSize = size + name + value + 32;
        if (maxHeaderSize > 0 && newSize > maxHeaderSize) {
            Arrays.fill(keys, 0, nkeys, null);
            Arrays.fill(values, 0, nkeys, null);
            nkeys = 0;
            throw new ProtocolException(String.format("Header size too big: %s > %s",
                    newSize, maxHeaderSize));
        }
        return newSize;
    }

    /** Parse and merge a MIME header from an input stream. */
    @SuppressWarnings("fallthrough")
    public void mergeHeader(InputStream is) throws java.io.IOException {
@@ -493,7 +534,15 @@ public final class MessageHeader {
            int c;
            boolean inKey = firstc > ' ';
            s[len++] = (char) firstc;
            checkMaxHeaderSize(len);
            parseloop:{
                // We start parsing for a new name value pair here.
                // The max header size includes an overhead of 32 bytes per
                // name value pair.
                // See SETTINGS_MAX_HEADER_LIST_SIZE, RFC 9113, section 6.5.2.
                long maxRemaining = maxHeaderSize > 0
                        ? maxHeaderSize - size - 32
                        : Long.MAX_VALUE;
                while ((c = is.read()) >= 0) {
                    switch (c) {
                      case ':':
@@ -527,6 +576,9 @@ public final class MessageHeader {
                            s = ns;
                        }
                        s[len++] = (char) c;
                        if (maxHeaderSize > 0 && len > maxRemaining) {
                            checkMaxHeaderSize(len);
                        }
                }
                firstc = -1;
            }
@@ -548,6 +600,9 @@ public final class MessageHeader {
                    v = new String();
                else
                    v = String.copyValueOf(s, keyend, len - keyend);
                int klen = k == null ? 0 : k.length();

                size = checkNewSize(size, klen, v.length());
                add(k, v);
            }
        }

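The accounting introduced in MessageHeader above mirrors HTTP/2's SETTINGS_MAX_HEADER_LIST_SIZE (RFC 9113, section 6.5.2): each accepted field line is charged name length + value length + 32 octets of overhead, and the whole header block is rejected once the running total passes the cap. A minimal standalone sketch of the same bookkeeping (class and method names here are illustrative, not part of the JDK change):

// Sketch: RFC 9113-style header-list budget, matching checkNewSize above.
final class HeaderSizeBudget {
    private final long max;   // <= 0 means unlimited, like maxHeaderSize
    private long used;        // running sum of (name + value + 32) per field line

    HeaderSizeBudget(long max) { this.max = max; }

    void charge(String name, String value) throws java.net.ProtocolException {
        long newSize = used + name.length() + value.length() + 32;
        if (max > 0 && newSize > max) {
            throw new java.net.ProtocolException(
                    "Header size too big: " + newSize + " > " + max);
        }
        used = newSize;
    }
}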
@@ -172,6 +172,8 @@ public class HttpURLConnection extends java.net.HttpURLConnection {
     */
    private static final int bufSize4ES;

    private static final int maxHeaderSize;

    /*
     * Restrict setting of request headers through the public api
     * consistent with JavaScript XMLHttpRequest2 with a few
@@ -288,6 +290,19 @@ public class HttpURLConnection extends java.net.HttpURLConnection {
        } else {
            restrictedHeaderSet = null;
        }

        int defMaxHeaderSize = 384 * 1024;
        String maxHeaderSizeStr = getNetProperty("jdk.http.maxHeaderSize");
        int maxHeaderSizeVal = defMaxHeaderSize;
        if (maxHeaderSizeStr != null) {
            try {
                maxHeaderSizeVal = Integer.parseInt(maxHeaderSizeStr);
            } catch (NumberFormatException n) {
                maxHeaderSizeVal = defMaxHeaderSize;
            }
        }
        if (maxHeaderSizeVal < 0) maxHeaderSizeVal = 0;
        maxHeaderSize = maxHeaderSizeVal;
    }

    static final String httpVersion = "HTTP/1.1";
@@ -754,7 +769,7 @@ public class HttpURLConnection extends java.net.HttpURLConnection {
        }
        ps = (PrintStream) http.getOutputStream();
        connected=true;
        responses = new MessageHeader();
        responses = new MessageHeader(maxHeaderSize);
        setRequests=false;
        writeRequests();
    }
@@ -912,7 +927,7 @@ public class HttpURLConnection extends java.net.HttpURLConnection {
            throws IOException {
        super(checkURL(u));
        requests = new MessageHeader();
        responses = new MessageHeader();
        responses = new MessageHeader(maxHeaderSize);
        userHeaders = new MessageHeader();
        this.handler = handler;
        instProxy = p;
@@ -2810,7 +2825,7 @@ public class HttpURLConnection extends java.net.HttpURLConnection {
        }

        // clear out old response headers!!!!
        responses = new MessageHeader();
        responses = new MessageHeader(maxHeaderSize);
        if (stat == HTTP_USE_PROXY) {
            /* This means we must re-request the resource through the
             * proxy denoted in the "Location:" field of the response.
@@ -3000,7 +3015,7 @@ public class HttpURLConnection extends java.net.HttpURLConnection {
            } catch (IOException e) { }
        }
        responseCode = -1;
        responses = new MessageHeader();
        responses = new MessageHeader(maxHeaderSize);
        connected = false;
    }

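All four construction/reset sites above now pass the cap into MessageHeader, so every response header block this client parses is bounded. The limit comes from the jdk.http.maxHeaderSize networking property (default 384 * 1024 per the static initializer; negative values are clamped to 0, i.e. unlimited); like other net properties it can typically also be supplied as a system property. A hedged usage sketch (the URL is a placeholder):

// Run with, e.g.:  java -Djdk.http.maxHeaderSize=1048576 Fetch
// A server whose response headers exceed the cap makes getInputStream()
// fail with java.net.ProtocolException: Header size too big: ...
import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class Fetch {
    public static void main(String[] args) throws Exception {
        HttpURLConnection c = (HttpURLConnection)
                new URL("https://example.com/").openConnection();
        try (InputStream in = c.getInputStream()) {
            in.transferTo(OutputStream.nullOutputStream());
        }
        System.out.println(c.getResponseCode());
    }
}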
@@ -806,14 +806,19 @@ class DatagramChannelImpl
        }
    }

    /**
     * Receives a datagram into a direct buffer.
     */
    private int receiveIntoNativeBuffer(ByteBuffer bb, int rem, int pos,
                                        boolean connected)
        throws IOException
    {
        NIO_ACCESS.acquireSession(bb);
        try {
            long bufAddress = NIO_ACCESS.getBufferAddress(bb);
            int n = receive0(fd,
                             ((DirectBuffer)bb).address() + pos, rem,
                             bufAddress + pos,
                             rem,
                             sourceSockAddr.address(),
                             connected);
            if (n > 0)
@@ -991,6 +996,9 @@ class DatagramChannelImpl
        }
    }

    /**
     * Send a datagram contained in a direct buffer.
     */
    private int sendFromNativeBuffer(FileDescriptor fd, ByteBuffer bb,
                                     InetSocketAddress target)
        throws IOException
@@ -1003,9 +1011,13 @@ class DatagramChannelImpl
        int written;
        NIO_ACCESS.acquireSession(bb);
        try {
            long bufAddress = NIO_ACCESS.getBufferAddress(bb);
            int addressLen = targetSocketAddress(target);
            written = send0(fd, ((DirectBuffer)bb).address() + pos, rem,
                            targetSockAddr.address(), addressLen);
            written = send0(fd,
                            bufAddress + pos,
                            rem,
                            targetSockAddr.address(),
                            addressLen);
        } catch (PortUnreachableException pue) {
            if (isConnected())
                throw pue;

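Both I/O paths now resolve the buffer's native address through the shared-secrets accessor while the buffer's session is held, rather than casting to DirectBuffer at each argument position. A sketch of that pattern under those assumptions (nativeSend is a hypothetical stand-in for the send0/receive0 natives; the release call is shown for symmetry):

// Pin the buffer, resolve its address once, then hand offsets to the native.
long doSend(ByteBuffer bb, int pos, int rem) throws IOException {
    NIO_ACCESS.acquireSession(bb);                    // pin backing memory
    try {
        long bufAddress = NIO_ACCESS.getBufferAddress(bb);
        return nativeSend(bufAddress + pos, rem);     // hypothetical native call
    } finally {
        NIO_ACCESS.releaseSession(bb);                // unpin
    }
}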
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -213,8 +213,6 @@ final class ClientHello {
            // ignore cookie
            hos.putBytes16(getEncodedCipherSuites());
            hos.putBytes8(compressionMethod);
            extensions.send(hos);       // In TLS 1.3, use of certain
                                        // extensions is mandatory.
        } catch (IOException ioe) {
            // unlikely
        }
@@ -903,8 +901,8 @@ final class ClientHello {
            throw context.conContext.fatal(Alert.PROTOCOL_VERSION,
                "The client supported protocol versions " + Arrays.toString(
                    ProtocolVersion.toStringArray(clientSupportedVersions)) +
                " are not accepted by server preferences " +
                    context.activeProtocols);
                " are not accepted by server preferences " + Arrays.toString(
                    ProtocolVersion.toStringArray(context.activeProtocols)));
        }
    }

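The second hunk makes the alert text symmetric: the server-preference list is now routed through the same ProtocolVersion.toStringArray plus Arrays.toString formatting already used for the client-side list, so both halves of the message render element-wise. For reference, Arrays.toString is what makes a Java array print its contents at all; a two-line demonstration of the underlying behavior:

import java.util.Arrays;

public class ArrayToStringDemo {
    public static void main(String[] args) {
        String[] versions = { "TLSv1.2", "TLSv1.3" };
        System.out.println("" + versions);             // [Ljava.lang.String;@<hash>
        System.out.println(Arrays.toString(versions)); // [TLSv1.2, TLSv1.3]
    }
}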
@@ -1426,6 +1424,9 @@ final class ClientHello {
        shc.handshakeProducers.put(SSLHandshake.SERVER_HELLO.id,
                SSLHandshake.SERVER_HELLO);

        // Reset the ClientHello non-zero offset fragment allowance
        shc.acceptCliHelloFragments = false;

        //
        // produce
        //

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -40,12 +40,23 @@ import sun.security.ssl.SSLCipher.SSLReadCipher;
final class DTLSInputRecord extends InputRecord implements DTLSRecord {
    private DTLSReassembler reassembler = null;
    private int readEpoch;
    private SSLContextImpl sslContext;

    DTLSInputRecord(HandshakeHash handshakeHash) {
        super(handshakeHash, SSLReadCipher.nullDTlsReadCipher());
        this.readEpoch = 0;
    }

    // Method to set TransportContext
    public void setTransportContext(TransportContext tc) {
        this.tc = tc;
    }

    // Method to set SSLContext
    public void setSSLContext(SSLContextImpl sslContext) {
        this.sslContext = sslContext;
    }

    @Override
    void changeReadCiphers(SSLReadCipher readCipher) {
        this.readCipher = readCipher;
@@ -537,6 +548,27 @@ final class DTLSInputRecord extends InputRecord implements DTLSRecord {
        }
    }

    /**
     * Turn a sufficiently-large initial ClientHello fragment into one that
     * stops immediately after the compression methods. This is only used
     * for the initial CH message fragment at offset 0.
     *
     * @param srcFrag the fragment actually received by the DTLSReassembler
     * @param limit the size of the new, cloned/truncated handshake fragment
     *
     * @return a truncated handshake fragment that is sized to look like a
     * complete message, but actually contains only up to the compression
     * methods (no extensions)
     */
    private static HandshakeFragment truncateChFragment(HandshakeFragment srcFrag,
            int limit) {
        return new HandshakeFragment(Arrays.copyOf(srcFrag.fragment, limit),
                srcFrag.contentType, srcFrag.majorVersion,
                srcFrag.minorVersion, srcFrag.recordEnS, srcFrag.recordEpoch,
                srcFrag.recordSeq, srcFrag.handshakeType, limit,
                srcFrag.messageSeq, srcFrag.fragmentOffset, limit);
    }

    private static final class HoleDescriptor {
        int offset;             // fragment_offset
        int limit;              // fragment_offset + fragment_length
@@ -640,10 +672,17 @@ final class DTLSInputRecord extends InputRecord implements DTLSRecord {
    // Queue up a handshake message.
    void queueUpHandshake(HandshakeFragment hsf) throws SSLProtocolException {
        if (!isDesirable(hsf)) {
            // Not a dedired record, discard it.
            // Not a desired record, discard it.
            return;
        }

        if (hsf.handshakeType == SSLHandshake.CLIENT_HELLO.id) {
            // validate the first or subsequent ClientHello message
            if ((hsf = valHello(hsf, hsf.messageSeq == 0)) == null) {
                return;
            }
        }

        // Clean up the retransmission messages if necessary.
        cleanUpRetransmit(hsf);

@@ -769,6 +808,100 @@ final class DTLSInputRecord extends InputRecord implements DTLSRecord {
        }
    }

    private HandshakeFragment valHello(HandshakeFragment hsf,
            boolean firstHello) {
        ServerHandshakeContext shc =
                (ServerHandshakeContext) tc.handshakeContext;
        // Drop any fragment that is not a zero offset until we've received
        // a second (or possibly later) CH message that passes the cookie
        // check.
        if (shc == null || !shc.acceptCliHelloFragments) {
            if (hsf.fragmentOffset != 0) {
                return null;
            }
        } else {
            // Let this fragment through to the DTLSReassembler as-is
            return hsf;
        }

        try {
            ByteBuffer fragmentData = ByteBuffer.wrap(hsf.fragment);

            ProtocolVersion pv = ProtocolVersion.valueOf(
                    Record.getInt16(fragmentData));
            if (!pv.isDTLS) {
                return null;
            }
            // Read the random (32 bytes)
            if (fragmentData.remaining() < 32) {
                if (SSLLogger.isOn && SSLLogger.isOn("verbose")) {
                    SSLLogger.fine("Rejected client hello fragment (bad random len) " +
                            "fo=" + hsf.fragmentOffset + " fl=" + hsf.fragmentLength);
                }
                return null;
            }
            fragmentData.position(fragmentData.position() + 32);

            // SessionID
            byte[] sessId = Record.getBytes8(fragmentData);
            if (sessId.length > 0 &&
                    !SSLConfiguration.enableDtlsResumeCookie) {
                // If we are in a resumption it is possible that the cookie
                // exchange will be skipped. This is a server-side setting
                // and it is NOT the default. If enableDtlsResumeCookie is
                // false though, then we will buffer fragments since there
                // is no cookie exchange to execute prior to performing
                // reassembly.
                return hsf;
            }

            // Cookie
            byte[] cookie = Record.getBytes8(fragmentData);
            if (firstHello && cookie.length != 0) {
                if (SSLLogger.isOn && SSLLogger.isOn("verbose")) {
                    SSLLogger.fine("Rejected initial client hello fragment (bad cookie len) " +
                            "fo=" + hsf.fragmentOffset + " fl=" + hsf.fragmentLength);
                }
                return null;
            }
            // CipherSuites
            Record.getBytes16(fragmentData);
            // Compression methods
            Record.getBytes8(fragmentData);

            // If it's the first fragment, we'll truncate it and push it
            // through the reassembler.
            if (firstHello) {
                return truncateChFragment(hsf, fragmentData.position());
            } else {
                HelloCookieManager hcMgr = sslContext.
                        getHelloCookieManager(ProtocolVersion.DTLS10);
                ByteBuffer msgFragBuf = ByteBuffer.wrap(hsf.fragment, 0,
                        fragmentData.position());
                ClientHello.ClientHelloMessage chMsg =
                        new ClientHello.ClientHelloMessage(shc, msgFragBuf, null);
                if (!hcMgr.isCookieValid(shc, chMsg, cookie)) {
                    // Bad cookie check, truncate it and let the ClientHello
                    // consumer recheck, fail and take the appropriate action.
                    return truncateChFragment(hsf, fragmentData.position());
                } else {
                    // It's a good cookie, return the original handshake
                    // fragment and let it go into the DTLSReassembler like
                    // any other fragment so we can wait for the rest of
                    // the CH message.
                    shc.acceptCliHelloFragments = true;
                    return hsf;
                }
            }
        } catch (IOException ioe) {
            if (SSLLogger.isOn && SSLLogger.isOn("verbose")) {
                SSLLogger.fine("Rejected client hello fragment " +
                        "fo=" + hsf.fragmentOffset + " fl=" + hsf.fragmentLength);
            }
            return null;
        }
    }

    // Queue up a ChangeCipherSpec message
    void queueUpChangeCipherSpec(RecordFragment rf)
            throws SSLProtocolException {

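valHello only needs the fixed prefix of the ClientHello, which is why truncateChFragment can cut at fragmentData.position() right after the compression methods. The DTLS ClientHello prefix it walks is: client_version (2 bytes), random (32 bytes), session_id (8-bit length), cookie (8-bit length, DTLS-specific), cipher_suites (16-bit length), compression_methods (8-bit length). A standalone sketch of that walk (helper names are illustrative, not from the JDK sources):

import java.nio.ByteBuffer;

public class ChPrefix {
    // Returns the offset just past compression_methods, i.e. where
    // truncateChFragment cuts the fragment.
    static int prefixEnd(ByteBuffer b) {
        b.getShort();                      // client_version
        b.position(b.position() + 32);     // random
        skipVector8(b);                    // session_id
        skipVector8(b);                    // cookie (DTLS only)
        skipVector16(b);                   // cipher_suites
        skipVector8(b);                    // compression_methods
        return b.position();
    }
    static void skipVector8(ByteBuffer b)  { b.position(b.position() + (b.get() & 0xFF)); }
    static void skipVector16(ByteBuffer b) { b.position(b.position() + (b.getShort() & 0xFFFF)); }
}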
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -55,6 +55,7 @@ class ServerHandshakeContext extends HandshakeContext {
    CertificateMessage.CertificateEntry currentCertEntry;
    private static final long DEFAULT_STATUS_RESP_DELAY = 5000L;
    final long statusRespTimeout;
    boolean acceptCliHelloFragments = false;


    ServerHandshakeContext(SSLContextImpl sslContext,

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -156,6 +156,11 @@ final class TransportContext implements ConnectionContext {

        this.acc = AccessController.getContext();
        this.consumers = new HashMap<>();

        if (inputRecord instanceof DTLSInputRecord dtlsInputRecord) {
            dtlsInputRecord.setTransportContext(this);
            dtlsInputRecord.setSSLContext(this.sslContext);
        }
    }

    // Dispatch plaintext to a specific consumer.

@@ -90,12 +90,11 @@ public abstract sealed class IntegerPolynomial implements IntegerFieldModuloP
     * store the result in an IntegerPolynomial representation in a. Requires
     * that a.length == numLimbs.
     */
    protected int multByInt(long[] a, long b) {
    protected void multByInt(long[] a, long b) {
        for (int i = 0; i < a.length; i++) {
            a[i] *= b;
        }
        reduce(a);
        return 0;
    }

    /**
@@ -104,7 +103,7 @@ public abstract sealed class IntegerPolynomial implements IntegerFieldModuloP
     * a.length == b.length == r.length == numLimbs. It is allowed for a and r
     * to be the same array.
     */
    protected abstract int mult(long[] a, long[] b, long[] r);
    protected abstract void mult(long[] a, long[] b, long[] r);

    /**
     * Multiply an IntegerPolynomial representation (a) with itself and store
@@ -112,7 +111,7 @@ public abstract sealed class IntegerPolynomial implements IntegerFieldModuloP
     * a.length == r.length == numLimbs. It is allowed for a and r
     * to be the same array.
     */
    protected abstract int square(long[] a, long[] r);
    protected abstract void square(long[] a, long[] r);

    IntegerPolynomial(int bitsPerLimb,
                      int numLimbs,
@@ -622,8 +621,8 @@ public abstract sealed class IntegerPolynomial implements IntegerFieldModuloP
        }

        long[] newLimbs = new long[limbs.length];
        int numAdds = mult(limbs, b.limbs, newLimbs);
        return new ImmutableElement(newLimbs, numAdds);
        mult(limbs, b.limbs, newLimbs);
        return new ImmutableElement(newLimbs, 0);
    }

    @Override
@@ -635,8 +634,8 @@ public abstract sealed class IntegerPolynomial implements IntegerFieldModuloP
        }

        long[] newLimbs = new long[limbs.length];
        int numAdds = IntegerPolynomial.this.square(limbs, newLimbs);
        return new ImmutableElement(newLimbs, numAdds);
        IntegerPolynomial.this.square(limbs, newLimbs);
        return new ImmutableElement(newLimbs, 0);
    }

    public void addModPowerTwo(IntegerModuloP arg, byte[] result) {
@@ -751,7 +750,8 @@ public abstract sealed class IntegerPolynomial implements IntegerFieldModuloP
            b.numAdds = 0;
        }

        numAdds = mult(limbs, b.limbs, limbs);
        mult(limbs, b.limbs, limbs);
        numAdds = 0;
        return this;
    }

@@ -764,7 +764,8 @@ public abstract sealed class IntegerPolynomial implements IntegerFieldModuloP
        }

        int value = ((Limb)v).value;
        numAdds += multByInt(limbs, value);
        multByInt(limbs, value);
        numAdds = 0;
        return this;
    }

@@ -824,7 +825,8 @@ public abstract sealed class IntegerPolynomial implements IntegerFieldModuloP
            numAdds = 0;
        }

        numAdds = IntegerPolynomial.this.square(limbs, limbs);
        IntegerPolynomial.this.square(limbs, limbs);
        numAdds = 0;
        return this;
    }

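Taken together, these hunks change the contract of mult/square/multByInt across the field arithmetic: previously they returned an int that callers accumulated into numAdds to track how far a result might exceed the canonical range; now they return void because each result is reduced before it escapes, so every call site pins numAdds to 0. In sketch form (types simplified; not the actual JDK interface):

// Old shape: caller tracks potential overflow of r.
//   int mult(long[] a, long[] b, long[] r);   // returns an "excess additions" count
// New shape: r is already reduced on return, so there is nothing to track.
//   void mult(long[] a, long[] b, long[] r);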
@@ -50,7 +50,7 @@ public final class IntegerPolynomial1305 extends IntegerPolynomial {
        super(BITS_PER_LIMB, NUM_LIMBS, 1, MODULUS);
    }

    protected int mult(long[] a, long[] b, long[] r) {
    protected void mult(long[] a, long[] b, long[] r) {

        // Use grade-school multiplication into primitives to avoid the
        // temporary array allocation. This is equivalent to the following
@@ -73,7 +73,6 @@ public final class IntegerPolynomial1305 extends IntegerPolynomial {
        long c8 = (a[4] * b[4]);

        carryReduce(r, c0, c1, c2, c3, c4, c5, c6, c7, c8);
        return 0;
    }

    private void carryReduce(long[] r, long c0, long c1, long c2, long c3,
@@ -100,7 +99,7 @@ public final class IntegerPolynomial1305 extends IntegerPolynomial {
    }

    @Override
    protected int square(long[] a, long[] r) {
    protected void square(long[] a, long[] r) {
        // Use grade-school multiplication with a simple squaring optimization.
        // Multiply into primitives to avoid the temporary array allocation.
        // This is equivalent to the following code:
@@ -123,7 +122,6 @@ public final class IntegerPolynomial1305 extends IntegerPolynomial {
        long c8 = (a[4] * a[4]);

        carryReduce(r, c0, c1, c2, c3, c4, c5, c6, c7, c8);
        return 0;
    }

    @Override

@@ -131,12 +131,11 @@ public sealed class IntegerPolynomialModBinP extends IntegerPolynomial {
    }

    @Override
    protected int mult(long[] a, long[] b, long[] r) {
    protected void mult(long[] a, long[] b, long[] r) {

        long[] c = new long[2 * numLimbs];
        multOnly(a, b, c);
        carryReduce(c, r);
        return 0;
    }

    private void modReduceInBits(long[] limbs, int index, int bits, long x) {
@@ -189,7 +188,7 @@ public sealed class IntegerPolynomialModBinP extends IntegerPolynomial {
    }

    @Override
    protected int square(long[] a, long[] r) {
    protected void square(long[] a, long[] r) {

        long[] c = new long[2 * numLimbs];
        for (int i = 0; i < numLimbs; i++) {
@@ -200,7 +199,6 @@ public sealed class IntegerPolynomialModBinP extends IntegerPolynomial {
        }

        carryReduce(c, r);
        return 0;
    }

    /**

@@ -31,6 +31,7 @@ import sun.security.util.math.SmallValue;
import sun.security.util.math.IntegerFieldModuloP;
import java.math.BigInteger;
import jdk.internal.vm.annotation.IntrinsicCandidate;
import jdk.internal.vm.annotation.ForceInline;

// Reference:
// - [1] Shay Gueron and Vlad Krasnov "Fast Prime Field Elliptic Curve
@@ -103,8 +104,8 @@ public final class MontgomeryIntegerPolynomialP256 extends IntegerPolynomial
        setLimbsValuePositive(v, vLimbs);

        // Convert to Montgomery domain
        int numAdds = mult(vLimbs, h, montLimbs);
        return new ImmutableElement(montLimbs, numAdds);
        mult(vLimbs, h, montLimbs);
        return new ImmutableElement(montLimbs, 0);
    }

    @Override
@@ -114,24 +115,6 @@ public final class MontgomeryIntegerPolynomialP256 extends IntegerPolynomial
        return super.getSmallValue(value);
    }

    /*
     * This function is used by IntegerPolynomial.setProduct(SmallValue v) to
     * multiply by a small constant (i.e. (int) 1,2,3,4). Instead of doing a
     * montgomery conversion followed by a montgomery multiplication, just use
     * the spare top (64-BITS_PER_LIMB) bits to multiply by a constant. (See [1]
     * Section 4 )
     *
     * Will return an unreduced value
     */
    @Override
    protected int multByInt(long[] a, long b) {
        assert (b < (1 << BITS_PER_LIMB));
        for (int i = 0; i < a.length; i++) {
            a[i] *= b;
        }
        return (int) (b - 1);
    }

    @Override
    public ImmutableIntegerModuloP fromMontgomery(ImmutableIntegerModuloP n) {
        assert n.getField() == MontgomeryIntegerPolynomialP256.ONE;
@@ -163,10 +146,11 @@ public final class MontgomeryIntegerPolynomialP256 extends IntegerPolynomial
    }

    @Override
    protected int square(long[] a, long[] r) {
        return mult(a, a, r);
    protected void square(long[] a, long[] r) {
        mult(a, a, r);
    }


    /**
     * Unrolled Word-by-Word Montgomery Multiplication r = a * b * 2^-260 (mod P)
     *
@@ -174,8 +158,15 @@ public final class MontgomeryIntegerPolynomialP256 extends IntegerPolynomial
     * for a Montgomery Friendly modulus p". Note: Step 6. Skipped; Instead use
     * numAdds to reuse existing overflow logic.
     */
    @Override
    protected void mult(long[] a, long[] b, long[] r) {
        multImpl(a, b, r);
        reducePositive(r);
    }

    @ForceInline
    @IntrinsicCandidate
    protected int mult(long[] a, long[] b, long[] r) {
    private void multImpl(long[] a, long[] b, long[] r) {
        long aa0 = a[0];
        long aa1 = a[1];
        long aa2 = a[2];
@@ -408,36 +399,16 @@ public final class MontgomeryIntegerPolynomialP256 extends IntegerPolynomial
        d4 += n4 & LIMB_MASK;

        c5 += d1 + dd0 + (d0 >>> BITS_PER_LIMB);
        c6 += d2 + dd1 + (c5 >>> BITS_PER_LIMB);
        c7 += d3 + dd2 + (c6 >>> BITS_PER_LIMB);
        c8 += d4 + dd3 + (c7 >>> BITS_PER_LIMB);
        c9 = dd4 + (c8 >>> BITS_PER_LIMB);
        c6 += d2 + dd1;
        c7 += d3 + dd2;
        c8 += d4 + dd3;
        c9 = dd4;

        c5 &= LIMB_MASK;
        c6 &= LIMB_MASK;
        c7 &= LIMB_MASK;
        c8 &= LIMB_MASK;

        // At this point, the result could overflow by one modulus.
        c0 = c5 - modulus[0];
        c1 = c6 - modulus[1] + (c0 >> BITS_PER_LIMB);
        c0 &= LIMB_MASK;
        c2 = c7 - modulus[2] + (c1 >> BITS_PER_LIMB);
        c1 &= LIMB_MASK;
        c3 = c8 - modulus[3] + (c2 >> BITS_PER_LIMB);
        c2 &= LIMB_MASK;
        c4 = c9 - modulus[4] + (c3 >> BITS_PER_LIMB);
        c3 &= LIMB_MASK;

        long mask = c4 >> BITS_PER_LIMB; // Signed shift!

        r[0] = ((c5 & mask) | (c0 & ~mask));
        r[1] = ((c6 & mask) | (c1 & ~mask));
        r[2] = ((c7 & mask) | (c2 & ~mask));
        r[3] = ((c8 & mask) | (c3 & ~mask));
        r[4] = ((c9 & mask) | (c4 & ~mask));

        return 0;
        r[0] = c5;
        r[1] = c6;
        r[2] = c7;
        r[3] = c8;
        r[4] = c9;
    }

    @Override
@@ -516,8 +487,8 @@ public final class MontgomeryIntegerPolynomialP256 extends IntegerPolynomial
        super.encode(v, offset, length, highByte, vLimbs);

        // Convert to Montgomery domain
        int numAdds = mult(vLimbs, h, montLimbs);
        return new ImmutableElement(montLimbs, numAdds);
        mult(vLimbs, h, montLimbs);
        return new ImmutableElement(montLimbs, 0);
    }

    /*
@@ -556,4 +527,27 @@ public final class MontgomeryIntegerPolynomialP256 extends IntegerPolynomial
        limbs[i - 5] += (v << 4) & LIMB_MASK;
        limbs[i - 4] += v >> 48;
    }

    // Used when limbs a could overflow by one modulus.
    @ForceInline
    protected void reducePositive(long[] a) {
        long aa0 = a[0];
        long aa1 = a[1] + (aa0>>BITS_PER_LIMB);
        long aa2 = a[2] + (aa1>>BITS_PER_LIMB);
        long aa3 = a[3] + (aa2>>BITS_PER_LIMB);
        long aa4 = a[4] + (aa3>>BITS_PER_LIMB);

        long c0 = a[0] - modulus[0];
        long c1 = a[1] - modulus[1] + (c0 >> BITS_PER_LIMB);
        long c2 = a[2] - modulus[2] + (c1 >> BITS_PER_LIMB);
        long c3 = a[3] - modulus[3] + (c2 >> BITS_PER_LIMB);
        long c4 = a[4] - modulus[4] + (c3 >> BITS_PER_LIMB);
        long mask = c4 >> BITS_PER_LIMB; // Signed shift!

        a[0] = ((aa0 & mask) | (c0 & ~mask)) & LIMB_MASK;
        a[1] = ((aa1 & mask) | (c1 & ~mask)) & LIMB_MASK;
        a[2] = ((aa2 & mask) | (c2 & ~mask)) & LIMB_MASK;
        a[3] = ((aa3 & mask) | (c3 & ~mask)) & LIMB_MASK;
        a[4] = ((aa4 & mask) | (c4 & ~mask));
    }
}

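reducePositive is the usual constant-time conditional subtraction: subtract the modulus speculatively, derive an all-ones/all-zeros mask from the sign of the top carry, then merge the two candidates without branching. The selection idiom in isolation, as a single-long toy version (63 being the sign-bit shift for a plain long):

public class MaskedSelect {
    public static void main(String[] args) {
        long value = 7, modulus = 5;
        long diff = value - modulus;     // speculative subtraction
        long mask = diff >> 63;          // signed shift: 0 if diff >= 0, else all ones
        long reduced = (value & mask) | (diff & ~mask);
        System.out.println(reduced);     // prints 2; with value = 3 it prints 3
    }
}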
@@ -1,5 +1,5 @@
#
# Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -287,6 +287,7 @@ ZAR=ZAR
ZMK=ZMK
ZMW=ZMW
ZWD=ZWD
ZWG=ZWG
ZWL=ZWL
ZWN=ZWN
ZWR=ZWR
@@ -512,5 +513,6 @@ yum=Yugoslavian New Dinar (1994-2002)
zar=South African Rand
zmk=Zambian Kwacha
zwd=Zimbabwean Dollar (1980-2008)
zwg=Zimbabwe Gold
zwl=Zimbabwean Dollar (2009)
zwr=Zimbabwean Dollar (2008)

Some files were not shown because too many files have changed in this diff