Mirror of https://github.com/JetBrains/JetBrainsRuntime.git (synced 2025-12-07 09:59:37 +01:00)

Compare commits: lbourges/W...jdk-23-ga (100 commits)

Commits:
9ad2e63f17 6f582f4ee3 b7ede41a2d 2eb7709ec3 946c6cc6c7
6a6591e88c 2288c052e6 5473e9e488 65197a3207 e83e2b305e
0c82e4bf19 88775f95f2 ec1782cd5b 024b39c36b 7afb958e8d
d876cacf73 7aaf83df1d f2e126dd5f 73e83a3e19 30260adb13
cf1b618dc7 58a274dc23 794cd0e591 343da68432 d7b7c1724d
5162e1a31c 908d1e92fc fd3860685d a106e522f8 52cd9bb534
7cc50a181a cfbfe4a472 d1373a2fd6 0a9e3bfc90 06191aca86
9620b912dd e991c0f921 6720685abd 4aab58be4a 5b9ecb1786
70ad622bc2 ca37a482cc ae10055b2c 4e52320979 2f60d36848
b415b98139 90d5b5b4c4 653c481d71 10b28babe5 d383365ea4
b6d0ead93f 272d11a389 9d744b0e04 4410cdc839 7040de19bd
e5fbc631ca e78c682142 87a29629e3 32ed61572c 62d0ee9cc0
98fd657cfa d7b9454205 b5fbdb2166 2086b0f070 d1510505c1
ae49182985 37ebecec88 08c7c38342 fa7521b29e fbcf6d9c4f
a124e6e5c7 0779f0d668 bd66b6b6f9 3edf379b67 10d81a337d
1dbad8058b 215149310c 23f2c97f4c e84e0cdf62 2243974d29
79dd575113 12a61bce8d d9dd2d19b0 b21d7b23c1 867312a7e5
a4b49253e3 86fcbe09f8 48997f54c9 4e3bfc926e d0b4f9baab
cb3c45a698 10f71f7dd4 a7964453cf 5230786a0d 378cd12f6b
d96476d8bd b17a1c092f 9e22b6dec3 fdbc2b24d3 31696a445c
@@ -9,7 +9,7 @@ warning=issuestitle
 
 [repository]
 tags=(?:jdk-(?:[1-9]([0-9]*)(?:\.(?:0|[1-9][0-9]*)){0,4})(?:\+(?:(?:[0-9]+))|(?:-ga)))|(?:jdk[4-9](?:u\d{1,3})?-(?:(?:b\d{2,3})|(?:ga)))|(?:hs\d\d(?:\.\d{1,2})?-b\d\d)
-branches=
+branches=.*
 
 [census]
 version=0
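The `tags=` pattern above accepts both modern JDK tags (build tags and `-ga` tags, with up to four dotted version components) and the legacy `jdkNu...-bNN` / `hsNN-bNN` styles. A minimal sketch verifying which tag names it accepts, with the same regex escaped for a Java string literal:

```java
import java.util.regex.Pattern;

public class TagPatternDemo {
    // Same pattern as the jcheck "tags=" line above, escaped for a Java literal.
    static final Pattern TAGS = Pattern.compile(
        "(?:jdk-(?:[1-9]([0-9]*)(?:\\.(?:0|[1-9][0-9]*)){0,4})(?:\\+(?:(?:[0-9]+))|(?:-ga)))"
        + "|(?:jdk[4-9](?:u\\d{1,3})?-(?:(?:b\\d{2,3})|(?:ga)))"
        + "|(?:hs\\d\\d(?:\\.\\d{1,2})?-b\\d\\d)");

    public static void main(String[] args) {
        // Modern build and GA tags.
        System.out.println(TAGS.matcher("jdk-23+37").matches());     // true
        System.out.println(TAGS.matcher("jdk-23-ga").matches());     // true
        System.out.println(TAGS.matcher("jdk-21.0.2+13").matches()); // true
        // Legacy update-release and HotSpot tags.
        System.out.println(TAGS.matcher("jdk8u342-b07").matches());  // true
        System.out.println(TAGS.matcher("hs25.40-b25").matches());   // true
        // Arbitrary names do not match.
        System.out.println(TAGS.matcher("jbr-release-21b123").matches()); // false
    }
}
```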
@@ -39,4 +39,4 @@ DEFAULT_VERSION_CLASSFILE_MINOR=0
 DEFAULT_VERSION_DOCS_API_SINCE=11
 DEFAULT_ACCEPTABLE_BOOT_VERSIONS="22 23"
 DEFAULT_JDK_SOURCE_TARGET_VERSION=23
-DEFAULT_PROMOTED_VERSION_PRE=ea
+DEFAULT_PROMOTED_VERSION_PRE=
@@ -778,7 +778,7 @@ public class FieldGen {
         result.appendLine("}");
 
         result.appendLine("@Override");
-        result.appendLine("protected int mult(long[] a, long[] b, long[] r) {");
+        result.appendLine("protected void mult(long[] a, long[] b, long[] r) {");
         result.incrIndent();
         for (int i = 0; i < 2 * params.getNumLimbs() - 1; i++) {
             result.appendIndent();
@@ -804,9 +804,6 @@ public class FieldGen {
             }
         }
         result.append(");\n");
-        result.appendIndent();
-        result.append("return 0;");
-        result.appendLine();
         result.decrIndent();
         result.appendLine("}");
 
@@ -836,7 +833,7 @@ public class FieldGen {
         // }
         // }
         result.appendLine("@Override");
-        result.appendLine("protected int square(long[] a, long[] r) {");
+        result.appendLine("protected void square(long[] a, long[] r) {");
         result.incrIndent();
         for (int i = 0; i < 2 * params.getNumLimbs() - 1; i++) {
             result.appendIndent();
@@ -877,9 +874,6 @@ public class FieldGen {
             }
         }
         result.append(");\n");
-        result.appendIndent();
-        result.append("return 0;");
-        result.appendLine();
         result.decrIndent();
         result.appendLine("}");
 
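FieldGen emits the limb-arithmetic code for the `sun.security.util.math.intpoly` field classes; these hunks change the generated `mult` and `square` from returning a vestigial `int` 0 to returning `void`, which lines up with the intrinsic descriptor change (`([J[J[J)I` to `([J[J[J)V`) in the vmIntrinsics hunk later in this compare. A hedged sketch of the shape the generated methods take after the change; the class names and the limb math here are illustrative, not FieldGen's actual output:

```java
// Illustrative only: mimics the *shape* of FieldGen's generated code after this
// change, not the real generated field arithmetic (no carries or reduction).
abstract class IntegerPolynomialBase {
    // Was "protected int mult(...)" returning a meaningless 0; now void.
    protected abstract void mult(long[] a, long[] b, long[] r);
    protected abstract void square(long[] a, long[] r);
}

final class DemoField extends IntegerPolynomialBase {
    @Override
    protected void mult(long[] a, long[] b, long[] r) {
        // Schoolbook limb products into r, which has 2*n - 1 limbs.
        for (int i = 0; i < r.length; i++) r[i] = 0;
        for (int i = 0; i < a.length; i++)
            for (int j = 0; j < b.length; j++)
                r[i + j] += a[i] * b[j];
    }

    @Override
    protected void square(long[] a, long[] r) {
        mult(a, a, r);
    }

    public static void main(String[] args) {
        long[] r = new long[3];
        new DemoField().mult(new long[]{2, 3}, new long[]{4, 5}, r);
        System.out.println(java.util.Arrays.toString(r)); // [8, 22, 15]
    }
}
```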
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -107,7 +107,7 @@ public final class SealedGraph implements Taglet {
             throw new RuntimeException(e);
         }
 
-        String simpleTypeName = element.getSimpleName().toString();
+        String simpleTypeName = packagelessCanonicalName(typeElement).replace('.', '/');
         String imageFile = simpleTypeName + "-sealed-graph.svg";
         int thumbnailHeight = 100; // also appears in the stylesheet
         String hoverImage = "<span>"
@@ -315,14 +315,14 @@ public final class SealedGraph implements Taglet {
             case MEMBER -> packageName((TypeElement) element.getEnclosingElement());
         };
     }
 }
 
-    private static String packagelessCanonicalName(TypeElement element) {
-        String result = element.getSimpleName().toString();
-        while (element.getNestingKind() == NestingKind.MEMBER) {
-            element = (TypeElement) element.getEnclosingElement();
-            result = element.getSimpleName().toString() + '.' + result;
-        }
-        return result;
+    private static String packagelessCanonicalName(TypeElement element) {
+        String result = element.getSimpleName().toString();
+        while (element.getNestingKind() == NestingKind.MEMBER) {
+            element = (TypeElement) element.getEnclosingElement();
+            result = element.getSimpleName().toString() + '.' + result;
+        }
+        return result;
+    }
 }
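Switching the SVG file name from the bare simple name to a packageless canonical name keeps same-named nested types from colliding on one `-sealed-graph.svg` file. A string-only sketch of the resulting names; the real code walks `TypeElement.getEnclosingElement()` while the nesting kind is `MEMBER`, which is elided here:

```java
public class SealedGraphNameDemo {
    // Stand-in for packagelessCanonicalName(...) + the replace('.', '/') above.
    static String imageFile(String packagelessCanonicalName) {
        return packagelessCanonicalName.replace('.', '/') + "-sealed-graph.svg";
    }

    public static void main(String[] args) {
        // Before the fix, both nested types named "Node" mapped to "Node-sealed-graph.svg".
        System.out.println(imageFile("Tree.Node"));  // Tree/Node-sealed-graph.svg
        System.out.println(imageFile("Graph.Node")); // Graph/Node-sealed-graph.svg
    }
}
```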
@@ -249,7 +249,6 @@ address StubGenerator::generate_intpoly_montgomeryMult_P256() {
   const Register tmp = r9;
 
   montgomeryMultiply(aLimbs, bLimbs, rLimbs, tmp, _masm);
-  __ mov64(rax, 0x1); // Return 1 (Fig. 5, Step 6 [1] skipped in montgomeryMultiply)
 
   __ leave();
   __ ret(0);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -483,14 +483,14 @@ void RangeCheckEliminator::in_block_motion(BlockBegin *block, AccessIndexedList
 
           if (c) {
             jint value = c->type()->as_IntConstant()->value();
             if (value != min_jint) {
               if (ao->op() == Bytecodes::_isub) {
                 value = -value;
               }
               if (ao->op() == Bytecodes::_iadd) {
                 base = java_add(base, value);
+                last_integer = base;
+                last_instruction = other;
               } else {
                 assert(ao->op() == Bytecodes::_isub, "unexpected bytecode");
                 base = java_subtract(base, value);
               }
-              last_integer = base;
-              last_instruction = other;
               index = other;
             } else {
               break;
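The `value != min_jint` guard matters because two's-complement negation of `min_jint` overflows back to `min_jint`, so the `_isub` normalization `value = -value` would silently corrupt the running base. A one-file demonstration of the wraparound:

```java
public class MinJintDemo {
    public static void main(String[] args) {
        int v = Integer.MIN_VALUE;
        // Negating MIN_VALUE wraps around to MIN_VALUE itself, which is why
        // the range-check eliminator bails out on this constant.
        System.out.println(-v == v);              // true
        System.out.println(Math.negateExact(1));  // -1, fine
        // Math.negateExact(v) would throw ArithmeticException instead of wrapping.
    }
}
```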
@@ -241,9 +241,14 @@ LockedClassesDo::~LockedClassesDo() {
 
 
 // Iterating over the CLDG needs to be locked because
-// unloading can remove entries concurrently soon.
-template <bool keep_alive = true>
-class ClassLoaderDataGraphIteratorBase : public StackObj {
+// unloading can remove entries concurrently.
+// This iterator does not keep the CLD alive.
+// Any CLD OopHandles (modules, mirrors, resolved refs)
+// resolved must be treated as no keepalive. And requires
+// that its CLD's holder is kept alive if they escape the
+// caller's safepoint or ClassLoaderDataGraph_lock
+// critical section.
+class ClassLoaderDataGraph::ClassLoaderDataGraphIterator : public StackObj {
   ClassLoaderData* _next;
   Thread*          _thread;
   HandleMark       _hm;  // clean up handles when this is done.
@@ -251,12 +256,8 @@ class ClassLoaderDataGraphIteratorBase : public StackObj {
   // unless verifying at a safepoint.
 
  public:
-  ClassLoaderDataGraphIteratorBase() : _next(ClassLoaderDataGraph::_head), _thread(Thread::current()), _hm(_thread) {
-    if (keep_alive) {
-      assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
-    } else {
-      assert_at_safepoint();
-    }
+  ClassLoaderDataGraphIterator() : _next(ClassLoaderDataGraph::_head), _thread(Thread::current()), _hm(_thread) {
+    assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
   }
 
   ClassLoaderData* get_next() {
@@ -266,10 +267,6 @@ public:
       cld = cld->next();
     }
     if (cld != nullptr) {
-      if (keep_alive) {
-        // Keep cld that is being returned alive.
-        Handle(_thread, cld->holder());
-      }
       _next = cld->next();
     } else {
       _next = nullptr;
@@ -278,9 +275,6 @@ public:
   }
 };
 
-using ClassLoaderDataGraphIterator = ClassLoaderDataGraphIteratorBase<true /* keep_alive */>;
-using ClassLoaderDataGraphIteratorNoKeepAlive = ClassLoaderDataGraphIteratorBase<false /* keep_alive */>;
-
 void ClassLoaderDataGraph::loaded_cld_do(CLDClosure* cl) {
   ClassLoaderDataGraphIterator iter;
   while (ClassLoaderData* cld = iter.get_next()) {
@@ -288,13 +282,6 @@ void ClassLoaderDataGraph::loaded_cld_do(CLDClosure* cl) {
   }
 }
 
-void ClassLoaderDataGraph::loaded_cld_do_no_keepalive(CLDClosure* cl) {
-  ClassLoaderDataGraphIteratorNoKeepAlive iter;
-  while (ClassLoaderData* cld = iter.get_next()) {
-    cl->do_cld(cld);
-  }
-}
-
 // These functions assume that the caller has locked the ClassLoaderDataGraph_lock
 // if they are not calling the function from a safepoint.
 void ClassLoaderDataGraph::classes_do(KlassClosure* klass_closure) {
@@ -318,6 +305,16 @@ void ClassLoaderDataGraph::methods_do(void f(Method*)) {
   }
 }
 
+void ClassLoaderDataGraph::modules_do_keepalive(void f(ModuleEntry*)) {
+  assert_locked_or_safepoint(Module_lock);
+  ClassLoaderDataGraphIterator iter;
+  while (ClassLoaderData* cld = iter.get_next()) {
+    // Keep the holder alive.
+    (void)cld->holder();
+    cld->modules_do(f);
+  }
+}
+
 void ClassLoaderDataGraph::modules_do(void f(ModuleEntry*)) {
   assert_locked_or_safepoint(Module_lock);
   ClassLoaderDataGraphIterator iter;
@@ -334,9 +331,11 @@ void ClassLoaderDataGraph::packages_do(void f(PackageEntry*)) {
   }
 }
 
-void ClassLoaderDataGraph::loaded_classes_do(KlassClosure* klass_closure) {
+void ClassLoaderDataGraph::loaded_classes_do_keepalive(KlassClosure* klass_closure) {
   ClassLoaderDataGraphIterator iter;
   while (ClassLoaderData* cld = iter.get_next()) {
+    // Keep the holder alive.
+    (void)cld->holder();
     cld->loaded_classes_do(klass_closure);
   }
 }
@@ -346,7 +345,7 @@ void ClassLoaderDataGraph::classes_unloading_do(void f(Klass* const)) {
 }
 
 void ClassLoaderDataGraph::verify_dictionary() {
-  ClassLoaderDataGraphIteratorNoKeepAlive iter;
+  ClassLoaderDataGraphIterator iter;
   while (ClassLoaderData* cld = iter.get_next()) {
     if (cld->dictionary() != nullptr) {
       cld->dictionary()->verify();
@@ -354,26 +353,28 @@ void ClassLoaderDataGraph::verify_dictionary() {
   }
 }
 
-#define FOR_ALL_DICTIONARY(X)   ClassLoaderDataGraphIterator iter; \
-                                while (ClassLoaderData* X = iter.get_next()) \
-                                  if (X->dictionary() != nullptr)
-
 void ClassLoaderDataGraph::print_dictionary(outputStream* st) {
-  FOR_ALL_DICTIONARY(cld) {
-    st->print("Dictionary for ");
-    cld->print_value_on(st);
-    st->cr();
-    cld->dictionary()->print_on(st);
-    st->cr();
+  ClassLoaderDataGraphIterator iter;
+  while (ClassLoaderData *cld = iter.get_next()) {
+    if (cld->dictionary() != nullptr) {
+      st->print("Dictionary for ");
+      cld->print_value_on(st);
+      st->cr();
+      cld->dictionary()->print_on(st);
+      st->cr();
+    }
   }
 }
 
 void ClassLoaderDataGraph::print_table_statistics(outputStream* st) {
-  FOR_ALL_DICTIONARY(cld) {
-    ResourceMark rm; // loader_name_and_id
-    stringStream tempst;
-    tempst.print("System Dictionary for %s class loader", cld->loader_name_and_id());
-    cld->dictionary()->print_table_statistics(st, tempst.freeze());
+  ClassLoaderDataGraphIterator iter;
+  while (ClassLoaderData *cld = iter.get_next()) {
+    if (cld->dictionary() != nullptr) {
+      ResourceMark rm; // loader_name_and_id
+      stringStream tempst;
+      tempst.print("System Dictionary for %s class loader", cld->loader_name_and_id());
+      cld->dictionary()->print_table_statistics(st, tempst.freeze());
+    }
   }
 }
@@ -550,7 +551,7 @@ Klass* ClassLoaderDataGraphKlassIteratorAtomic::next_klass() {
 }
 
 void ClassLoaderDataGraph::verify() {
-  ClassLoaderDataGraphIteratorNoKeepAlive iter;
+  ClassLoaderDataGraphIterator iter;
   while (ClassLoaderData* cld = iter.get_next()) {
     cld->verify();
   }
@@ -37,10 +37,10 @@ class ClassLoaderDataGraph : public AllStatic {
   friend class ClassLoaderDataGraphMetaspaceIterator;
   friend class ClassLoaderDataGraphKlassIteratorAtomic;
   friend class ClassLoaderDataGraphKlassIteratorStatic;
-  template <bool keep_alive>
-  friend class ClassLoaderDataGraphIteratorBase;
   friend class VMStructs;
  private:
+  class ClassLoaderDataGraphIterator;
+
   // All CLDs (except unlinked CLDs) can be reached by walking _head->_next->...
   static ClassLoaderData* volatile _head;
@@ -71,8 +71,12 @@ class ClassLoaderDataGraph : public AllStatic {
   static void roots_cld_do(CLDClosure* strong, CLDClosure* weak);
   static void always_strong_cld_do(CLDClosure* cl);
   // Iteration through CLDG not by GC.
+  // All the do suffixed functions do not keep the CLD alive. Any CLD OopHandles
+  // (modules, mirrors, resolved refs) resolved must be treated as no keepalive.
+  // And requires that its CLD's holder is kept alive if they escape the
+  // caller's safepoint or ClassLoaderDataGraph_lock critical section.
+  // The do_keepalive suffixed functions will keep all CLDs alive.
   static void loaded_cld_do(CLDClosure* cl);
-  static void loaded_cld_do_no_keepalive(CLDClosure* cl);
   // klass do
   // Walking classes through the ClassLoaderDataGraph include array classes. It also includes
   // classes that are allocated but not loaded, classes that have errors, and scratch classes
@@ -81,9 +85,10 @@ class ClassLoaderDataGraph : public AllStatic {
   static void classes_do(KlassClosure* klass_closure);
   static void classes_do(void f(Klass* const));
   static void methods_do(void f(Method*));
+  static void modules_do_keepalive(void f(ModuleEntry*));
   static void modules_do(void f(ModuleEntry*));
   static void packages_do(void f(PackageEntry*));
-  static void loaded_classes_do(KlassClosure* klass_closure);
+  static void loaded_classes_do_keepalive(KlassClosure* klass_closure);
   static void classes_unloading_do(void f(Klass* const));
   static bool do_unloading();
@@ -165,7 +165,7 @@ void ClassLoaderStatsClosure::addEmptyParents(oop cl) {
 
 void ClassLoaderStatsVMOperation::doit() {
   ClassLoaderStatsClosure clsc (_out);
-  ClassLoaderDataGraph::loaded_cld_do_no_keepalive(&clsc);
+  ClassLoaderDataGraph::loaded_cld_do(&clsc);
   clsc.print();
 }
@@ -788,6 +788,7 @@ int java_lang_Class::_class_loader_offset;
 int java_lang_Class::_module_offset;
 int java_lang_Class::_protection_domain_offset;
 int java_lang_Class::_component_mirror_offset;
+int java_lang_Class::_init_lock_offset;
 int java_lang_Class::_signers_offset;
 int java_lang_Class::_name_offset;
 int java_lang_Class::_source_file_offset;
@@ -911,6 +912,12 @@ void java_lang_Class::initialize_mirror_fields(Klass* k,
                                                Handle protection_domain,
                                                Handle classData,
                                                TRAPS) {
+  // Allocate a simple java object for a lock.
+  // This needs to be a java object because during class initialization
+  // it can be held across a java call.
+  typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK);
+  set_init_lock(mirror(), r);
+
   // Set protection domain also
   set_protection_domain(mirror(), protection_domain());
@@ -1132,6 +1139,10 @@ bool java_lang_Class::restore_archived_mirror(Klass *k,
   if (!k->is_array_klass()) {
     // - local static final fields with initial values were initialized at dump time
 
+    // create the init_lock
+    typeArrayOop r = oopFactory::new_typeArray(T_INT, 0, CHECK_(false));
+    set_init_lock(mirror(), r);
+
     if (protection_domain.not_null()) {
       set_protection_domain(mirror(), protection_domain());
     }
@@ -1196,6 +1207,15 @@ oop java_lang_Class::component_mirror(oop java_class) {
   return java_class->obj_field(_component_mirror_offset);
 }
 
+oop java_lang_Class::init_lock(oop java_class) {
+  assert(_init_lock_offset != 0, "must be set");
+  return java_class->obj_field(_init_lock_offset);
+}
+void java_lang_Class::set_init_lock(oop java_class, oop init_lock) {
+  assert(_init_lock_offset != 0, "must be set");
+  java_class->obj_field_put(_init_lock_offset, init_lock);
+}
+
 objArrayOop java_lang_Class::signers(oop java_class) {
   assert(_signers_offset != 0, "must be set");
   return (objArrayOop)java_class->obj_field(_signers_offset);
@@ -1415,12 +1435,18 @@ void java_lang_Class::compute_offsets() {
   InstanceKlass* k = vmClasses::Class_klass();
   CLASS_FIELDS_DO(FIELD_COMPUTE_OFFSET);
 
+  // Init lock is a C union with component_mirror. Only instanceKlass mirrors have
+  // init_lock and only ArrayKlass mirrors have component_mirror. Since both are oops
+  // GC treats them the same.
+  _init_lock_offset = _component_mirror_offset;
+
   CLASS_INJECTED_FIELDS(INJECTED_FIELD_COMPUTE_OFFSET);
 }
 
 #if INCLUDE_CDS
 void java_lang_Class::serialize_offsets(SerializeClosure* f) {
   f->do_bool(&_offsets_computed);
+  f->do_u4((u4*)&_init_lock_offset);
 
   CLASS_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -226,6 +226,7 @@ class java_lang_Class : AllStatic {
   static int _static_oop_field_count_offset;
 
   static int _protection_domain_offset;
+  static int _init_lock_offset;
   static int _signers_offset;
   static int _class_loader_offset;
   static int _module_offset;
@@ -240,6 +241,7 @@ class java_lang_Class : AllStatic {
   static GrowableArray<Klass*>* _fixup_mirror_list;
   static GrowableArray<Klass*>* _fixup_module_field_list;
 
+  static void set_init_lock(oop java_class, oop init_lock);
   static void set_protection_domain(oop java_class, oop protection_domain);
   static void set_class_loader(oop java_class, oop class_loader);
   static void set_component_mirror(oop java_class, oop comp_mirror);
@@ -292,6 +294,10 @@ class java_lang_Class : AllStatic {
 
   // Support for embedded per-class oops
   static oop protection_domain(oop java_class);
+  static oop init_lock(oop java_class);
+  static void clear_init_lock(oop java_class) {
+    set_init_lock(java_class, nullptr);
+  }
   static oop component_mirror(oop java_class);
   static objArrayOop signers(oop java_class);
   static void set_signers(oop java_class, objArrayOop signers);
@@ -344,8 +344,23 @@ Symbol* SymbolTable::lookup_common(const char* name,
   return sym;
 }
 
+// Symbols should represent entities from the constant pool that are
+// limited to <64K in length, but usage errors creep in allowing Symbols
+// to be used for arbitrary strings. For debug builds we will assert if
+// a string is too long, whereas product builds will truncate it.
+static int check_length(const char* name, int len) {
+  assert(len <= Symbol::max_length(),
+         "String length %d exceeds the maximum Symbol length of %d", len, Symbol::max_length());
+  if (len > Symbol::max_length()) {
+    warning("A string \"%.80s ... %.80s\" exceeds the maximum Symbol "
+            "length of %d and has been truncated", name, (name + len - 80), Symbol::max_length());
+    len = Symbol::max_length();
+  }
+  return len;
+}
+
 Symbol* SymbolTable::new_symbol(const char* name, int len) {
-  assert(len <= Symbol::max_length(), "sanity");
+  len = check_length(name, len);
   unsigned int hash = hash_symbol(name, len, _alt_hash);
   Symbol* sym = lookup_common(name, len, hash);
   if (sym == nullptr) {
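The helper turns a debug-only assert into a product-build clamp with a warning. A rough Java analogue of the clamp-and-warn pattern, assuming the 2^16 - 1 cap implied by the "<64K" comment (the constant pool stores UTF-8 lengths in a u2):

```java
public class CheckLengthDemo {
    // Assumed cap, matching the u2 length field in the class-file constant pool.
    static final int MAX_SYMBOL_LENGTH = 65_535;

    static int checkLength(String name, int len) {
        assert len <= MAX_SYMBOL_LENGTH
                : "String length " + len + " exceeds the maximum Symbol length";
        if (len > MAX_SYMBOL_LENGTH) {
            System.err.printf("warning: \"%.80s ...\" exceeds the maximum Symbol length "
                    + "of %d and has been truncated%n", name, MAX_SYMBOL_LENGTH);
            len = MAX_SYMBOL_LENGTH;
        }
        return len;
    }

    public static void main(String[] args) {
        String big = "x".repeat(70_000);
        // Prints 65535 normally; throws AssertionError when run with -ea,
        // mirroring debug vs. product HotSpot builds.
        System.out.println(checkLength(big, big.length()));
    }
}
```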
@@ -485,6 +500,7 @@ void SymbolTable::new_symbols(ClassLoaderData* loader_data, const constantPoolHa
   for (int i = 0; i < names_count; i++) {
     const char *name = names[i];
     int len = lengths[i];
+    assert(len <= Symbol::max_length(), "must be - these come from the constant pool");
     unsigned int hash = hashValues[i];
     assert(lookup_shared(name, len, hash) == nullptr, "must have checked already");
     Symbol* sym = do_add_if_needed(name, len, hash, is_permanent);
@@ -494,6 +510,7 @@ void SymbolTable::new_symbols(ClassLoaderData* loader_data, const constantPoolHa
 }
 
 Symbol* SymbolTable::do_add_if_needed(const char* name, int len, uintx hash, bool is_permanent) {
+  assert(len <= Symbol::max_length(), "caller should have ensured this");
   SymbolTableLookup lookup(name, len, hash);
   SymbolTableGet stg;
   bool clean_hint = false;
@@ -542,7 +559,7 @@ Symbol* SymbolTable::do_add_if_needed(const char* name, int len, uintx hash, boo
 
 Symbol* SymbolTable::new_permanent_symbol(const char* name) {
   unsigned int hash = 0;
-  int len = (int)strlen(name);
+  int len = check_length(name, (int)strlen(name));
   Symbol* sym = SymbolTable::lookup_only(name, len, hash);
   if (sym == nullptr) {
     sym = do_add_if_needed(name, len, hash, /* is_permanent */ true);
@@ -177,7 +177,7 @@ class SystemDictionary : AllStatic {
 
   static void classes_do(MetaspaceClosure* it);
-  // Iterate over all methods in all klasses
 
+  // Will not keep metadata alive. See ClassLoaderDataGraph::methods_do.
   static void methods_do(void f(Method*));
 
   // Garbage collection support
@@ -529,8 +529,8 @@ class methodHandle;
   /* support for sun.security.util.math.intpoly.MontgomeryIntegerPolynomialP256 */ \
   do_class(sun_security_util_math_intpoly_MontgomeryIntegerPolynomialP256, "sun/security/util/math/intpoly/MontgomeryIntegerPolynomialP256") \
   do_intrinsic(_intpoly_montgomeryMult_P256, sun_security_util_math_intpoly_MontgomeryIntegerPolynomialP256, intPolyMult_name, intPolyMult_signature, F_R) \
-   do_name(intPolyMult_name, "mult") \
-   do_signature(intPolyMult_signature, "([J[J[J)I") \
+   do_name(intPolyMult_name, "multImpl") \
+   do_signature(intPolyMult_signature, "([J[J[J)V") \
   \
   do_class(sun_security_util_math_intpoly_IntegerPolynomial, "sun/security/util/math/intpoly/IntegerPolynomial") \
   do_intrinsic(_intpoly_assign, sun_security_util_math_intpoly_IntegerPolynomial, intPolyAssign_name, intPolyAssign_signature, F_S) \
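The intrinsic now binds to `multImpl` with descriptor `([J[J[J)V` (three `long[]` parameters, `void` return) instead of the old `int`-returning `mult`, matching the FieldGen change earlier in this compare. A quick way to sanity-check such descriptors from Java:

```java
import java.lang.invoke.MethodType;

public class DescriptorDemo {
    public static void main(String[] args) {
        // Decode the new intrinsic descriptor: three long[] params, void return.
        MethodType mt = MethodType.fromMethodDescriptorString("([J[J[J)V", null);
        System.out.println(mt); // (long[],long[],long[])void
        // The old descriptor returned int:
        System.out.println(MethodType.fromMethodDescriptorString("([J[J[J)I", null));
    }
}
```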
@@ -557,6 +557,7 @@ class SerializeClosure;
   template(bool_array_signature, "[Z") \
   template(byte_array_signature, "[B") \
   template(char_array_signature, "[C") \
+  template(int_array_signature, "[I") \
   template(runnable_signature, "Ljava/lang/Runnable;") \
   template(continuation_signature, "Ljdk/internal/vm/Continuation;") \
   template(continuationscope_signature, "Ljdk/internal/vm/ContinuationScope;") \
@@ -761,7 +761,7 @@ DirectiveSet* DirectivesStack::getMatchingDirective(const methodHandle& method,
       if (dir->is_default_directive() || dir->match(method)) {
         match = dir->get_for(comp);
         assert(match != nullptr, "Consistency");
-        if (match->EnableOption) {
+        if (match->EnableOption || dir->is_default_directive()) {
           // The directiveSet for this compile is also enabled -> success
           dir->inc_refcount();
           break;
@@ -172,13 +172,9 @@ int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
   nmethod* nm = cb->as_nmethod();
   BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
 
-  if (!bs_nm->is_armed(nm)) {
-    return 0;
-  }
-
-  assert(!nm->is_osr_method(), "Should not reach here");
   // Called upon first entry after being armed
   bool may_enter = bs_nm->nmethod_entry_barrier(nm);
+  assert(!nm->is_osr_method() || may_enter, "OSR nmethods should always be entrant after migration");
 
   // In case a concurrent thread disarmed the nmethod, we need to ensure the new instructions
   // are made visible, by using a cross modify fence. Note that this is synchronous cross modifying
@@ -188,11 +184,11 @@ int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
   // it can be made conditional on the nmethod_patching_type.
   OrderAccess::cross_modify_fence();
 
-  // Diagnostic option to force deoptimization 1 in 3 times. It is otherwise
+  // Diagnostic option to force deoptimization 1 in 10 times. It is otherwise
   // a very rare event.
-  if (DeoptimizeNMethodBarriersALot) {
+  if (DeoptimizeNMethodBarriersALot && !nm->is_osr_method()) {
     static volatile uint32_t counter=0;
-    if (Atomic::add(&counter, 1u) % 3 == 0) {
+    if (Atomic::add(&counter, 1u) % 10 == 0) {
       may_enter = false;
     }
   }
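The stress flag now forces a deoptimization on roughly one entry in ten, sampled through a single counter shared by all threads. A Java sketch of the same sampling idiom with `AtomicInteger`; the names are illustrative:

```java
import java.util.concurrent.atomic.AtomicInteger;

public class DeoptSamplingDemo {
    // Shared across threads, like the static volatile uint32_t in the barrier code.
    static final AtomicInteger COUNTER = new AtomicInteger();

    // Returns true roughly once per ten calls, regardless of which threads call it.
    static boolean forceSlowPath() {
        return COUNTER.incrementAndGet() % 10 == 0;
    }

    public static void main(String[] args) {
        int forced = 0;
        for (int i = 0; i < 100; i++) {
            if (forceSlowPath()) forced++;
        }
        System.out.println(forced); // 10
    }
}
```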
@@ -205,15 +201,6 @@ int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
 }
 
 bool BarrierSetNMethod::nmethod_osr_entry_barrier(nmethod* nm) {
-  // This check depends on the invariant that all nmethods that are deoptimized / made not entrant
-  // are NOT disarmed.
-  // This invariant is important because a method can be deoptimized after the method have been
-  // resolved / looked up by OSR by another thread. By not deoptimizing them we guarantee that
-  // a deoptimized method will always hit the barrier and come to the same conclusion - deoptimize
-  if (!is_armed(nm)) {
-    return true;
-  }
-
   assert(nm->is_osr_method(), "Should not reach here");
   log_trace(nmethod, barrier)("Running osr nmethod entry barrier: " PTR_FORMAT, p2i(nm));
   bool result = nmethod_entry_barrier(nm);
@@ -132,7 +132,7 @@ bool VM_GC_Operation::doit_prologue() {
 void VM_GC_Operation::doit_epilogue() {
   // GC thread root traversal likely used OopMapCache a lot, which
   // might have created lots of old entries. Trigger the cleanup now.
-  OopMapCache::trigger_cleanup();
+  OopMapCache::try_trigger_cleanup();
   if (Universe::has_reference_pending_list()) {
     Heap_lock->notify_all();
   }
@@ -44,7 +44,7 @@ void VM_ShenandoahOperation::doit_epilogue() {
   assert(!ShenandoahHeap::heap()->has_gc_state_changed(), "GC State was not synchronized to java threads.");
   // GC thread root traversal likely used OopMapCache a lot, which
   // might have created lots of old entries. Trigger the cleanup now.
-  OopMapCache::trigger_cleanup();
+  OopMapCache::try_trigger_cleanup();
 }
 
 bool VM_ShenandoahReferenceOperation::doit_prologue() {
@@ -134,7 +134,7 @@ public:
 
     // GC thread root traversal likely used OopMapCache a lot, which
     // might have created lots of old entries. Trigger the cleanup now.
-    OopMapCache::trigger_cleanup();
+    OopMapCache::try_trigger_cleanup();
   }
 
   bool gc_locked() const {
@@ -524,6 +524,10 @@ static bool rule_major_allocation_rate(const ZDirectorStats& stats) {
 }
 
 static double calculate_young_to_old_worker_ratio(const ZDirectorStats& stats) {
+  if (!stats._old_stats._cycle._is_time_trustable) {
+    return 1.0;
+  }
+
   const double young_gc_time = gc_time(stats._young_stats);
   const double old_gc_time = gc_time(stats._old_stats);
   const size_t reclaimed_per_young_gc = stats._young_stats._stat_heap._reclaimed_avg;
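Until ZGC has trustworthy old-generation timing (i.e. before the first complete old collection), the director now falls back to a neutral 1.0 worker ratio instead of computing a ratio from meaningless samples. A hedged Java sketch of the guard-clause shape; the field names and the simplified formula are illustrative:

```java
public class WorkerRatioDemo {
    record Stats(boolean oldTimeTrustable, double youngGcTime, double oldGcTime) {}

    // Mirrors the guard added above: with no trustworthy old-gen timing yet,
    // return a neutral ratio instead of dividing by garbage numbers.
    static double youngToOldWorkerRatio(Stats s) {
        if (!s.oldTimeTrustable()) {
            return 1.0;
        }
        // Illustrative; the real formula also weighs reclaimed bytes per cycle.
        return s.youngGcTime() / s.oldGcTime();
    }

    public static void main(String[] args) {
        System.out.println(youngToOldWorkerRatio(new Stats(false, 3.0, 0.0))); // 1.0, no divide-by-zero
        System.out.println(youngToOldWorkerRatio(new Stats(true, 3.0, 6.0)));  // 0.5
    }
}
```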
@@ -456,7 +456,7 @@ public:
 
     // GC thread root traversal likely used OopMapCache a lot, which
     // might have created lots of old entries. Trigger the cleanup now.
-    OopMapCache::trigger_cleanup();
+    OopMapCache::try_trigger_cleanup();
   }
 
   bool success() const {
@@ -1810,7 +1810,7 @@ void LinkResolver::resolve_invokedynamic(CallInfo& result, const constantPoolHan
   // the interpreter or runtime performs a serialized check of
   // the relevant ResolvedIndyEntry::method field. This is done by the caller
   // of this method, via CPC::set_dynamic_call, which uses
-  // a lock to do the final serialization of updates
+  // an ObjectLocker to do the final serialization of updates
   // to ResolvedIndyEntry state, including method.
 
   // Log dynamic info to CDS classlist.
@@ -66,9 +66,6 @@ class OopMapCacheEntry: private InterpreterOopMap {
  public:
   OopMapCacheEntry() : InterpreterOopMap() {
     _next = nullptr;
-#ifdef ASSERT
-    _resource_allocate_bit_mask = false;
-#endif
   }
 };
@@ -177,9 +174,13 @@ class VerifyClosure : public OffsetClosure {
 
 InterpreterOopMap::InterpreterOopMap() {
   initialize();
-#ifdef ASSERT
-  _resource_allocate_bit_mask = true;
-#endif
 }
 
+InterpreterOopMap::~InterpreterOopMap() {
+  if (has_valid_mask() && mask_size() > small_mask_limit) {
+    assert(_bit_mask[0] != 0, "should have pointer to C heap");
+    FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0]);
+  }
+}
+
 bool InterpreterOopMap::is_empty() const {
@@ -399,37 +400,24 @@ void OopMapCacheEntry::deallocate(OopMapCacheEntry* const entry) {
 
 // Implementation of OopMapCache
 
-void InterpreterOopMap::resource_copy(OopMapCacheEntry* from) {
-  assert(_resource_allocate_bit_mask,
-         "Should not resource allocate the _bit_mask");
-  assert(from->has_valid_mask(),
-         "Cannot copy entry with an invalid mask");
+void InterpreterOopMap::copy_from(const OopMapCacheEntry* src) {
+  // The expectation is that this InterpreterOopMap is recently created
+  // and empty. It is used to get a copy of a cached entry.
+  assert(!has_valid_mask(), "InterpreterOopMap object can only be filled once");
+  assert(src->has_valid_mask(), "Cannot copy entry with an invalid mask");
 
-  set_method(from->method());
-  set_bci(from->bci());
-  set_mask_size(from->mask_size());
-  set_expression_stack_size(from->expression_stack_size());
-  _num_oops = from->num_oops();
+  set_method(src->method());
+  set_bci(src->bci());
+  set_mask_size(src->mask_size());
+  set_expression_stack_size(src->expression_stack_size());
+  _num_oops = src->num_oops();
 
   // Is the bit mask contained in the entry?
-  if (from->mask_size() <= small_mask_limit) {
-    memcpy((void *)_bit_mask, (void *)from->_bit_mask,
-           mask_word_size() * BytesPerWord);
+  if (src->mask_size() <= small_mask_limit) {
+    memcpy(_bit_mask, src->_bit_mask, mask_word_size() * BytesPerWord);
   } else {
-    // The expectation is that this InterpreterOopMap is a recently created
-    // and empty. It is used to get a copy of a cached entry.
-    // If the bit mask has a value, it should be in the
-    // resource area.
-    assert(_bit_mask[0] == 0 ||
-           Thread::current()->resource_area()->contains((void*)_bit_mask[0]),
-           "The bit mask should have been allocated from a resource area");
-    // Allocate the bit_mask from a Resource area for performance. Allocating
-    // from the C heap as is done for OopMapCache has a significant
-    // performance impact.
-    _bit_mask[0] = (uintptr_t) NEW_RESOURCE_ARRAY(uintptr_t, mask_word_size());
-    assert(_bit_mask[0] != 0, "bit mask was not allocated");
-    memcpy((void*) _bit_mask[0], (void*) from->_bit_mask[0],
-           mask_word_size() * BytesPerWord);
+    _bit_mask[0] = (uintptr_t) NEW_C_HEAP_ARRAY(uintptr_t, mask_word_size(), mtClass);
+    memcpy((void*) _bit_mask[0], (void*) src->_bit_mask[0], mask_word_size() * BytesPerWord);
   }
 }
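The copy keeps masks of up to `small_mask_limit` words inline in a fixed array and allocates larger ones out of line; the change moves that out-of-line storage from the resource area to the C heap so the copy can outlive a resource scope (hence the new destructor above). A Java sketch of the same small-buffer pattern, with illustrative names and limits:

```java
public class SmallMaskDemo {
    static final int SMALL_MASK_LIMIT_WORDS = 2; // illustrative; HotSpot uses a compile-time limit

    // Masks that fit the inline buffer avoid an allocation; larger ones spill out of line.
    static final class BitMask {
        private final long[] inline = new long[SMALL_MASK_LIMIT_WORDS];
        private long[] heap; // null while the mask fits inline

        void copyFrom(long[] src) {
            if (src.length <= SMALL_MASK_LIMIT_WORDS) {
                System.arraycopy(src, 0, inline, 0, src.length);
            } else {
                // The GC reclaims this; HotSpot must FREE_C_HEAP_ARRAY in a destructor.
                heap = src.clone();
            }
        }

        long[] words() { return heap != null ? heap : inline; }
    }

    public static void main(String[] args) {
        BitMask m = new BitMask();
        m.copyFrom(new long[]{1, 2, 3, 4}); // spills to the out-of-line copy
        System.out.println(m.words().length); // 4
    }
}
```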
@@ -516,7 +504,7 @@ void OopMapCache::lookup(const methodHandle& method,
   for (int i = 0; i < _probe_depth; i++) {
     OopMapCacheEntry *entry = entry_at(probe + i);
     if (entry != nullptr && !entry->is_empty() && entry->match(method, bci)) {
-      entry_for->resource_copy(entry);
+      entry_for->copy_from(entry);
       assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
       log_debug(interpreter, oopmap)("- found at hash %d", probe + i);
       return;
@@ -530,7 +518,7 @@ void OopMapCache::lookup(const methodHandle& method,
   OopMapCacheEntry* tmp = NEW_C_HEAP_OBJ(OopMapCacheEntry, mtClass);
   tmp->initialize();
   tmp->fill(method, bci);
-  entry_for->resource_copy(tmp);
+  entry_for->copy_from(tmp);
 
   if (method->should_not_be_cached()) {
     // It is either not safe or not a good idea to cache this Method*
@@ -592,10 +580,13 @@ bool OopMapCache::has_cleanup_work() {
   return Atomic::load(&_old_entries) != nullptr;
 }
 
-void OopMapCache::trigger_cleanup() {
-  if (has_cleanup_work()) {
-    MutexLocker ml(Service_lock, Mutex::_no_safepoint_check_flag);
+void OopMapCache::try_trigger_cleanup() {
+  // See we can take the lock for the notification without blocking.
+  // This allows triggering the cleanup from GC paths, that can hold
+  // the service lock for e.g. oop iteration in service thread.
+  if (has_cleanup_work() && Service_lock->try_lock_without_rank_check()) {
     Service_lock->notify_all();
+    Service_lock->unlock();
   }
 }
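Replacing the blocking `MutexLocker` with a try-lock avoids self-deadlock when a GC path already holds the service lock: if the lock is busy, the wakeup is simply skipped and the work is picked up on the next trigger. The same "notify only if the lock is free" idiom in Java, with `ReentrantLock`/`Condition` and illustrative names:

```java
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

public class TryTriggerDemo {
    static final ReentrantLock SERVICE_LOCK = new ReentrantLock();
    static final Condition WORK_AVAILABLE = SERVICE_LOCK.newCondition();
    static volatile boolean hasCleanupWork;

    // Never blocks: if another thread (e.g. the service thread itself) holds
    // the lock, we skip the wakeup rather than wait for it.
    static void tryTriggerCleanup() {
        if (hasCleanupWork && SERVICE_LOCK.tryLock()) {
            try {
                WORK_AVAILABLE.signalAll();
            } finally {
                SERVICE_LOCK.unlock();
            }
        }
    }

    public static void main(String[] args) {
        hasCleanupWork = true;
        tryTriggerCleanup(); // nobody is waiting here; demonstrates the non-blocking shape
    }
}
```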
@@ -628,7 +619,7 @@ void OopMapCache::compute_one_oop_map(const methodHandle& method, int bci, Inter
   tmp->initialize();
   tmp->fill(method, bci);
   if (tmp->has_valid_mask()) {
-    entry->resource_copy(tmp);
+    entry->copy_from(tmp);
   }
   OopMapCacheEntry::deallocate(tmp);
 }
@@ -36,13 +36,14 @@
 // OopMapCache's are allocated lazily per InstanceKlass.
 
 // The oopMap (InterpreterOopMap) is stored as a bit mask. If the
-// bit_mask can fit into two words it is stored in
+// bit_mask can fit into four words it is stored in
 // the _bit_mask array, otherwise it is allocated on the heap.
 // For OopMapCacheEntry the bit_mask is allocated in the C heap
 // because these entries persist between garbage collections.
-// For InterpreterOopMap the bit_mask is allocated in
-// a resource area for better performance. InterpreterOopMap
-// should only be created and deleted during same garbage collection.
+// For InterpreterOopMap the bit_mask is allocated in the C heap
+// to avoid issues with allocations from the resource area that have
+// to live accross the oop closure. InterpreterOopMap should only be
+// created and deleted during the same garbage collection.
 //
 // If ENABBLE_ZAP_DEAD_LOCALS is defined, two bits are used
 // per entry instead of one. In all cases,
@@ -95,9 +96,6 @@ class InterpreterOopMap: ResourceObj {
   //   access it without using trickery in
   //   method bit_mask().
   int            _num_oops;
-#ifdef ASSERT
-  bool _resource_allocate_bit_mask;
-#endif
 
   // access methods
   Method*        method() const                  { return _method; }
@@ -128,12 +126,13 @@ class InterpreterOopMap: ResourceObj {
 
  public:
   InterpreterOopMap();
+  ~InterpreterOopMap();
 
-  // Copy the OopMapCacheEntry in parameter "from" into this
-  // InterpreterOopMap. If the _bit_mask[0] in "from" points to
-  // allocated space (i.e., the bit mask was to large to hold
-  // in-line), allocate the space from a Resource area.
-  void resource_copy(OopMapCacheEntry* from);
+  // Copy the OopMapCacheEntry in parameter "src" into this
+  // InterpreterOopMap. If the _bit_mask[0] in "src" points to
+  // allocated space (i.e., the bit mask was too large to hold
+  // in-line), allocate the space from the C heap.
+  void copy_from(const OopMapCacheEntry* src);
 
   void iterate_oop(OffsetClosure* oop_closure) const;
   void print() const;
@@ -183,8 +182,8 @@ class OopMapCache : public CHeapObj<mtClass> {
   // Check if we need to clean up old entries
   static bool has_cleanup_work();
 
-  // Request cleanup if work is needed
-  static void trigger_cleanup();
+  // Request cleanup if work is needed and notification is currently possible
+  static void try_trigger_cleanup();
 
   // Clean up the old entries
   static void cleanup();
@@ -30,6 +30,7 @@
 #include "jfr/recorder/jfrRecorder.hpp"
 #include "jfr/recorder/checkpoint/jfrMetadataEvent.hpp"
 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
+#include "jfr/recorder/repository/jfrChunk.hpp"
 #include "jfr/recorder/repository/jfrRepository.hpp"
 #include "jfr/recorder/repository/jfrChunkRotation.hpp"
 #include "jfr/recorder/repository/jfrChunkWriter.hpp"
@@ -425,3 +426,7 @@ JVM_END
 JVM_ENTRY_NO_ENV(void, jfr_unregister_stack_filter(JNIEnv* env, jclass jvm, jlong id))
   JfrStackFilterRegistry::remove(id);
 JVM_END
+
+NO_TRANSITION(jlong, jfr_nanos_now(JNIEnv* env, jclass jvm))
+  return JfrChunk::nanos_now();
+NO_TRANSITION_END
@@ -165,6 +165,8 @@ jlong JNICALL jfr_register_stack_filter(JNIEnv* env, jclass jvm, jobjectArray cl
 
 jlong JNICALL jfr_unregister_stack_filter(JNIEnv* env, jclass jvm, jlong id);
 
+jlong JNICALL jfr_nanos_now(JNIEnv* env, jclass jvm);
+
 #ifdef __cplusplus
 }
 #endif
@@ -100,7 +100,8 @@ JfrJniMethodRegistration::JfrJniMethodRegistration(JNIEnv* env) {
       (char*)"hostTotalSwapMemory", (char*)"()J", (void*) jfr_host_total_swap_memory,
       (char*)"emitDataLoss", (char*)"(J)V", (void*)jfr_emit_data_loss,
       (char*)"registerStackFilter", (char*)"([Ljava/lang/String;[Ljava/lang/String;)J", (void*)jfr_register_stack_filter,
-      (char*)"unregisterStackFilter", (char*)"(J)V", (void*)jfr_unregister_stack_filter
+      (char*)"unregisterStackFilter", (char*)"(J)V", (void*)jfr_unregister_stack_filter,
+      (char*)"nanosNow", (char*)"()J", (void*)jfr_nanos_now
     };
 
     const size_t method_array_length = sizeof(method) / sizeof(JNINativeMethod);
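The registration table binds these native entry points to methods on JFR's internal JVM class via `RegisterNatives`. A hedged sketch of what the Java peer for the new `"nanosNow"` / `"()J"` entry looks like; the real declaration lives on the JDK-internal `jdk.jfr.internal.JVM` class, not in application code:

```java
// Sketch of the Java side of a RegisterNatives binding.
public final class JVMSketch {
    // Descriptor "()J": no arguments, returns long (the nanosecond timestamp
    // produced by JfrChunk::nanos_now() in the native table above).
    public static native long nanosNow();
}
```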
@@ -36,6 +36,7 @@
 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
 #include "jfr/recorder/service/jfrOptionSet.hpp"
 #include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
+#include "jfr/recorder/storage/jfrReferenceCountedStorage.hpp"
 #include "jfr/support/jfrKlassUnloading.hpp"
 #include "jfr/support/jfrMethodLookup.hpp"
 #include "jfr/utilities/jfrHashtable.hpp"
@@ -272,11 +273,30 @@ static void install_stack_traces(const ObjectSampler* sampler) {
   iterate_samples(installer);
 }
 
+// Resets the blob write states from the previous epoch.
+static void reset_blob_write_state(const ObjectSampler* sampler, JavaThread* jt) {
+  assert(sampler != nullptr, "invariant");
+  const ObjectSample* sample = sampler->last_resolved();
+  while (sample != nullptr) {
+    if (sample->has_stacktrace()) {
+      sample->stacktrace()->reset_write_state();
+    }
+    if (sample->has_thread()) {
+      sample->thread()->reset_write_state();
+    }
+    if (sample->has_type_set()) {
+      sample->type_set()->reset_write_state();
+    }
+    sample = sample->next();
+  }
+}
+
 void ObjectSampleCheckpoint::on_rotation(const ObjectSampler* sampler) {
   assert(sampler != nullptr, "invariant");
   assert(LeakProfiler::is_running(), "invariant");
   JavaThread* const thread = JavaThread::current();
   DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(thread);)
+  reset_blob_write_state(sampler, thread);
   if (!ObjectSampler::has_unresolved_entry()) {
     return;
   }
@@ -326,38 +346,34 @@ void ObjectSampleCheckpoint::write_stacktrace(const JfrStackTrace* trace, JfrChe
   }
 }
 
-static void write_blob(const JfrBlobHandle& blob, JfrCheckpointWriter& writer, bool reset) {
-  if (reset) {
-    blob->reset_write_state();
-    return;
-  }
+static void write_blob(const JfrBlobHandle& blob, JfrCheckpointWriter& writer) {
   blob->exclusive_write(writer);
 }
 
-static void write_type_set_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
+static void write_type_set_blob(const ObjectSample* sample, JfrCheckpointWriter& writer) {
   if (sample->has_type_set()) {
-    write_blob(sample->type_set(), writer, reset);
+    write_blob(sample->type_set(), writer);
   }
 }
 
-static void write_thread_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
+static void write_thread_blob(const ObjectSample* sample, JfrCheckpointWriter& writer) {
   assert(sample->has_thread(), "invariant");
   if (sample->is_virtual_thread() || has_thread_exited(sample->thread_id())) {
-    write_blob(sample->thread(), writer, reset);
+    write_blob(sample->thread(), writer);
   }
 }
 
-static void write_stacktrace_blob(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
+static void write_stacktrace_blob(const ObjectSample* sample, JfrCheckpointWriter& writer) {
   if (sample->has_stacktrace()) {
-    write_blob(sample->stacktrace(), writer, reset);
+    write_blob(sample->stacktrace(), writer);
   }
 }
 
-static void write_blobs(const ObjectSample* sample, JfrCheckpointWriter& writer, bool reset) {
+static void write_blobs(const ObjectSample* sample, JfrCheckpointWriter& writer) {
   assert(sample != nullptr, "invariant");
-  write_stacktrace_blob(sample, writer, reset);
-  write_thread_blob(sample, writer, reset);
-  write_type_set_blob(sample, writer, reset);
+  write_stacktrace_blob(sample, writer);
+  write_thread_blob(sample, writer);
+  write_type_set_blob(sample, writer);
 }
 
 class BlobWriter {
@@ -365,18 +381,14 @@ class BlobWriter {
  private:
   const ObjectSampler* _sampler;
   JfrCheckpointWriter& _writer;
   const jlong _last_sweep;
-  bool _reset;
  public:
   BlobWriter(const ObjectSampler* sampler, JfrCheckpointWriter& writer, jlong last_sweep) :
-    _sampler(sampler), _writer(writer), _last_sweep(last_sweep), _reset(false) {}
+    _sampler(sampler), _writer(writer), _last_sweep(last_sweep) {}
   void sample_do(ObjectSample* sample) {
     if (sample->is_alive_and_older_than(_last_sweep)) {
-      write_blobs(sample, _writer, _reset);
+      write_blobs(sample, _writer);
    }
  }
-  void set_reset() {
-    _reset = true;
-  }
 };
static void write_sample_blobs(const ObjectSampler* sampler, bool emit_all, Thread* thread) {
|
||||
@@ -385,9 +397,6 @@ static void write_sample_blobs(const ObjectSampler* sampler, bool emit_all, Thre
|
||||
JfrCheckpointWriter writer(thread, false);
|
||||
BlobWriter cbw(sampler, writer, last_sweep);
|
||||
iterate_samples(cbw, true);
|
||||
// reset blob write states
|
||||
cbw.set_reset();
|
||||
iterate_samples(cbw, true);
|
||||
}
|
||||
|
||||
void ObjectSampleCheckpoint::write(const ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread) {
|
||||
@@ -403,67 +412,17 @@ void ObjectSampleCheckpoint::write(const ObjectSampler* sampler, EdgeStore* edge
|
||||
}
|
||||
}
|
||||
|
||||
// A linked list of saved type set blobs for the epoch.
|
||||
// The link consist of a reference counted handle.
|
||||
static JfrBlobHandle saved_type_set_blobs;
|
||||
|
||||
static void release_state_for_previous_epoch() {
|
||||
// decrements the reference count and the list is reinitialized
|
||||
saved_type_set_blobs = JfrBlobHandle();
|
||||
}
|
||||
|
||||
class BlobInstaller {
|
||||
public:
|
||||
~BlobInstaller() {
|
||||
release_state_for_previous_epoch();
|
||||
}
|
||||
void sample_do(ObjectSample* sample) {
|
||||
if (!sample->is_dead()) {
|
||||
sample->set_type_set(saved_type_set_blobs);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
static void install_type_set_blobs() {
|
||||
if (saved_type_set_blobs.valid()) {
|
||||
BlobInstaller installer;
|
||||
iterate_samples(installer);
|
||||
}
|
||||
}
|
||||
|
||||
static void save_type_set_blob(JfrCheckpointWriter& writer) {
|
||||
assert(writer.has_data(), "invariant");
|
||||
const JfrBlobHandle blob = writer.copy();
|
||||
if (saved_type_set_blobs.valid()) {
|
||||
saved_type_set_blobs->set_next(blob);
|
||||
} else {
|
||||
saved_type_set_blobs = blob;
|
||||
}
|
||||
}
|
||||
|
||||
// This routine has exclusive access to the sampler instance on entry.
|
||||
void ObjectSampleCheckpoint::on_type_set(JfrCheckpointWriter& writer) {
|
||||
void ObjectSampleCheckpoint::on_type_set(JavaThread* jt) {
|
||||
assert(LeakProfiler::is_running(), "invariant");
|
||||
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(JavaThread::current());)
|
||||
assert(ClassLoaderDataGraph_lock->owned_by_self(), "invariant");
|
||||
if (!ObjectSampler::has_unresolved_entry()) {
|
||||
return;
|
||||
}
|
||||
const ObjectSample* const last = ObjectSampler::sampler()->last();
|
||||
ObjectSample* const last = ObjectSampler::sampler()->last();
|
||||
assert(last != nullptr, "invariant");
|
||||
assert(last != ObjectSampler::sampler()->last_resolved(), "invariant");
|
||||
if (writer.has_data()) {
|
||||
save_type_set_blob(writer);
|
||||
}
|
||||
install_type_set_blobs();
|
||||
JfrReferenceCountedStorage::install(last, ObjectSampler::sampler()->last_resolved());
|
||||
ObjectSampler::sampler()->set_last_resolved(last);
|
||||
}
|
||||
|
||||
// This routine does NOT have exclusive access to the sampler instance on entry.
|
||||
void ObjectSampleCheckpoint::on_type_set_unload(JfrCheckpointWriter& writer) {
|
||||
assert(LeakProfiler::is_running(), "invariant");
|
||||
assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
|
||||
if (writer.has_data() && ObjectSampler::has_unresolved_entry()) {
|
||||
save_type_set_blob(writer);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,8 +50,7 @@ class ObjectSampleCheckpoint : AllStatic {
   static void write(const ObjectSampler* sampler, EdgeStore* edge_store, bool emit_all, Thread* thread);
   static void clear();
  public:
-  static void on_type_set(JfrCheckpointWriter& writer);
-  static void on_type_set_unload(JfrCheckpointWriter& writer);
+  static void on_type_set(JavaThread* jt);
   static void on_thread_exit(traceid tid);
   static void on_rotation(const ObjectSampler* sampler);
 };
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -233,7 +233,7 @@ class ObjectSample : public JfrCHeapObj {
     return _type_set.valid();
   }
 
-  void set_type_set(const JfrBlobHandle& ref) {
+  void install_type_set(const JfrBlobHandle& ref) {
     if (_type_set != ref) {
       if (_type_set.valid()) {
         _type_set->set_next(ref);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,6 +37,7 @@
 #include "jfr/recorder/service/jfrOptionSet.hpp"
 #include "jfr/recorder/storage/jfrEpochStorage.inline.hpp"
 #include "jfr/recorder/storage/jfrMemorySpace.inline.hpp"
+#include "jfr/recorder/storage/jfrReferenceCountedStorage.hpp"
 #include "jfr/recorder/storage/jfrStorageUtils.inline.hpp"
 #include "jfr/recorder/stringpool/jfrStringPool.hpp"
 #include "jfr/support/jfrDeprecationManager.hpp"
@@ -589,12 +590,14 @@ void JfrCheckpointManager::clear_type_set() {
     MutexLocker module_lock(Module_lock);
     JfrTypeSet::clear(&writer, &leakp_writer);
   }
-  JfrDeprecationManager::on_type_set(leakp_writer, nullptr, thread);
-  // We placed a blob in the Deprecated subsystem by moving the information
-  // from the leakp writer. For the real writer, the data will not be
-  // committed, because the JFR system is yet to be started.
-  // Therefore, the writer is cancelled before its destructor is run,
-  // to avoid writing unnecessary information into the checkpoint system.
+  JfrAddRefCountedBlob add_blob(leakp_writer);
+  JfrDeprecationManager::on_type_set(nullptr, thread);
+  // We installed a blob in the JfrReferenceCountedStorage subsystem
+  // by moving the information from the leakp writer.
+  // For the real writer, the data will not be committed,
+  // because the JFR system is yet to be started.
+  // Therefore, we cancel the writer before its destructor is run
+  // to avoid writing invalid information into the checkpoint system.
   writer.cancel();
 }
@@ -613,11 +616,11 @@ void JfrCheckpointManager::write_type_set() {
       MutexLocker module_lock(thread, Module_lock);
       JfrTypeSet::serialize(&writer, &leakp_writer, false, false);
     }
+    JfrAddRefCountedBlob add_blob(leakp_writer);
     if (LeakProfiler::is_running()) {
-      ObjectSampleCheckpoint::on_type_set(leakp_writer);
+      ObjectSampleCheckpoint::on_type_set(thread);
     }
     // Place this call after ObjectSampleCheckpoint::on_type_set.
-    JfrDeprecationManager::on_type_set(leakp_writer, _chunkwriter, thread);
+    JfrDeprecationManager::on_type_set(_chunkwriter, thread);
   }
   write();
 }
@@ -626,10 +629,7 @@ void JfrCheckpointManager::on_unloading_classes() {
   assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
   JfrCheckpointWriter writer(Thread::current());
   JfrTypeSet::on_unloading_classes(&writer);
-  if (LeakProfiler::is_running()) {
-    ObjectSampleCheckpoint::on_type_set_unload(writer);
-  }
-  JfrDeprecationManager::on_type_set_unload(writer);
+  JfrAddRefCountedBlob add_blob(writer, false /* move */, false /* reset */);
 }
 
 static size_t flush_type_set(Thread* thread) {
@@ -54,6 +54,7 @@ struct JfrCheckpointContext {
 };
 
 class JfrCheckpointWriter : public JfrCheckpointWriterBase {
+  friend class JfrAddRefCountedBlob;
   friend class JfrCheckpointManager;
   friend class JfrDeprecationManager;
   friend class JfrSerializerRegistration;
@@ -310,7 +310,9 @@ static void set_serialized(const T* ptr) {
   assert(ptr != nullptr, "invariant");
   if (current_epoch()) {
     CLEAR_THIS_EPOCH_CLEARED_BIT(ptr);
+    assert(!IS_THIS_EPOCH_CLEARED_BIT_SET(ptr), "invariant");
   }
+  assert(IS_PREVIOUS_EPOCH_CLEARED_BIT_SET(ptr), "invariant");
   SET_SERIALIZED(ptr);
   assert(IS_SERIALIZED(ptr), "invariant");
 }
@@ -929,9 +931,11 @@ void set_serialized<Method>(MethodPtr method) {
   assert(method != nullptr, "invariant");
   if (current_epoch()) {
     CLEAR_THIS_EPOCH_METHOD_CLEARED_BIT(method);
+    assert(!IS_THIS_EPOCH_METHOD_CLEARED_BIT_SET(method), "invariant");
   }
   assert(unloading() ? true : METHOD_IS_NOT_SERIALIZED(method), "invariant");
   SET_METHOD_SERIALIZED(method);
+  assert(IS_PREVIOUS_EPOCH_METHOD_CLEARED_BIT_SET(method), "invariant");
   assert(METHOD_IS_SERIALIZED(method), "invariant");
 }
@@ -96,6 +96,8 @@ class ClearArtifact {
     assert(IS_NOT_TRANSIENT(value), "invariant");
     SET_PREVIOUS_EPOCH_CLEARED_BIT(value);
     CLEAR_PREVIOUS_EPOCH_METHOD_AND_CLASS(value);
+    assert(IS_THIS_EPOCH_CLEARED_BIT_SET(value), "invariant");
+    assert(IS_PREVIOUS_EPOCH_CLEARED_BIT_SET(value), "invariant");
     return true;
   }
 };
@@ -111,6 +113,8 @@ class ClearArtifact<const Method*> {
     assert(METHOD_IS_NOT_TRANSIENT(method), "invariant");
     SET_PREVIOUS_EPOCH_METHOD_CLEARED_BIT(method);
     CLEAR_PREVIOUS_EPOCH_METHOD_FLAG(method);
+    assert(IS_THIS_EPOCH_METHOD_CLEARED_BIT_SET(method), "invariant");
+    assert(IS_PREVIOUS_EPOCH_METHOD_CLEARED_BIT_SET(method), "invariant");
     return true;
   }
 };
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -50,22 +50,22 @@ static traceid atomic_inc(traceid volatile* const dest, traceid stride = 1) {
 
 static traceid next_class_id() {
   static volatile traceid class_id_counter = LAST_TYPE_ID + 1; // + 1 is for the void.class primitive
-  return atomic_inc(&class_id_counter) << TRACE_ID_SHIFT;
+  return (atomic_inc(&class_id_counter) << TRACE_ID_SHIFT) | EPOCH_CLEARED_BITS;
 }
 
 static traceid next_module_id() {
   static volatile traceid module_id_counter = 0;
-  return atomic_inc(&module_id_counter) << TRACE_ID_SHIFT;
+  return (atomic_inc(&module_id_counter) << TRACE_ID_SHIFT) | EPOCH_CLEARED_BITS;
 }
 
 static traceid next_package_id() {
   static volatile traceid package_id_counter = 0;
-  return atomic_inc(&package_id_counter) << TRACE_ID_SHIFT;
+  return (atomic_inc(&package_id_counter) << TRACE_ID_SHIFT) | EPOCH_CLEARED_BITS;
 }
 
 static traceid next_class_loader_data_id() {
   static volatile traceid cld_id_counter = 0;
-  return atomic_inc(&cld_id_counter) << TRACE_ID_SHIFT;
+  return (atomic_inc(&cld_id_counter) << TRACE_ID_SHIFT) | EPOCH_CLEARED_BITS;
 }
 
 static bool found_jdk_internal_event_klass = false;
@@ -201,18 +201,18 @@ traceid JfrTraceId::load_raw(jclass jc) {
|
||||
// used by CDS / APPCDS as part of "remove_unshareable_info"
|
||||
void JfrTraceId::remove(const Klass* k) {
|
||||
assert(k != nullptr, "invariant");
|
||||
// Mask off and store the event flags.
|
||||
// Mask off and store the event flags and epoch clear bits.
|
||||
// This mechanism will retain the event specific flags
|
||||
// in the archive, allowing for event flag restoration
|
||||
// when renewing the traceid on klass revival.
|
||||
k->set_trace_id(EVENT_KLASS_MASK(k));
|
||||
k->set_trace_id(EPOCH_CLEARED_BITS | EVENT_KLASS_MASK(k));
|
||||
}
|
||||
|
||||
// used by CDS / APPCDS as part of "remove_unshareable_info"
|
||||
void JfrTraceId::remove(const Method* method) {
|
||||
assert(method != nullptr, "invariant");
|
||||
// Clear all bits.
|
||||
method->set_trace_flags(0);
|
||||
// Clear tag bits and set epoch cleared bits.
|
||||
method->set_trace_flags(static_cast<uint16_t>(EPOCH_CLEARED_BITS));
|
||||
}
|
||||
|
||||
// used by CDS / APPCDS as part of "restore_unshareable_info"
|
||||
|
||||
@@ -157,9 +157,9 @@ inline traceid JfrTraceIdLoadBarrier::load_leakp(const Klass* klass) {
|
||||
|
||||
inline traceid JfrTraceIdLoadBarrier::load_leakp(const Klass* klass, const Method* method) {
|
||||
assert(klass != nullptr, "invariant");
|
||||
assert(METHOD_AND_CLASS_USED_THIS_EPOCH(klass), "invariant");
|
||||
assert(method != nullptr, "invariant");
|
||||
assert(klass == method->method_holder(), "invariant");
|
||||
assert(METHOD_AND_CLASS_USED_THIS_EPOCH(klass), "invariant");
|
||||
if (should_tag(method)) {
|
||||
// the method is already logically tagged, just like the klass,
|
||||
// but because of redefinition, the latest Method*
|
||||
@@ -174,9 +174,9 @@ inline traceid JfrTraceIdLoadBarrier::load_leakp(const Klass* klass, const Metho
|
||||
|
||||
inline traceid JfrTraceIdLoadBarrier::load_leakp_previuos_epoch(const Klass* klass, const Method* method) {
|
||||
assert(klass != nullptr, "invariant");
|
||||
assert(METHOD_AND_CLASS_USED_PREVIOUS_EPOCH(klass), "invariant");
|
||||
assert(method != nullptr, "invariant");
|
||||
assert(klass == method->method_holder(), "invariant");
|
||||
assert(METHOD_AND_CLASS_USED_PREVIOUS_EPOCH(klass), "invariant");
|
||||
if (METHOD_FLAG_NOT_USED_PREVIOUS_EPOCH(method)) {
|
||||
// the method is already logically tagged, just like the klass,
|
||||
// but because of redefinition, the latest Method*
|
||||
|
||||
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -48,6 +48,7 @@
#define EPOCH_0_CLEARED_BIT (EPOCH_0_CLEARED_META_BIT << META_SHIFT)
#define EPOCH_1_CLEARED_META_BIT (BIT << 1)
#define EPOCH_1_CLEARED_BIT (EPOCH_1_CLEARED_META_BIT << META_SHIFT)
+#define EPOCH_CLEARED_BITS (EPOCH_1_CLEARED_BIT | EPOCH_0_CLEARED_BIT)
#define LEAKP_META_BIT (BIT << 2)
#define LEAKP_BIT (LEAKP_META_BIT << META_SHIFT)
#define TRANSIENT_META_BIT (BIT << 3)

@@ -136,6 +137,8 @@
#define IS_TRANSIENT(ptr) (TRACE_ID_PREDICATE(ptr, TRANSIENT_BIT))
#define IS_NOT_TRANSIENT(ptr) (!(IS_TRANSIENT(ptr)))
#define SET_SERIALIZED(ptr) (TRACE_ID_META_TAG(ptr, SERIALIZED_META_BIT))
+#define IS_THIS_EPOCH_CLEARED_BIT_SET(ptr) (TRACE_ID_PREDICATE(ptr, (THIS_EPOCH_BIT << META_SHIFT)))
+#define IS_PREVIOUS_EPOCH_CLEARED_BIT_SET(ptr) (TRACE_ID_PREDICATE(ptr, (PREVIOUS_EPOCH_BIT << META_SHIFT)))
#define IS_SERIALIZED(ptr) (TRACE_ID_PREDICATE(ptr, SERIALIZED_BIT))
#define IS_NOT_SERIALIZED(ptr) (!(IS_SERIALIZED(ptr)))
#define SHOULD_TAG(ptr) (NOT_USED_THIS_EPOCH(ptr))

@@ -161,5 +164,7 @@
#define CLEAR_THIS_EPOCH_METHOD_CLEARED_BIT(ptr) (METHOD_META_MASK_CLEAR(ptr,(~(THIS_EPOCH_BIT))))
#define IS_THIS_EPOCH_METHOD_CLEARED(ptr) (METHOD_FLAG_PREDICATE(method, THIS_EPOCH_BIT))
#define IS_PREVIOUS_EPOCH_METHOD_CLEARED(ptr) (METHOD_FLAG_PREDICATE(method, PREVIOUS_EPOCH_BIT))
+#define IS_THIS_EPOCH_METHOD_CLEARED_BIT_SET(ptr) (METHOD_FLAG_PREDICATE(ptr, (THIS_EPOCH_BIT << META_SHIFT)))
+#define IS_PREVIOUS_EPOCH_METHOD_CLEARED_BIT_SET(ptr) (METHOD_FLAG_PREDICATE(ptr, (PREVIOUS_EPOCH_BIT << META_SHIFT)))

#endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEIDMACROS_HPP
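Note: the epoch-cleared bit arithmetic above can be sanity-checked in isolation. A minimal standalone sketch, assuming BIT == 1 and META_SHIFT == 8 (values inferred from the cleared_epoch_bits = 512 | 256 constant introduced in jfrTraceIdExtension.hpp further down in this change, not read from this header directly):

// Standalone model of the epoch-cleared bit layout (assumed values).
#include <cstdint>

constexpr uint16_t kBit = 1;        // assumed: BIT
constexpr int      kMetaShift = 8;  // assumed: META_SHIFT

constexpr uint16_t kEpoch0ClearedBit = (kBit << 0) << kMetaShift; // bit 8
constexpr uint16_t kEpoch1ClearedBit = (kBit << 1) << kMetaShift; // bit 9
constexpr uint16_t kEpochClearedBits = kEpoch1ClearedBit | kEpoch0ClearedBit;

static_assert(kEpoch0ClearedBit == 256, "epoch 0 cleared bit is bit 8");
static_assert(kEpoch1ClearedBit == 512, "epoch 1 cleared bit is bit 9");
static_assert(kEpochClearedBits == (512 | 256), "matches cleared_epoch_bits below");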
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -35,7 +35,7 @@ static const u2 JFR_VERSION_MAJOR = 2;
static const u2 JFR_VERSION_MINOR = 1;

// strictly monotone
- static jlong nanos_now() {
+ jlong JfrChunk::nanos_now() {
  static jlong last = 0;

  jlong seconds;

@@ -47,8 +47,6 @@ static jlong nanos_now() {
  const jlong now = seconds * 1000000000 + nanos;
  if (now > last) {
    last = now;
  } else {
    ++last;
  }
  return last;
}

@@ -147,7 +145,7 @@ void JfrChunk::update_start_ticks() {
}

void JfrChunk::update_start_nanos() {
- const jlong now = nanos_now();
+ const jlong now = JfrChunk::nanos_now();
  assert(now >= _start_nanos, "invariant");
  assert(now >= _last_update_nanos, "invariant");
  _start_nanos = _last_update_nanos = now;

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -34,6 +34,8 @@ const u1 PAD = 0;
class JfrChunk : public JfrCHeapObj {
  friend class JfrChunkWriter;
  friend class JfrChunkHeadWriter;
+ public:
+  static jlong nanos_now();
 private:
  char* _path;
  int64_t _start_ticks;
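Note: JfrChunk::nanos_now() above keeps chunk timestamps strictly monotone by handing out last + 1 whenever the OS clock stalls or steps backwards. A minimal standalone sketch of the same pattern (a hypothetical helper using std::chrono instead of the VM's time source, with a mutex added because the sketch has no external serialization):

#include <chrono>
#include <cstdint>
#include <mutex>

int64_t monotone_nanos_now() {
  static std::mutex lock;
  static int64_t last = 0;
  std::lock_guard<std::mutex> guard(lock);
  const int64_t now = std::chrono::duration_cast<std::chrono::nanoseconds>(
      std::chrono::system_clock::now().time_since_epoch()).count();
  if (now > last) {
    last = now;   // wall clock advanced: take it
  } else {
    ++last;       // stalled or stepped back: still strictly increase
  }
  return last;
}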
@@ -0,0 +1,79 @@
/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfr/recorder/storage/jfrReferenceCountedStorage.hpp"
#include "jfr/support/jfrDeprecationManager.hpp"

// Currently only two subsystems use type set blobs. Save a blob only if either has an unresolved entry.
static inline bool save_blob_predicate() {
  return JfrDeprecationManager::has_unresolved_entry() || ObjectSampler::has_unresolved_entry();
}

JfrAddRefCountedBlob::JfrAddRefCountedBlob(JfrCheckpointWriter& writer, bool move /* true */, bool reset /* true */) : _reset(reset) {
  if (writer.has_data()) {
    if (save_blob_predicate()) {
      JfrReferenceCountedStorage::save_blob(writer, move);
    } else if (move) {
      writer.cancel();
    }
  }
  DEBUG_ONLY(if (reset) JfrReferenceCountedStorage::set_scope();)
}

JfrAddRefCountedBlob::~JfrAddRefCountedBlob() {
  if (_reset) {
    JfrReferenceCountedStorage::reset();
  }
}

JfrBlobHandle JfrReferenceCountedStorage::_type_sets = JfrBlobHandle();
DEBUG_ONLY(bool JfrReferenceCountedStorage::_scope = false;)

void JfrReferenceCountedStorage::save_blob(JfrCheckpointWriter& writer, bool move /* false */) {
  assert(writer.has_data(), "invariant");
  const JfrBlobHandle blob = move ? writer.move() : writer.copy();
  if (_type_sets.valid()) {
    _type_sets->set_next(blob);
    return;
  }
  _type_sets = blob;
}

void JfrReferenceCountedStorage::reset() {
  assert(_scope, "invariant");
  if (_type_sets.valid()) {
    _type_sets = JfrBlobHandle();
  }
  DEBUG_ONLY(_scope = false;)
}

#ifdef ASSERT
void JfrReferenceCountedStorage::set_scope() {
  assert(!_scope, "invariant");
  _scope = true;
}
#endif
@@ -0,0 +1,68 @@
/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_JFR_RECORDER_STORAGE_JFRREFERENCECOUNTEDSTORAGE_HPP
#define SHARE_JFR_RECORDER_STORAGE_JFRREFERENCECOUNTEDSTORAGE_HPP

#include "jfr/utilities/jfrBlob.hpp"
#include "memory/allocation.hpp"
#include "utilities/macros.hpp"

class JfrCheckpointWriter;

// RAII helper class for adding blobs to the storage.
class JfrAddRefCountedBlob : public StackObj {
 private:
  bool _reset;
 public:
  JfrAddRefCountedBlob(JfrCheckpointWriter& writer, bool move = true, bool reset = true);
  ~JfrAddRefCountedBlob();
};

// The debug aid 'scope' implies the proper RAII save construct is placed on stack.
// This is a necessary condition for installing reference counted storage to nodes.
class JfrReferenceCountedStorage : AllStatic {
  friend class JfrAddRefCountedBlob;
 private:
  static JfrBlobHandle _type_sets; // linked-list of blob handles saved during epoch.
  DEBUG_ONLY(static bool _scope;)

  static void save_blob(JfrCheckpointWriter& writer, bool move = false);
  static void reset();
  DEBUG_ONLY(static void set_scope();)

 public:
  template <typename T>
  static void install(T* node, const T* end) {
    assert(_scope, "invariant");
    if (_type_sets.valid()) {
      while (node != end) {
        node->install_type_set(_type_sets);
        node = node->next();
      }
    }
  }
};

#endif // SHARE_JFR_RECORDER_STORAGE_JFRREFERENCECOUNTEDSTORAGE_HPP
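Note: a hypothetical usage sketch of the RAII pair declared above (not taken from the patch). Data is serialized through the writer first; the guard then either parks it in the reference counted storage or cancels it, and resets the storage when it leaves scope:

void flush_type_sets_example(JfrCheckpointWriter& writer) {
  // ... serialize type sets through 'writer' here ...
  JfrAddRefCountedBlob add_blob(writer);  // move = true, reset = true
  // If the deprecation subsystem or the object sampler still has an
  // unresolved entry, the constructor saves the writer contents as a
  // reference counted blob; otherwise the buffered data is cancelled.
}  // ~JfrAddRefCountedBlob runs JfrReferenceCountedStorage::reset()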
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -116,8 +116,8 @@ bool JfrDeprecatedStackTraceWriter::process(const JfrDeprecatedEdge* edge) {
  return true;
}

- JfrDeprecatedEventWriter::JfrDeprecatedEventWriter(JfrChunkWriter& cw, bool stacktrace) :
-   _now(JfrTicks::now()),_cw(cw), _for_removal(only_for_removal()), _stacktrace(stacktrace), _did_write(false) {}
+ JfrDeprecatedEventWriter::JfrDeprecatedEventWriter(JfrChunkWriter& cw, JfrCheckpointWriter& tsw, bool stacktrace) :
+   _now(JfrTicks::now()),_cw(cw), _tsw(tsw), _for_removal(only_for_removal()), _stacktrace(stacktrace) {}

static size_t calculate_event_size(const JfrDeprecatedEdge* edge, JfrChunkWriter& cw, const JfrTicks& now, bool stacktrace) {
  assert(edge != nullptr, "invariant");

@@ -141,14 +141,31 @@ static void write_event(const JfrDeprecatedEdge* edge, JfrChunkWriter& cw, const
  cw.write(edge->for_removal());
}

+ static void write_type_set(const JfrDeprecatedEdge* edge, JfrCheckpointWriter& tsw) {
+   if (!edge->has_type_set()) {
+     return;
+   }
+   edge->type_set()->exclusive_write(tsw);
+ }

bool JfrDeprecatedEventWriter::process(const JfrDeprecatedEdge* edge) {
  assert(edge != nullptr, "invariant");
  if (_for_removal && !edge->for_removal()) {
    return true;
  }
- write_event(edge, _cw,_now, _stacktrace);
- if (!_did_write) {
-   _did_write = true;
- }
+ write_event(edge, _cw, _now, _stacktrace);
+ write_type_set(edge, _tsw);
  return true;
}

+ JfrDeprecatedEventClear::JfrDeprecatedEventClear() {}

+ bool JfrDeprecatedEventClear::process(const JfrDeprecatedEdge* edge) {
+   assert(edge != nullptr, "invariant");
+   if (!edge->has_type_set()) {
+     return true;
+   }
+   edge->type_set()->reset_write_state();
+   return true;
+ }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -56,12 +56,17 @@ class JfrDeprecatedEventWriter : public StackObj {
 private:
  JfrTicks _now;
  JfrChunkWriter& _cw;
+ JfrCheckpointWriter& _tsw;
  bool _for_removal;
  bool _stacktrace;
- bool _did_write;
 public:
- JfrDeprecatedEventWriter(JfrChunkWriter& cw, bool stacktrace);
- bool did_write() const { return _did_write; }
+ JfrDeprecatedEventWriter(JfrChunkWriter& cw, JfrCheckpointWriter& tsw, bool stacktrace);
  bool process(const JfrDeprecatedEdge* edge);
};

+ class JfrDeprecatedEventClear : public StackObj {
+  public:
+   JfrDeprecatedEventClear();
+   bool process(const JfrDeprecatedEdge* edge);
+ };
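Note: both writers follow the JFR list-processor idiom: a StackObj functor with bool process(const T*) that a list's iterate() applies to every node, stopping early on false. A generic sketch of that idiom with hypothetical types (not the real JfrLinkedList API):

template <typename Node>
struct IntrusiveList {
  Node* _head = nullptr;
  // Apply f.process(node) to each node; stop if a processor returns false.
  template <typename Functor>
  void iterate(Functor& f) {
    for (Node* n = _head; n != nullptr; n = n->next()) {
      if (!f.process(n)) {
        return;  // the processor asked to stop early
      }
    }
  }
};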
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -32,6 +32,7 @@
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceId.inline.hpp"
#include "jfr/recorder/repository/jfrChunkWriter.hpp"
#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
+#include "jfr/recorder/storage/jfrReferenceCountedStorage.hpp"
#include "jfr/support/jfrDeprecationEventWriter.hpp"
#include "jfr/support/jfrDeprecationManager.hpp"
#include "jfr/support/jfrKlassUnloading.hpp"

@@ -66,6 +67,7 @@ static inline traceid load_traceid(const Method* method) {
JfrDeprecatedEdge::JfrDeprecatedEdge(const Method* method, Method* sender, int bci, u1 frame_type, JavaThread* jt) :
  _invocation_time(JfrTicks::now()),
  _stacktrace(),
+ _type_set(),
  _next(nullptr),
  _deprecated_ik(method->method_holder()),
  _deprecated_methodid(load_traceid(method)),

@@ -94,11 +96,25 @@ const JfrBlobHandle& JfrDeprecatedEdge::stacktrace() const {
  return _stacktrace;
}

+ bool JfrDeprecatedEdge::has_type_set() const {
+   return _type_set.valid();
+ }

+ const JfrBlobHandle& JfrDeprecatedEdge::type_set() const {
+   assert(has_type_set(), "invariant");
+   return _type_set;
+ }

+ void JfrDeprecatedEdge::install_type_set(const JfrBlobHandle& type_set) {
+   assert(!has_type_set(), "invariant");
+   _type_set = type_set;
+ }

typedef JfrLinkedList<JfrDeprecatedEdge> DeprecatedEdgeList;

static DeprecatedEdgeList _list; // Newly constructed edges are concurrently added to this list.
static DeprecatedEdgeList _pending_list; // During epoch rotation (safepoint) entries in _list are moved onto _pending_list
- static DeprecatedEdgeList _resolved_list; // Fully resolved edges (event and stacktrace blobs).
+ static DeprecatedEdgeList _resolved_list; // Fully resolved edges (event, stacktrace and typeset blobs).

static JfrDeprecatedEdge* allocate_edge(const Method* method, Method* sender, int bci, u1 frame_type, JavaThread* jt) {
  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt);)

@@ -225,10 +241,6 @@ static void transfer_list() {
  }
}

- void JfrDeprecationManager::on_level_setting_update(int64_t new_level) {
-   JfrDeprecatedEventWriterState::on_level_setting_update(new_level);
- }

void JfrDeprecationManager::on_safepoint_clear() {
  assert(!_enqueue_klasses, "invariant");
  // We are now starting JFR, so begin enqueuing tagged klasses.

@@ -270,6 +282,23 @@ static void add_to_leakp_set(const JfrDeprecatedEdge* edge) {
static DeprecatedEdgeList::NodePtr _pending_head = nullptr;
static DeprecatedEdgeList::NodePtr _pending_tail = nullptr;

+ inline DeprecatedEdgeList::NodePtr pending_head() {
+   return Atomic::load(&_pending_head);
+ }

+ // The test for a pending head can be read concurrently from a thread doing class unloading.
+ inline static bool has_pending_head() {
+   return pending_head() != nullptr;
+ }

+ inline static bool no_pending_head() {
+   return !has_pending_head();
+ }

+ inline static void set_pending_head(DeprecatedEdgeList::NodePtr head) {
+   Atomic::store(&_pending_head, head);
+ }

class PendingListProcessor {
 private:
  JfrCheckpointWriter& _writer;

@@ -281,66 +310,57 @@ class PendingListProcessor {
    JfrDeprecatedStackTraceWriter::install_stacktrace_blob(edge, _writer, _jt);
    assert(edge->has_stacktrace(), "invariant");
    add_to_leakp_set(edge);
-   if (_pending_head == nullptr) {
-     _pending_head = edge;
+   if (no_pending_head()) {
+     set_pending_head(edge);
    }
    _pending_tail = edge;
    return true;
  }
};

+ // Resets the pending head and tail.
+ // Resets blob write states for nodes on the resolved list, dirtied in the previous epoch.
+ static void reset_type_set_blobs() {
+   set_pending_head(nullptr);
+   _pending_tail = nullptr;
+   if (_resolved_list.is_nonempty()) {
+     JfrDeprecatedEventClear clear;
+     _resolved_list.iterate(clear);
+   }
+ }

void JfrDeprecationManager::prepare_type_set(JavaThread* jt) {
- _pending_head = nullptr;
- _pending_tail = nullptr;
+ reset_type_set_blobs();
  if (_pending_list.is_nonempty()) {
    JfrKlassUnloading::sort(true);
    JfrCheckpointWriter writer(true /* prev epoch */, jt, false /* header */);
    PendingListProcessor plp(writer, jt);
    _pending_list.iterate(plp);
-   assert(_pending_head != nullptr, "invariant");
+   assert(has_pending_head(), "invariant");
    assert(_pending_tail != nullptr, "invariant");
    assert(_pending_tail->next() == nullptr, "invariant");
    // Excise already resolved edges to link them.
    _pending_tail->set_next(_resolved_list.cut());
    // Re-insertion.
-   _resolved_list.add_list(_pending_head);
+   _resolved_list.add_list(pending_head());
    _pending_list.clear();
  }
  assert(_pending_list.is_empty(), "invariant");
}

- // A linked-list of blob handles.
- static JfrBlobHandle type_set_blobs;

- static inline void write_type_set_blobs(JfrCheckpointWriter& writer) {
-   type_set_blobs->write(writer);
- }

- static void save_type_set_blob(JfrCheckpointWriter& writer, bool copy = false) {
-   assert(writer.has_data(), "invariant");
-   const JfrBlobHandle blob = copy ? writer.copy() : writer.move();
-   if (type_set_blobs.valid()) {
-     type_set_blobs->set_next(blob);
-   } else {
-     type_set_blobs = blob;
-   }
- }

- void JfrDeprecationManager::on_type_set_unload(JfrCheckpointWriter& writer) {
-   if (writer.has_data()) {
-     save_type_set_blob(writer, true);
-   }
- }
+ bool JfrDeprecationManager::has_unresolved_entry() {
+   return _list.is_nonempty() || has_pending_head() || _pending_list.is_nonempty();
+ }

static inline bool has_stacktrace() {
  return JfrEventSetting::has_stacktrace(JfrDeprecatedInvocationEvent);
}

- static inline bool write_events(JfrChunkWriter& cw) {
+ static inline void write_events(JfrChunkWriter& cw, Thread* thread, bool on_error) {
  assert(_resolved_list.is_nonempty(), "invariant");
- JfrDeprecatedEventWriter ebw(cw, has_stacktrace());
+ JfrCheckpointWriter type_set_writer(!on_error, thread, false);
+ JfrDeprecatedEventWriter ebw(cw, type_set_writer, has_stacktrace());
  _resolved_list.iterate(ebw);
- return ebw.did_write();
}

static inline void write_stacktraces(JfrChunkWriter& cw) {

@@ -349,34 +369,30 @@ static inline void write_stacktraces(JfrChunkWriter& cw) {
  _resolved_list.iterate(scw);
}

- static inline void write_type_sets(Thread* thread, bool on_error) {
-   JfrCheckpointWriter writer(!on_error, thread, false);
-   write_type_set_blobs(writer);
- }

- // First, we consolidate all stacktrace blobs into a single TYPE_STACKTRACE checkpoint and serialize it to the chunk.
- // Secondly, we serialize all events to the chunk.
- // Thirdly, the type set blobs are written into the JfrCheckpoint system, to be serialized to the chunk
- // just after we return from here.
+ // First, we consolidate all stack trace blobs into a single TYPE_STACKTRACE checkpoint
+ // and serialize it to the chunk. Then, all events are serialized, and unique type set blobs
+ // written into the JfrCheckpoint system to be serialized to the chunk upon return.
void JfrDeprecationManager::write_edges(JfrChunkWriter& cw, Thread* thread, bool on_error /* false */) {
  if (_resolved_list.is_nonempty() && JfrEventSetting::is_enabled(JfrDeprecatedInvocationEvent)) {
    if (has_stacktrace()) {
      write_stacktraces(cw);
    }
-   if (write_events(cw)) {
-     write_type_sets(thread, on_error);
-   }
+   write_events(cw, thread, on_error);
  }
}

- void JfrDeprecationManager::on_type_set(JfrCheckpointWriter& writer, JfrChunkWriter* cw, Thread* thread) {
+ void JfrDeprecationManager::on_type_set(JfrChunkWriter* cw, Thread* thread) {
  assert(_pending_list.is_empty(), "invariant");
- if (_pending_head != nullptr) {
-   save_type_set_blob(writer);
- } else {
-   writer.cancel();
+ if (has_pending_head()) {
+   assert(_pending_tail != nullptr, "invariant");
+   // Install type set blobs for the pending, i.e. unresolved nodes.
+   JfrReferenceCountedStorage::install(pending_head(), _pending_tail->next());
  }
  if (cw != nullptr) {
    write_edges(*cw, thread);
  }
}

+ void JfrDeprecationManager::on_level_setting_update(int64_t new_level) {
+   JfrDeprecatedEventWriterState::on_level_setting_update(new_level);
+ }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -42,6 +42,7 @@ class JfrDeprecatedEdge : public CHeapObj<mtTracing> {
 private:
  JfrTicks _invocation_time;
  JfrBlobHandle _stacktrace;
+ JfrBlobHandle _type_set;
  JfrDeprecatedEdge* _next;
  InstanceKlass* _deprecated_ik;
  traceid _deprecated_methodid;

@@ -58,7 +59,7 @@ class JfrDeprecatedEdge : public CHeapObj<mtTracing> {
 public:
  JfrDeprecatedEdge(const Method* method, Method* sender, int bci, u1 frame_type, JavaThread* jt);

- const JfrDeprecatedEdge* next() const { return _next; }
+ JfrDeprecatedEdge* next() const { return _next; }
  void set_next(JfrDeprecatedEdge* edge) { _next = edge; }

  bool has_event() const;

@@ -68,6 +69,10 @@ class JfrDeprecatedEdge : public CHeapObj<mtTracing> {
  const JfrBlobHandle& stacktrace() const;
  void install_stacktrace_blob(JavaThread* jt);

+ bool has_type_set() const;
+ const JfrBlobHandle& type_set() const;
+ void install_type_set(const JfrBlobHandle& type_set);

  const InstanceKlass* deprecated_ik() const { return _deprecated_ik; }
  traceid deprecated_methodid() const { return _deprecated_methodid; }

@@ -89,11 +94,11 @@ class JfrDeprecationManager : AllStatic {
  static void on_safepoint_write();
  static void on_recorder_stop();
  static void prepare_type_set(JavaThread* jt);
- static void on_type_set(JfrCheckpointWriter& writer, JfrChunkWriter* cw, Thread* thread);
- static void on_type_set_unload(JfrCheckpointWriter& writer);
+ static void on_type_set(JfrChunkWriter* cw, Thread* thread);
  static void write_edges(JfrChunkWriter& cw, Thread* thread, bool on_error = false);
  static void on_link(const Method* method, Method* sender, int bci, u1 frame_type, JavaThread* thread);
  static void on_level_setting_update(int64_t new_level);
+ static bool has_unresolved_entry();
};

#endif // SHARE_JFR_SUPPORT_JFRDEPRECATIONMANAGER_HPP
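Note: JfrBlobHandle behaves as a reference counted handle whose blobs can be chained with set_next(), so holding the head keeps a whole epoch's type sets alive. A loose model using std::shared_ptr (hypothetical; the real blob uses an intrusive refcount and this is not its API):

#include <memory>
#include <utility>

struct Blob {
  std::shared_ptr<Blob> _next;
  // Append at the end of the chain; the head transitively owns every blob,
  // so releasing the head handle releases the entire chain.
  void set_next(std::shared_ptr<Blob> next) {
    Blob* last = this;
    while (last->_next != nullptr) {
      last = last->_next.get();
    }
    last->_next = std::move(next);
  }
};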
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -47,13 +47,13 @@ class JfrIntrinsicSupport : AllStatic {
#define JFR_HAVE_INTRINSICS

#define JFR_TEMPLATES(template) \
+ template(jdk_jfr_internal_HiddenWait, "jdk/jfr/internal/HiddenWait") \
  template(jdk_jfr_internal_JVM, "jdk/jfr/internal/JVM") \
  template(jdk_jfr_internal_event_EventWriterFactory, "jdk/jfr/internal/event/EventWriterFactory") \
  template(jdk_jfr_internal_event_EventConfiguration_signature, "Ljdk/jfr/internal/event/EventConfiguration;") \
  template(getEventWriter_signature, "()Ljdk/jfr/internal/event/EventWriter;") \
  template(eventConfiguration_name, "eventConfiguration") \
  template(commit_name, "commit") \
  template(jfr_chunk_rotation_monitor, "jdk/jfr/internal/JVM$ChunkRotationMonitor") \

#define JFR_INTRINSICS(do_intrinsic, do_class, do_name, do_signature, do_alias) \
  do_intrinsic(_counterTime, jdk_jfr_internal_JVM, counterTime_name, void_long_signature, F_SN) \
@@ -44,11 +44,13 @@
#define REMOVE_METHOD_ID(method) JfrTraceId::remove(method);
#define RESTORE_ID(k) JfrTraceId::restore(k);

+ static constexpr const uint16_t cleared_epoch_bits = 512 | 256;

class JfrTraceFlag {
 private:
  mutable uint16_t _flags;
 public:
- JfrTraceFlag() : _flags(0) {}
+ JfrTraceFlag() : _flags(cleared_epoch_bits) {}
  bool is_set(uint16_t flag) const {
    return (_flags & flag) != 0;
  }

@@ -96,9 +98,8 @@ class JfrTraceFlag {
  uint8_t* trace_meta_addr() const { \
    return _trace_flags.meta_addr(); \
  } \
- void copy_trace_flags(uint8_t src_flags) const { \
-   uint8_t flags = *_trace_flags.flags_addr(); \
-   _trace_flags.set_flags(flags | src_flags); \
+ void copy_trace_flags(uint16_t rhs_flags) const { \
+   _trace_flags.set_flags(_trace_flags.flags() | rhs_flags); \
  }

#endif // SHARE_JFR_SUPPORT_JFRTRACEIDEXTENSION_HPP
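Note: the copy_trace_flags() signature change from uint8_t to uint16_t is load-bearing. The new epoch-cleared bits sit at positions 8 and 9, above the uint8_t range, so a narrow parameter would silently drop exactly the bits this change introduces. A small demonstration:

#include <cstdint>

int main() {
  const uint16_t cleared_epoch_bits = 512 | 256;                    // bits 9 and 8
  const uint8_t  narrow = static_cast<uint8_t>(cleared_epoch_bits); // truncates to 0
  const uint16_t wide   = cleared_epoch_bits;                       // preserved: 768
  return (narrow == 0 && wide == 768) ? 0 : 1;
}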
@@ -55,6 +55,7 @@
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
+#include "runtime/synchronizer.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/macros.hpp"

@@ -173,7 +174,7 @@ void ConstantPoolCache::set_direct_or_vtable_call(Bytecodes::Code invoke_code,
  }
  if (invoke_code == Bytecodes::_invokestatic) {
    assert(method->method_holder()->is_initialized() ||
-          method->method_holder()->is_init_thread(JavaThread::current()),
+          method->method_holder()->is_reentrant_initialization(JavaThread::current()),
           "invalid class initialization state for invoke_static");

    if (!VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {

@@ -268,11 +269,20 @@ ResolvedMethodEntry* ConstantPoolCache::set_method_handle(int method_index, cons
  // A losing writer waits on the lock until the winner writes the method and leaves
  // the lock, so that when the losing writer returns, he can use the linked
  // cache entry.

- // Lock fields to write
  Bytecodes::Code invoke_code = Bytecodes::_invokehandle;
- MutexLocker ml(constant_pool()->pool_holder()->init_monitor());
- ResolvedMethodEntry* method_entry = resolved_method_entry_at(method_index);

+ JavaThread* current = JavaThread::current();
+ objArrayHandle resolved_references(current, constant_pool()->resolved_references());
+ // Use the resolved_references() lock for this cpCache entry.
+ // resolved_references are created for all classes with Invokedynamic, MethodHandle
+ // or MethodType constant pool cache entries.
+ assert(resolved_references() != nullptr,
+        "a resolved_references array should have been created for this class");
+ ObjectLocker ol(resolved_references, current);

+ ResolvedMethodEntry* method_entry = resolved_method_entry_at(method_index);
  if (method_entry->is_resolved(invoke_code)) {
    return method_entry;
  }

@@ -310,7 +320,6 @@ ResolvedMethodEntry* ConstantPoolCache::set_method_handle(int method_index, cons
  // Store appendix, if any.
  if (has_appendix) {
    const int appendix_index = method_entry->resolved_references_index();
-   objArrayOop resolved_references = constant_pool()->resolved_references();
    assert(appendix_index >= 0 && appendix_index < resolved_references->length(), "oob");
    assert(resolved_references->obj_at(appendix_index) == nullptr, "init just once");
    resolved_references->obj_at_put(appendix_index, appendix());

@@ -555,7 +564,14 @@ bool ConstantPoolCache::save_and_throw_indy_exc(
  assert(PENDING_EXCEPTION->is_a(vmClasses::LinkageError_klass()),
         "No LinkageError exception");

- MutexLocker ml(THREAD, cpool->pool_holder()->init_monitor());
+ // Use the resolved_references() lock for this cpCache entry.
+ // resolved_references are created for all classes with Invokedynamic, MethodHandle
+ // or MethodType constant pool cache entries.
+ JavaThread* current = THREAD;
+ objArrayHandle resolved_references(current, cpool->resolved_references());
+ assert(resolved_references() != nullptr,
+        "a resolved_references array should have been created for this class");
+ ObjectLocker ol(resolved_references, current);

  // if the indy_info is resolved or the indy_resolution_failed flag is set then another
  // thread either succeeded in resolving the method or got a LinkageError

@@ -578,11 +594,21 @@ bool ConstantPoolCache::save_and_throw_indy_exc(

oop ConstantPoolCache::set_dynamic_call(const CallInfo &call_info, int index) {
  ResourceMark rm;
- MutexLocker ml(constant_pool()->pool_holder()->init_monitor());

+ // Use the resolved_references() lock for this cpCache entry.
+ // resolved_references are created for all classes with Invokedynamic, MethodHandle
+ // or MethodType constant pool cache entries.
+ JavaThread* current = JavaThread::current();
+ constantPoolHandle cp(current, constant_pool());

+ objArrayHandle resolved_references(current, cp->resolved_references());
+ assert(resolved_references() != nullptr,
+        "a resolved_references array should have been created for this class");
+ ObjectLocker ol(resolved_references, current);
  assert(index >= 0, "Indy index must be positive at this point");

  if (resolved_indy_entry_at(index)->method() != nullptr) {
-   return constant_pool()->resolved_reference_from_indy(index);
+   return cp->resolved_reference_from_indy(index);
  }

  if (resolved_indy_entry_at(index)->resolution_failed()) {

@@ -590,9 +616,7 @@ oop ConstantPoolCache::set_dynamic_call(const CallInfo &call_info, int index) {
    // resolution. Ignore our success and throw their exception.
    guarantee(index >= 0, "Invalid indy index");
    int encoded_index = ResolutionErrorTable::encode_indy_index(index);
-   JavaThread* THREAD = JavaThread::current(); // For exception macros.
-   constantPoolHandle cp(THREAD, constant_pool());
-   ConstantPool::throw_resolution_error(cp, encoded_index, THREAD);
+   ConstantPool::throw_resolution_error(cp, encoded_index, current);
    return nullptr;
  }

@@ -616,7 +640,6 @@ oop ConstantPoolCache::set_dynamic_call(const CallInfo &call_info, int index) {

  if (has_appendix) {
    const int appendix_index = resolved_indy_entry_at(index)->resolved_references_index();
-   objArrayOop resolved_references = constant_pool()->resolved_references();
    assert(appendix_index >= 0 && appendix_index < resolved_references->length(), "oob");
    assert(resolved_references->obj_at(appendix_index) == nullptr, "init just once");
    resolved_references->obj_at_put(appendix_index, appendix());
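Note: the cpCache changes above replace the class init monitor with an ObjectLocker on the resolved_references array, but keep the same lock / re-check / publish shape: losing writers block on the lock, then observe the winner's result. A schematic sketch with hypothetical types (the real code locks an oop, not a std::mutex):

#include <mutex>

template <typename Entry, typename ResolveFn>
Entry* resolve_once(std::mutex& lock, Entry* entry, ResolveFn resolve) {
  std::lock_guard<std::mutex> guard(lock);  // losing writers wait here
  if (entry->is_resolved()) {
    return entry;                           // winner already published the result
  }
  resolve(entry);                           // only the winner resolves
  return entry;
}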
@@ -86,6 +86,7 @@
#include "runtime/orderAccess.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/reflection.hpp"
+#include "runtime/synchronizer.hpp"
#include "runtime/threads.hpp"
#include "services/classLoadingService.hpp"
#include "services/finalizerService.hpp"

@@ -497,9 +498,6 @@ Array<int>* InstanceKlass::create_new_default_vtable_indices(int len, TRAPS) {
  return vtable_indices;
}

- static Monitor* create_init_monitor(const char* name) {
-   return new Monitor(Mutex::safepoint, name);
- }

InstanceKlass::InstanceKlass() {
  assert(CDSConfig::is_dumping_static_archive() || CDSConfig::is_using_archive(), "only for CDS");

@@ -517,7 +515,6 @@ InstanceKlass::InstanceKlass(const ClassFileParser& parser, KlassKind kind, Refe
  _nest_host_index(0),
  _init_state(allocated),
  _reference_type(reference_type),
- _init_monitor(create_init_monitor("InstanceKlassInitMonitor_lock")),
  _init_thread(nullptr)
{
  set_vtable_length(parser.vtable_size());

@@ -745,6 +742,28 @@ objArrayOop InstanceKlass::signers() const {
  return java_lang_Class::signers(java_mirror());
}

+ oop InstanceKlass::init_lock() const {
+   // return the init lock from the mirror
+   oop lock = java_lang_Class::init_lock(java_mirror());
+   // Prevent reordering with any access of initialization state
+   OrderAccess::loadload();
+   assert(lock != nullptr || !is_not_initialized(), // initialized or in_error state
+          "only fully initialized state can have a null lock");
+   return lock;
+ }

+ // Set the initialization lock to null so the object can be GC'ed. Any racing
+ // threads to get this lock will see a null lock and will not lock.
+ // That's okay because they all check for initialized state after getting
+ // the lock and return.
+ void InstanceKlass::fence_and_clear_init_lock() {
+   // make sure previous stores are all done, notably the init_state.
+   OrderAccess::storestore();
+   java_lang_Class::clear_init_lock(java_mirror());
+   assert(!is_not_initialized(), "class must be initialized now");
+ }
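Note: init_lock() and fence_and_clear_init_lock() above pair a loadload on the reader side with a storestore on the publisher side, so a thread that reads a null lock is guaranteed to also see the fully initialized state. A sketch of the same pairing using standard atomics rather than HotSpot's OrderAccess (hypothetical names):

#include <atomic>

std::atomic<int>   g_init_state{0};        // stands in for _init_state
std::atomic<void*> g_init_lock{nullptr};   // stands in for the mirror's init lock

void publish_initialized() {
  g_init_state.store(1, std::memory_order_relaxed);
  // storestore: the state store must become visible before the lock clears.
  g_init_lock.store(nullptr, std::memory_order_release);
}

bool is_initialized_or_locked(void** lock_out) {
  // loadload: read the lock before any read of the initialization state.
  *lock_out = g_init_lock.load(std::memory_order_acquire);
  return *lock_out != nullptr || g_init_state.load(std::memory_order_relaxed) == 1;
}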
// See "The Virtual Machine Specification" section 2.16.5 for a detailed explanation of the class initialization
|
||||
// process. The step comments refers to the procedure described in that section.
|
||||
// Note: implementation moved to static method to expose the this pointer.
|
||||
@@ -772,49 +791,6 @@ void InstanceKlass::link_class(TRAPS) {
|
||||
}
|
||||
}
|
||||
|
||||
void InstanceKlass::check_link_state_and_wait(JavaThread* current) {
|
||||
MonitorLocker ml(current, _init_monitor);
|
||||
|
||||
bool debug_logging_enabled = log_is_enabled(Debug, class, init);
|
||||
|
||||
// Another thread is linking this class, wait.
|
||||
while (is_being_linked() && !is_init_thread(current)) {
|
||||
if (debug_logging_enabled) {
|
||||
ResourceMark rm(current);
|
||||
log_debug(class, init)("Thread \"%s\" waiting for linking of %s by thread \"%s\"",
|
||||
current->name(), external_name(), init_thread_name());
|
||||
}
|
||||
ml.wait();
|
||||
}
|
||||
|
||||
// This thread is recursively linking this class, continue
|
||||
if (is_being_linked() && is_init_thread(current)) {
|
||||
if (debug_logging_enabled) {
|
||||
ResourceMark rm(current);
|
||||
log_debug(class, init)("Thread \"%s\" recursively linking %s",
|
||||
current->name(), external_name());
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// If this class wasn't linked already, set state to being_linked
|
||||
if (!is_linked()) {
|
||||
if (debug_logging_enabled) {
|
||||
ResourceMark rm(current);
|
||||
log_debug(class, init)("Thread \"%s\" linking %s",
|
||||
current->name(), external_name());
|
||||
}
|
||||
set_init_state(being_linked);
|
||||
set_init_thread(current);
|
||||
} else {
|
||||
if (debug_logging_enabled) {
|
||||
ResourceMark rm(current);
|
||||
log_debug(class, init)("Thread \"%s\" found %s already linked",
|
||||
current->name(), external_name());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Called to verify that a class can link during initialization, without
|
||||
// throwing a VerifyError.
|
||||
bool InstanceKlass::link_class_or_fail(TRAPS) {
|
||||
@@ -893,8 +869,9 @@ bool InstanceKlass::link_class_impl(TRAPS) {
|
||||
|
||||
// verification & rewriting
|
||||
{
|
||||
LockLinkState init_lock(this, jt);
|
||||
|
||||
HandleMark hm(THREAD);
|
||||
Handle h_init_lock(THREAD, init_lock());
|
||||
ObjectLocker ol(h_init_lock, jt);
|
||||
// rewritten will have been set if loader constraint error found
|
||||
// on an earlier link attempt
|
||||
// don't verify or rewrite if already rewritten
|
||||
@@ -952,7 +929,21 @@ bool InstanceKlass::link_class_impl(TRAPS) {
|
||||
// In case itable verification is ever added.
|
||||
// itable().verify(tty, true);
|
||||
#endif
|
||||
set_initialization_state_and_notify(linked, THREAD);
|
||||
if (UseVtableBasedCHA && Universe::is_fully_initialized()) {
|
||||
DeoptimizationScope deopt_scope;
|
||||
{
|
||||
// Now mark all code that assumes the class is not linked.
|
||||
// Set state under the Compile_lock also.
|
||||
MutexLocker ml(THREAD, Compile_lock);
|
||||
|
||||
set_init_state(linked);
|
||||
CodeCache::mark_dependents_on(&deopt_scope, this);
|
||||
}
|
||||
// Perform the deopt handshake outside Compile_lock.
|
||||
deopt_scope.deoptimize_marked();
|
||||
} else {
|
||||
set_init_state(linked);
|
||||
}
|
||||
if (JvmtiExport::should_post_class_prepare()) {
|
||||
JvmtiExport::post_class_prepare(THREAD, this);
|
||||
}
|
||||
@@ -1082,7 +1073,6 @@ void InstanceKlass::initialize_impl(TRAPS) {
|
||||
DTRACE_CLASSINIT_PROBE(required, -1);
|
||||
|
||||
bool wait = false;
|
||||
bool throw_error = false;
|
||||
|
||||
JavaThread* jt = THREAD;
|
||||
|
||||
@@ -1091,24 +1081,27 @@ void InstanceKlass::initialize_impl(TRAPS) {
|
||||
// refer to the JVM book page 47 for description of steps
|
||||
// Step 1
|
||||
{
|
||||
MonitorLocker ml(jt, _init_monitor);
|
||||
Handle h_init_lock(THREAD, init_lock());
|
||||
ObjectLocker ol(h_init_lock, jt);
|
||||
|
||||
// Step 2
|
||||
while (is_being_initialized() && !is_init_thread(jt)) {
|
||||
// If we were to use wait() instead of waitInterruptibly() then
|
||||
// we might end up throwing IE from link/symbol resolution sites
|
||||
// that aren't expected to throw. This would wreak havoc. See 6320309.
|
||||
while (is_being_initialized() && !is_reentrant_initialization(jt)) {
|
||||
if (debug_logging_enabled) {
|
||||
ResourceMark rm(jt);
|
||||
log_debug(class, init)("Thread \"%s\" waiting for initialization of %s by thread \"%s\"",
|
||||
jt->name(), external_name(), init_thread_name());
|
||||
}
|
||||
|
||||
wait = true;
|
||||
jt->set_class_to_be_initialized(this);
|
||||
ml.wait();
|
||||
ol.wait_uninterruptibly(jt);
|
||||
jt->set_class_to_be_initialized(nullptr);
|
||||
}
|
||||
|
||||
// Step 3
|
||||
if (is_being_initialized() && is_init_thread(jt)) {
|
||||
if (is_being_initialized() && is_reentrant_initialization(jt)) {
|
||||
if (debug_logging_enabled) {
|
||||
ResourceMark rm(jt);
|
||||
log_debug(class, init)("Thread \"%s\" recursively initializing %s",
|
||||
@@ -1136,7 +1129,19 @@ void InstanceKlass::initialize_impl(TRAPS) {
|
||||
log_debug(class, init)("Thread \"%s\" found %s is in error state",
|
||||
jt->name(), external_name());
|
||||
}
|
||||
throw_error = true;
|
||||
|
||||
DTRACE_CLASSINIT_PROBE_WAIT(erroneous, -1, wait);
|
||||
ResourceMark rm(THREAD);
|
||||
Handle cause(THREAD, get_initialization_error(THREAD));
|
||||
|
||||
stringStream ss;
|
||||
ss.print("Could not initialize class %s", external_name());
|
||||
if (cause.is_null()) {
|
||||
THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), ss.as_string());
|
||||
} else {
|
||||
THROW_MSG_CAUSE(vmSymbols::java_lang_NoClassDefFoundError(),
|
||||
ss.as_string(), cause);
|
||||
}
|
||||
} else {
|
||||
|
||||
// Step 6
|
||||
@@ -1150,22 +1155,6 @@ void InstanceKlass::initialize_impl(TRAPS) {
|
||||
}
|
||||
}
|
||||
|
||||
// Throw error outside lock
|
||||
if (throw_error) {
|
||||
DTRACE_CLASSINIT_PROBE_WAIT(erroneous, -1, wait);
|
||||
ResourceMark rm(THREAD);
|
||||
Handle cause(THREAD, get_initialization_error(THREAD));
|
||||
|
||||
stringStream ss;
|
||||
ss.print("Could not initialize class %s", external_name());
|
||||
if (cause.is_null()) {
|
||||
THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), ss.as_string());
|
||||
} else {
|
||||
THROW_MSG_CAUSE(vmSymbols::java_lang_NoClassDefFoundError(),
|
||||
ss.as_string(), cause);
|
||||
}
|
||||
}
|
||||
|
||||
// Step 7
|
||||
// Next, if C is a class rather than an interface, initialize it's super class and super
|
||||
// interfaces.
|
||||
@@ -1223,7 +1212,7 @@ void InstanceKlass::initialize_impl(TRAPS) {
|
||||
|
||||
// Step 9
|
||||
if (!HAS_PENDING_EXCEPTION) {
|
||||
set_initialization_state_and_notify(fully_initialized, THREAD);
|
||||
set_initialization_state_and_notify(fully_initialized, CHECK);
|
||||
debug_only(vtable().verify(tty, true);)
|
||||
}
|
||||
else {
|
||||
@@ -1256,43 +1245,26 @@ void InstanceKlass::initialize_impl(TRAPS) {
|
||||
}
|
||||
|
||||
|
||||
- void InstanceKlass::set_initialization_state_and_notify(ClassState state, JavaThread* current) {
-   MonitorLocker ml(current, _init_monitor);
-
-   if (state == linked && UseVtableBasedCHA && Universe::is_fully_initialized()) {
-     DeoptimizationScope deopt_scope;
-     {
-       // Now mark all code that assumes the class is not linked.
-       // Set state under the Compile_lock also.
-       MutexLocker ml(current, Compile_lock);
-
-       set_init_thread(nullptr); // reset _init_thread before changing _init_state
-       set_init_state(state);
-
-       CodeCache::mark_dependents_on(&deopt_scope, this);
-     }
-     // Perform the deopt handshake outside Compile_lock.
-     deopt_scope.deoptimize_marked();
-   }
-   ml.notify_all();
- }
+ void InstanceKlass::set_initialization_state_and_notify(ClassState state, TRAPS) {
+   Handle h_init_lock(THREAD, init_lock());
+   if (h_init_lock() != nullptr) {
+     ObjectLocker ol(h_init_lock, THREAD);
+     set_init_thread(nullptr); // reset _init_thread before changing _init_state
+     set_init_state(state);
+     fence_and_clear_init_lock();
+     ol.notify_all(CHECK);
+   } else {
+     assert(h_init_lock() != nullptr, "The initialization state should never be set twice");
+     set_init_thread(nullptr); // reset _init_thread before changing _init_state
+     set_init_state(state);
+   }
+ }

// Update hierarchy. This is done before the new klass has been added to the SystemDictionary. The Compile_lock
// is grabbed, to ensure that the compiler is not using the class hierarchy.
- void InstanceKlass::add_to_hierarchy(JavaThread* current) {
+ void InstanceKlass::add_to_hierarchy_impl(JavaThread* current) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must NOT be at safepoint");

- // In case we are not using CHA based vtables we need to make sure the loaded
- // deopt is completed before anyone links this class.
- // Linking is done with _init_monitor held, by loading and deopting with it
- // held we make sure the deopt is completed before linking.
- if (!UseVtableBasedCHA) {
-   init_monitor()->lock();
- }

  DeoptimizationScope deopt_scope;
  {
    MutexLocker ml(current, Compile_lock);

@@ -1314,12 +1286,26 @@ void InstanceKlass::add_to_hierarchy(JavaThread* current) {
  }
  // Perform the deopt handshake outside Compile_lock.
  deopt_scope.deoptimize_marked();
}

- if (!UseVtableBasedCHA) {
-   init_monitor()->unlock();
- }
+ void InstanceKlass::add_to_hierarchy(JavaThread* current) {
+
+   if (UseVtableBasedCHA || !Universe::is_fully_initialized()) {
+     add_to_hierarchy_impl(current);
+   } else {
+     // In case we are not using CHA based vtables we need to make sure the loaded
+     // deopt is completed before anyone links this class.
+     // Linking is done with init_lock held, by loading and deopting with it
+     // held we make sure the deopt is completed before linking.
+     Handle h_init_lock(current, init_lock());
+     ObjectLocker ol(h_init_lock, current);
+     add_to_hierarchy_impl(current);
+
+     // This doesn't need a notify because the wait is only on the class initialization path.
+   }
+ }

InstanceKlass* InstanceKlass::implementor() const {
  InstanceKlass* volatile* ik = adr_implementor();
  if (ik == nullptr) {

@@ -2586,7 +2572,6 @@ void InstanceKlass::remove_unshareable_info() {
  _nest_host = nullptr;
  init_shared_package_entry();
  _dep_context_last_cleaned = 0;
- _init_monitor = nullptr;

  remove_unshareable_flags();
}

@@ -2690,9 +2675,6 @@ void InstanceKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handl
  if (DiagnoseSyncOnValueBasedClasses && has_value_based_class_annotation()) {
    set_is_value_based();
  }

- // restore the monitor
- _init_monitor = create_init_monitor("InstanceKlassInitMonitorRestored_lock");
}

// Check if a class or any of its supertypes has a version older than 50.

@@ -2788,9 +2770,6 @@ void InstanceKlass::release_C_heap_structures(bool release_sub_metadata) {
    methods_do(method_release_C_heap_structures);
  }

- // Destroy the init_monitor
- delete _init_monitor;

  // Deallocate oop map cache
  if (_oop_map_cache != nullptr) {
    delete _oop_map_cache;

@@ -3482,7 +3461,7 @@ nmethod* InstanceKlass::lookup_osr_nmethod(const Method* m, int bci, int comp_le
#define BULLET " - "

static const char* state_names[] = {
- "allocated", "loaded", "being_linked", "linked", "being_initialized", "fully_initialized", "initialization_error"
+ "allocated", "loaded", "linked", "being_initialized", "fully_initialized", "initialization_error"
};

static void print_vtable(intptr_t* start, int len, outputStream* st) {

@@ -4132,17 +4111,13 @@ void JNIid::verify(Klass* holder) {
}

void InstanceKlass::set_init_state(ClassState state) {
- if (state > loaded) {
-   assert_lock_strong(_init_monitor);
- }
#ifdef ASSERT
  bool good_state = is_shared() ? (_init_state <= state)
                                : (_init_state < state);
- bool link_failed = _init_state == being_linked && state == loaded;
- assert(good_state || state == allocated || link_failed, "illegal state transition");
+ assert(good_state || state == allocated, "illegal state transition");
#endif
  assert(_init_thread == nullptr, "should be cleared before state change");
- Atomic::store(&_init_state, state);
+ _init_state = state;
}

#if INCLUDE_JVMTI
@@ -152,7 +152,6 @@ class InstanceKlass: public Klass {
  enum ClassState : u1 {
    allocated, // allocated (but not yet linked)
    loaded, // loaded and inserted in class hierarchy (but not linked yet)
-   being_linked, // currently running verifier and rewriter
    linked, // successfully linked/verified (but not initialized yet)
    being_initialized, // currently running class initializer
    fully_initialized, // initialized (successful final state)

@@ -226,14 +225,20 @@ class InstanceKlass: public Klass {

  volatile u2 _idnum_allocated_count; // JNI/JVMTI: increments with the addition of methods, old ids don't change

+ // _is_marked_dependent can be set concurrently, thus cannot be part of the
+ // _misc_flags.
  bool _is_marked_dependent; // used for marking during flushing and deoptimization

+ // Class states are defined as ClassState (see above).
+ // Place the _init_state here to utilize the unused 2-byte after
+ // _idnum_allocated_count.
+ volatile ClassState _init_state; // state of class

  u1 _reference_type; // reference type

  // State is set either at parse time or while executing, atomically to not disturb other state
  InstanceKlassFlags _misc_flags;

- Monitor* _init_monitor; // mutual exclusion to _init_state and _init_thread.
  JavaThread* volatile _init_thread; // Pointer to current thread doing initialization (to handle recursive initialization)

  OopMapCache* volatile _oop_map_cache; // OopMapCache for all methods in the klass (allocated lazily)

@@ -497,41 +502,23 @@ public:
                 TRAPS);

- JavaThread* init_thread() { return Atomic::load(&_init_thread); }
- // We can safely access the name as long as we hold the _init_monitor.
- const char* init_thread_name() {
-   assert(_init_monitor->owned_by_self(), "Must hold _init_monitor here");
-   return init_thread()->name_raw();
- }
-
- public:
  // initialization state
- bool is_loaded() const { return init_state() >= loaded; }
- bool is_linked() const { return init_state() >= linked; }
- bool is_being_linked() const { return init_state() == being_linked; }
- bool is_initialized() const { return init_state() == fully_initialized; }
- bool is_not_initialized() const { return init_state() < being_initialized; }
- bool is_being_initialized() const { return init_state() == being_initialized; }
- bool is_in_error_state() const { return init_state() == initialization_error; }
- bool is_init_thread(JavaThread *thread) { return thread == init_thread(); }
- ClassState init_state() const { return Atomic::load(&_init_state); }
+ bool is_loaded() const { return _init_state >= loaded; }
+ bool is_linked() const { return _init_state >= linked; }
+ bool is_initialized() const { return _init_state == fully_initialized; }
+ bool is_not_initialized() const { return _init_state < being_initialized; }
+ bool is_being_initialized() const { return _init_state == being_initialized; }
+ bool is_in_error_state() const { return _init_state == initialization_error; }
+ bool is_reentrant_initialization(Thread *thread) { return thread == _init_thread; }
+ ClassState init_state() const { return _init_state; }
  const char* init_state_name() const;
  bool is_rewritten() const { return _misc_flags.rewritten(); }

- class LockLinkState : public StackObj {
-   InstanceKlass* _ik;
-   JavaThread* _current;
-  public:
-   LockLinkState(InstanceKlass* ik, JavaThread* current) : _ik(ik), _current(current) {
-     ik->check_link_state_and_wait(current);
-   }
-   ~LockLinkState() {
-     if (!_ik->is_linked()) {
-       // Reset to loaded if linking failed.
-       _ik->set_initialization_state_and_notify(loaded, _current);
-     }
-   }
- };

  // is this a sealed class
  bool is_sealed() const;

@@ -829,7 +816,7 @@ public:

  // initialization
  void call_class_initializer(TRAPS);
- void set_initialization_state_and_notify(ClassState state, JavaThread* current);
+ void set_initialization_state_and_notify(ClassState state, TRAPS);

  // OopMapCache support
  OopMapCache* oop_map_cache() { return _oop_map_cache; }

@@ -841,6 +828,10 @@ public:
  void set_jni_ids(JNIid* ids) { _jni_ids = ids; }
  JNIid* jni_id_for(int offset);

+ private:
+  void add_to_hierarchy_impl(JavaThread* current);

+ public:
  // maintenance of deoptimization dependencies
  inline DependencyContext dependencies();
  void mark_dependent_nmethods(DeoptimizationScope* deopt_scope, KlassDepChange& changes);

@@ -1055,7 +1046,7 @@ public:
 public:
  u2 idnum_allocated_count() const { return _idnum_allocated_count; }

 private:
  // initialization state
  void set_init_state(ClassState state);
  void set_rewritten() { _misc_flags.set_rewritten(true); }

@@ -1072,6 +1063,12 @@ public:
  jmethodID update_jmethod_id(jmethodID* jmeths, Method* method, int idnum);

 public:
+ // Lock for (1) initialization; (2) access to the ConstantPool of this class.
+ // Must be one per class and it has to be a VM internal object so java code
+ // cannot lock it (like the mirror).
+ // It has to be an object not a Mutex because it's held through java calls.
+ oop init_lock() const;

  // Returns the array class for the n'th dimension
  virtual ArrayKlass* array_klass(int n, TRAPS);
  virtual ArrayKlass* array_klass_or_null(int n);

@@ -1081,10 +1078,9 @@ public:
  virtual ArrayKlass* array_klass_or_null();

  static void clean_initialization_error_table();

- Monitor* init_monitor() const { return _init_monitor; }
 private:
- void check_link_state_and_wait(JavaThread* current);
+ void fence_and_clear_init_lock();

  bool link_class_impl (TRAPS);
  bool verify_code (TRAPS);
  void initialize_impl (TRAPS);
@@ -54,6 +54,7 @@ uint32_t Symbol::pack_hash_and_refcount(short hash, int refcount) {
}

Symbol::Symbol(const u1* name, int length, int refcount) {
  assert(length <= max_length(), "SymbolTable should have caught this!");
  _hash_and_refcount = pack_hash_and_refcount((short)os::random(), refcount);
  _length = (u2)length;
  // _body[0..1] are allocated in the header just by coincidence in the current

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -130,6 +130,7 @@ class Symbol : public MetaspaceObj {
    return (int)heap_word_size(byte_size(length));
  }

  // Constructor is private for use only by SymbolTable.
  Symbol(const u1* name, int length, int refcount);

  static short extract_hash(uint32_t value) { return (short)(value >> 16); }
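The layout of _hash_and_refcount is visible from extract_hash above: the hash sits in the high 16 bits, the refcount in the low 16. A minimal standalone sketch of that packing (the pack helper below is reconstructed for illustration; only extract_hash appears verbatim in the diff):

#include <cstdint>
#include <cstdio>

static uint32_t pack_hash_and_refcount(short hash, int refcount) {
  // high half: hash, low half: refcount (layout assumed from the extractor)
  return ((uint32_t)(uint16_t)hash << 16) | (uint16_t)refcount;
}
static short extract_hash(uint32_t value)    { return (short)(value >> 16); }
static int   extract_refcount(uint32_t value) { return (int)(value & 0xFFFF); }

int main() {
  uint32_t v = pack_hash_and_refcount((short)0x1234, 5);
  printf("hash=0x%x refcount=%d\n", (unsigned)(unsigned short)extract_hash(v),
         extract_refcount(v));
  return 0;
}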
@@ -1950,6 +1950,22 @@ bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNod
}

// Check that all locks/unlocks associated with object come from balanced regions.
bool AbstractLockNode::is_balanced() {
  Node* obj = obj_node();
  for (uint j = 0; j < obj->outcnt(); j++) {
    Node* n = obj->raw_out(j);
    if (n->is_AbstractLock() &&
        n->as_AbstractLock()->obj_node()->eqv_uncast(obj)) {
      BoxLockNode* n_box = n->as_AbstractLock()->box_node()->as_BoxLock();
      if (n_box->is_unbalanced()) {
        return false;
      }
    }
  }
  return true;
}

const char* AbstractLockNode::_kind_names[] = {"Regular", "NonEscObj", "Coarsened", "Nested"};

const char * AbstractLockNode::kind_as_string() const {
@@ -2056,6 +2072,8 @@ Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
      int unlocks = 0;
      if (Verbose) {
        tty->print_cr("=== Locks coarsening ===");
        tty->print("Obj: ");
        obj_node()->dump();
      }
      for (int i = 0; i < lock_ops.length(); i++) {
        AbstractLockNode* lock = lock_ops.at(i);
@@ -2064,6 +2082,8 @@ Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
        else
          unlocks++;
        if (Verbose) {
          tty->print("Box %d: ", i);
          box_node()->dump();
          tty->print(" %d: ", i);
          lock->dump();
        }
@@ -1154,6 +1154,10 @@ public:
  void set_coarsened() { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested() { _kind = Nested; set_eliminated_lock_counter(); }

  // Check that all locks/unlocks associated with object come from balanced regions.
  // They can become unbalanced after coarsening optimization or on OSR entry.
  bool is_balanced();

  // locking does not modify its arguments
  virtual bool may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false; }
@@ -574,18 +574,23 @@ bool ConnectionGraph::can_reduce_check_users(Node* n, uint nesting) const {
        // CmpP/N used by the If controlling the cast.
        if (use->in(0)->is_IfTrue() || use->in(0)->is_IfFalse()) {
          Node* iff = use->in(0)->in(0);
          if (iff->Opcode() == Op_If && iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
          // We may have an Opaque4 node between the If and Bool nodes. Bail out
          // in such a case - we need to preserve Opaque4 for correct processing
          // of predicates after loop opts.
          bool can_reduce = (iff->Opcode() == Op_If) && iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp();
          if (can_reduce) {
            Node* iff_cmp = iff->in(1)->in(1);
            int opc = iff_cmp->Opcode();
            if ((opc == Op_CmpP || opc == Op_CmpN) && !can_reduce_cmp(n, iff_cmp)) {
            can_reduce = (opc == Op_CmpP || opc == Op_CmpN) && can_reduce_cmp(n, iff_cmp);
          }
          if (!can_reduce) {
#ifndef PRODUCT
            if (TraceReduceAllocationMerges) {
              tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP %d doesn't have simple control.", n->_idx, _invocation, use->_idx);
              n->dump(5);
            }
#endif
            return false;
            if (TraceReduceAllocationMerges) {
              tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP %d doesn't have simple control.", n->_idx, _invocation, use->_idx);
              n->dump(5);
            }
#endif
            return false;
          }
        }
      }
@@ -651,7 +656,12 @@ Node* ConnectionGraph::specialize_cmp(Node* base, Node* curr_ctrl) {
  if (curr_ctrl == nullptr || curr_ctrl->is_Region()) {
    con = _igvn->zerocon(t->basic_type());
  } else {
    Node* curr_cmp = curr_ctrl->in(0)->in(1)->in(1); // true/false -> if -> bool -> cmp
    // can_reduce_check_users() verified graph: true/false -> if -> bool -> cmp
    assert(curr_ctrl->in(0)->Opcode() == Op_If, "unexpected node %s", curr_ctrl->in(0)->Name());
    Node* bol = curr_ctrl->in(0)->in(1);
    assert(bol->is_Bool(), "unexpected node %s", bol->Name());
    Node* curr_cmp = bol->in(1);
    assert(curr_cmp->Opcode() == Op_CmpP || curr_cmp->Opcode() == Op_CmpN, "unexpected node %s", curr_cmp->Name());
    con = curr_cmp->in(1)->is_Con() ? curr_cmp->in(1) : curr_cmp->in(2);
  }
@@ -3501,12 +3511,11 @@ bool ConnectionGraph::not_global_escape(Node *n) {
// and locked code region (identified by BoxLockNode) is balanced:
// all compiled code paths have corresponding Lock/Unlock pairs.
bool ConnectionGraph::can_eliminate_lock(AbstractLockNode* alock) {
  BoxLockNode* box = alock->box_node()->as_BoxLock();
  if (!box->is_unbalanced() && not_global_escape(alock->obj_node())) {
  if (alock->is_balanced() && not_global_escape(alock->obj_node())) {
    if (EliminateNestedLocks) {
      // We can mark whole locking region as Local only when only
      // one object is used for locking.
      box->set_local();
      alock->box_node()->as_BoxLock()->set_local();
    }
    return true;
  }
@@ -755,6 +755,7 @@ bool IfNode::cmpi_folds(PhaseIterGVN* igvn, bool fold_ne) {
bool IfNode::is_ctrl_folds(Node* ctrl, PhaseIterGVN* igvn) {
  return ctrl != nullptr &&
         ctrl->is_Proj() &&
         ctrl->outcnt() == 1 && // No side-effects
         ctrl->in(0) != nullptr &&
         ctrl->in(0)->Opcode() == Op_If &&
         ctrl->in(0)->outcnt() == 2 &&
@@ -1328,7 +1329,7 @@ Node* IfNode::fold_compares(PhaseIterGVN* igvn) {

  if (cmpi_folds(igvn)) {
    Node* ctrl = in(0);
    if (is_ctrl_folds(ctrl, igvn) && ctrl->outcnt() == 1) {
    if (is_ctrl_folds(ctrl, igvn)) {
      // An integer comparison immediately dominated by another integer
      // comparison
      ProjNode* success = nullptr;
@@ -5463,42 +5463,72 @@ void LibraryCallKit::create_new_uncommon_trap(CallStaticJavaNode* uncommon_trap_
  uncommon_trap_call->set_req(0, top()); // not used anymore, kill it
}

// Common checks for array sorting intrinsics arguments.
// Returns `true` if checks passed.
bool LibraryCallKit::check_array_sort_arguments(Node* elementType, Node* obj, BasicType& bt) {
  // check address of the class
  if (elementType == nullptr || elementType->is_top()) {
    return false; // dead path
  }
  const TypeInstPtr* elem_klass = gvn().type(elementType)->isa_instptr();
  if (elem_klass == nullptr) {
    return false; // dead path
  }
  // java_mirror_type() returns non-null for compile-time Class constants only
  ciType* elem_type = elem_klass->java_mirror_type();
  if (elem_type == nullptr) {
    return false;
  }
  bt = elem_type->basic_type();
  // Disable the intrinsic if the CPU does not support SIMD sort
  if (!Matcher::supports_simd_sort(bt)) {
    return false;
  }
  // check address of the array
  if (obj == nullptr || obj->is_top()) {
    return false; // dead path
  }
  const TypeAryPtr* obj_t = _gvn.type(obj)->isa_aryptr();
  if (obj_t == nullptr || obj_t->elem() == Type::BOTTOM) {
    return false; // failed input validation
  }
  return true;
}
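The helper reports validity through its bool result and delivers the element type through the BasicType& out-parameter, so both sorting intrinsics can share one bail-out path. A minimal standalone sketch of the same calling shape (hypothetical names and types, not the HotSpot API):

#include <cstdio>

enum BasicType { T_ILLEGAL = 0, T_INT = 1 };

static bool check_args(const int* array, int length, BasicType& bt) {
  if (array == nullptr || length < 0) {
    return false;   // dead path: caller bails out, bt left untouched
  }
  bt = T_INT;       // derived from the mirror class constant in the real helper
  return true;
}

int main() {
  int data[3] = {2, 7, 1};
  BasicType bt = T_ILLEGAL;
  if (!check_args(data, 3, bt)) {
    return 1;       // mirrors "return false" in the intrinsics
  }
  printf("validated, element type id = %d\n", (int)bt);
  return 0;
}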
//------------------------------inline_array_partition-----------------------
bool LibraryCallKit::inline_array_partition() {
  address stubAddr = StubRoutines::select_array_partition_function();
  if (stubAddr == nullptr) {
    return false; // Intrinsic's stub is not implemented on this platform
  }
  assert(callee()->signature()->size() == 9, "arrayPartition has 8 parameters (one long)");

  Node* elementType = null_check(argument(0));
  // no receiver because it is a static method
  Node* elementType = argument(0);
  Node* obj = argument(1);
  Node* offset = argument(2);
  Node* offset = argument(2); // long
  Node* fromIndex = argument(4);
  Node* toIndex = argument(5);
  Node* indexPivot1 = argument(6);
  Node* indexPivot2 = argument(7);
  // PartitionOperation: argument(8) is ignored

  Node* pivotIndices = nullptr;
  BasicType bt = T_ILLEGAL;

  if (!check_array_sort_arguments(elementType, obj, bt)) {
    return false;
  }
  null_check(obj);
  // If obj is dead, only null-path is taken.
  if (stopped()) {
    return true;
  }
  // Set the original stack and the reexecute bit for the interpreter to reexecute
  // the bytecode that invokes DualPivotQuicksort.partition() if deoptimization happens.
  { PreserveReexecuteState preexecs(this);
    jvms()->set_should_reexecute(true);

    const TypeInstPtr* elem_klass = gvn().type(elementType)->isa_instptr();
    ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type();
    BasicType bt = elem_type->basic_type();
    // Disable the intrinsic if the CPU does not support SIMD sort
    if (!Matcher::supports_simd_sort(bt)) {
      return false;
    }
    address stubAddr = nullptr;
    stubAddr = StubRoutines::select_array_partition_function();
    // stub not loaded
    if (stubAddr == nullptr) {
      return false;
    }
    // get the address of the array
    const TypeAryPtr* obj_t = _gvn.type(obj)->isa_aryptr();
    if (obj_t == nullptr || obj_t->elem() == Type::BOTTOM) {
      return false; // failed input validation
    }
    Node* obj_adr = make_unsafe_address(obj, offset);

    // create the pivotIndices array of type int and size = 2
@@ -5531,31 +5561,29 @@ bool LibraryCallKit::inline_array_partition() {

//------------------------------inline_array_sort-----------------------
bool LibraryCallKit::inline_array_sort() {
  address stubAddr = StubRoutines::select_arraysort_function();
  if (stubAddr == nullptr) {
    return false; // Intrinsic's stub is not implemented on this platform
  }
  assert(callee()->signature()->size() == 7, "arraySort has 6 parameters (one long)");

  Node* elementType = null_check(argument(0));
  // no receiver because it is a static method
  Node* elementType = argument(0);
  Node* obj = argument(1);
  Node* offset = argument(2);
  Node* offset = argument(2); // long
  Node* fromIndex = argument(4);
  Node* toIndex = argument(5);
  // SortOperation: argument(6) is ignored

  const TypeInstPtr* elem_klass = gvn().type(elementType)->isa_instptr();
  ciType* elem_type = elem_klass->const_oop()->as_instance()->java_mirror_type();
  BasicType bt = elem_type->basic_type();
  // Disable the intrinsic if the CPU does not support SIMD sort
  if (!Matcher::supports_simd_sort(bt)) {
  BasicType bt = T_ILLEGAL;

  if (!check_array_sort_arguments(elementType, obj, bt)) {
    return false;
  }
  address stubAddr = nullptr;
  stubAddr = StubRoutines::select_arraysort_function();
  // stub not loaded
  if (stubAddr == nullptr) {
    return false;
  }

  // get address of the array
  const TypeAryPtr* obj_t = _gvn.type(obj)->isa_aryptr();
  if (obj_t == nullptr || obj_t->elem() == Type::BOTTOM) {
    return false; // failed input validation
  null_check(obj);
  // If obj is dead, only null-path is taken.
  if (stopped()) {
    return true;
  }
  Node* obj_adr = make_unsafe_address(obj, offset);
@@ -7554,8 +7582,6 @@ bool LibraryCallKit::inline_intpoly_montgomeryMult_P256() {
                                 OptoRuntime::intpoly_montgomeryMult_P256_Type(),
                                 stubAddr, stubName, TypePtr::BOTTOM,
                                 a_start, b_start, r_start);
  Node* result = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
  set_result(result);
  return true;
}
@@ -279,6 +279,7 @@ class LibraryCallKit : public GraphKit {
  JVMState* arraycopy_restore_alloc_state(AllocateArrayNode* alloc, int& saved_reexecute_sp);
  void arraycopy_move_allocation_here(AllocateArrayNode* alloc, Node* dest, JVMState* saved_jvms_before_guards, int saved_reexecute_sp,
                                      uint new_idx);
  bool check_array_sort_arguments(Node* elementType, Node* obj, BasicType& bt);
  bool inline_array_sort();
  bool inline_array_partition();
  typedef enum { LS_get_add, LS_get_set, LS_cmp_swap, LS_cmp_swap_weak, LS_cmp_exchange } LoadStoreKind;
@@ -46,7 +46,7 @@ private:
    Eliminated // All lock/unlock in region were eliminated
  } _kind;

#ifdef ASSERT
#ifndef PRODUCT
  const char* _kind_name[6] = {
    "Regular",
    "Local",
@@ -124,7 +124,9 @@ public:

#ifndef PRODUCT
  virtual void format( PhaseRegAlloc *, outputStream *st ) const;
  virtual void dump_spec(outputStream *st) const { st->print(" Lock %d",_slot); }
  virtual void dump_spec(outputStream *st) const {
    st->print(" Lock slot: %d, Kind: %s", _slot, _kind_name[(int)_kind]);
  }
#endif
};
@@ -795,18 +795,25 @@ Node *PhaseIdealLoop::conditional_move( Node *region ) {
  // Avoid duplicated float compare.
  if (phis > 1 && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) return nullptr;

  // Ignore cost if CMOVE can be moved outside the loop.
  if (used_inside_loop && cost >= ConditionalMoveLimit) {
    return nullptr;
  float infrequent_prob = PROB_UNLIKELY_MAG(3);
  // Ignore cost and blocks frequency if CMOVE can be moved outside the loop.
  if (used_inside_loop) {
    if (cost >= ConditionalMoveLimit) return nullptr; // Too much goo

    // The BlockLayoutByFrequency optimization moves infrequent branches off
    // the hot path. There is no point in CMOV'ing in such a case (110 is used
    // instead of 100 to take the inexactness of the float value into account).
    if (BlockLayoutByFrequency) {
      infrequent_prob = MAX2(infrequent_prob, (float)BlockLayoutMinDiamondPercentage/110.0f);
    }
  }
  // Check for highly predictable branch. No point in CMOV'ing if
  // we are going to predict accurately all the time.
  constexpr float infrequent_prob = PROB_UNLIKELY_MAG(2);
  if (C->use_cmove() && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) {
    // keep going
  } else if (iff->_prob < infrequent_prob || iff->_prob > (1.0f - infrequent_prob)) {
  } else if (iff->_prob < infrequent_prob ||
             iff->_prob > (1.0f - infrequent_prob))
    return nullptr;
  }

  // --------------
  // Now replace all Phis with CMOV's
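To make the cutoff concrete, here is the arithmetic spelled out as a standalone sketch (two assumptions are baked in: a magnitude-3 unlikely probability is 1-in-1000, and BlockLayoutMinDiamondPercentage defaults to 20):

#include <algorithm>
#include <cstdio>

int main() {
  float infrequent_prob = 1.0e-3f;  // PROB_UNLIKELY_MAG(3), assumed
  int   min_diamond_pct = 20;       // BlockLayoutMinDiamondPercentage default, assumed
  // 110 instead of 100 leaves slack for float rounding, as the comment notes.
  infrequent_prob = std::max(infrequent_prob, (float)min_diamond_pct / 110.0f);
  printf("CMOV is skipped when branch prob < %f or > %f\n",
         infrequent_prob, 1.0f - infrequent_prob);
  return 0;
}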
@@ -2987,52 +2994,101 @@ RegionNode* PhaseIdealLoop::insert_region_before_proj(ProjNode* proj) {
  return reg;
}

//------------------------------ insert_cmpi_loop_exit -------------------------------------
// Clone a signed compare loop exit from an unsigned compare and
// insert it before the unsigned cmp on the stay-in-loop path.
// All new nodes inserted in the dominator tree between the original
// if and its projections. The original if test is replaced with
// a constant to force the stay-in-loop path.
// Idea
// ----
// Partial Peeling tries to rotate the loop in such a way that it can later be turned into a counted loop. Counted loops
// require a signed loop exit test. When calling this method, we've only found a suitable unsigned test to partial peel
// with. Therefore, we try to split off a signed loop exit test from the unsigned test such that it can be used as new
// loop exit while keeping the unsigned test unchanged and preserving the same behavior as if we've used the unsigned
// test alone instead:
//
// This is done to make sure that the original if and its projections
// still dominate the same set of control nodes, that the ctrl() relation
// from data nodes to them is preserved, and that their loop nesting is
// preserved.
// Before Partial Peeling:
//   Loop:
//     <peeled section>
//     Split off signed loop exit test
//     <-- CUT HERE -->
//     Unchanged unsigned loop exit test
//     <rest of unpeeled section>
//     goto Loop
//
// before
//          if (i <u limit)          unsigned compare loop exit
// After Partial Peeling:
//   <cloned peeled section>
//   Cloned split off signed loop exit test
//   Loop:
//     Unchanged unsigned loop exit test
//     <rest of unpeeled section>
//     <peeled section>
//     Split off signed loop exit test
//     goto Loop
//
// Details
// -------
// Before:
//          if (i <u limit)          Unsigned loop exit condition
//         /       |
//        v        v
//   exit-proj   stay-in-loop-proj
//
// after
//          if (stay-in-loop-const)  original if
//         /       |
//        /        v
//       /  if (i < limit)           new signed test
// Split off a signed loop exit test (i.e. with CmpI) from an unsigned loop exit test (i.e. with CmpU) and insert it
// before the CmpU on the stay-in-loop path and keep both tests:
//
//          if (i <u limit)          Signed loop exit test
//        /        |
//       /  if (i <u limit)          Unsigned loop exit test
//      /  /       |
//     /  /        v
//    /  /  if (i <u limit)          new cloned unsigned test
//   /  /  /       |
//   v  v  v       |
//    region       |
//       |         |
//     dum-if      |
//    /  |         |
// ether |         |
//       v         v
//   v  v  v
//  exit-region   stay-in-loop-proj
//
// Implementation
// --------------
// We need to make sure that the new signed loop exit test is properly inserted into the graph such that the unsigned
// loop exit test still dominates the same set of control nodes, the ctrl() relation from data nodes to both loop
// exit tests is preserved, and their loop nesting is correct.
//
// To achieve that, we clone the unsigned loop exit test completely (leave it unchanged), insert the signed loop exit
// test above it and kill the original unsigned loop exit test by setting its condition to a constant
// (i.e. stay-in-loop-const in the graph below) such that IGVN can fold it later:
//
//          if (stay-in-loop-const)  Killed original unsigned loop exit test
//         /       |
//        /        v
//       /  if (i < limit)           Split off signed loop exit test
//      /  /       |
//     /  /        v
//    /  /  if (i <u limit)          Cloned unsigned loop exit test
//   /  /  /       |
//   v  v  v       |
//  exit-region    |
//       |         |
//    dummy-if     |
//    /  |         |
// dead  |         |
//       v         v
//  exit-proj   stay-in-loop-proj
//
IfNode* PhaseIdealLoop::insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree *loop) {
// Note: The dummy-if is inserted to create a region to merge the loop exits between the original to be killed unsigned
// loop exit test and its exit projection while keeping the exit projection (also see insert_region_before_proj()).
//
// Requirements
// ------------
// Note that we can only split off a signed loop exit test from the unsigned loop exit test when the behavior is exactly
// the same as before with only a single unsigned test. This is only possible if certain requirements are met.
// Otherwise, we need to bail out (see comments in the code below).
IfNode* PhaseIdealLoop::insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree* loop) {
  const bool Signed   = true;
  const bool Unsigned = false;

  BoolNode* bol = if_cmpu->in(1)->as_Bool();
  if (bol->_test._test != BoolTest::lt) return nullptr;
  if (bol->_test._test != BoolTest::lt) {
    return nullptr;
  }
  CmpNode* cmpu = bol->in(1)->as_Cmp();
  if (cmpu->Opcode() != Op_CmpU) return nullptr;
  assert(cmpu->Opcode() == Op_CmpU, "must be unsigned comparison");

  int stride = stride_of_possible_iv(if_cmpu);
  if (stride == 0) return nullptr;
  if (stride == 0) {
    return nullptr;
  }

  Node* lp_proj = stay_in_loop(if_cmpu, loop);
  guarantee(lp_proj != nullptr, "null loop node");
@@ -3044,14 +3100,93 @@ IfNode* PhaseIdealLoop::insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree *lo
    // We therefore can't add a single exit condition.
    return nullptr;
  }
  // The loop exit condition is !(i <u limit) ==> (i < 0 || i >= limit).
  // Split out the exit condition (i < 0) for stride < 0 or (i >= limit) for stride > 0.
  Node* limit = nullptr;
  // The unsigned loop exit condition is
  //   !(i <u limit)
  // =  i >=u limit
  //
  // First, we note that for any x for which
  //   0 <= x <= INT_MAX
  // we can convert x to an unsigned int and still get the same guarantee:
  //   0 <=  (uint) x <=  INT_MAX = (uint) INT_MAX
  //   0 <=u (uint) x <=u INT_MAX = (uint) INT_MAX   (LEMMA)
  //
  // With that in mind, if
  //   limit >= 0             (COND)
  // then the unsigned loop exit condition
  //   i >=u limit            (ULE)
  // is equivalent to
  //   i < 0 || i >= limit    (SLE-full)
  // because either i is negative and therefore always greater than MAX_INT when converting to unsigned
  //   (uint) i >=u MAX_INT >= limit >= 0
  // or otherwise
  //   i >= limit >= 0
  // holds due to (LEMMA).
  //
  // For completeness, a counterexample with limit < 0:
  // Assume i = -3 and limit = -2:
  //   i  < 0
  //   -3 < 0
  // is true and thus also "i < 0 || i >= limit". But
  //   i  >=u limit
  //   -3 >=u -2
  // is false.
  Node* limit = cmpu->in(2);
  const TypeInt* type_limit = _igvn.type(limit)->is_int();
  if (type_limit->_lo < 0) {
    return nullptr;
  }

  // We prove below that we can extract a single signed loop exit condition from (SLE-full), depending on the stride:
  //   stride < 0:
  //     i < 0        (SLE = SLE-negative)
  //   stride > 0:
  //     i >= limit   (SLE = SLE-positive)
  // such that we have the following graph before Partial Peeling with stride > 0 (similar for stride < 0):
  //
  // Loop:
  //   <peeled section>
  //   i >= limit     (SLE-positive)
  //   <-- CUT HERE -->
  //   i >=u limit    (ULE)
  //   <rest of unpeeled section>
  //   goto Loop
  //
  // We exit the loop if:
  //   (SLE) is true OR (ULE) is true
  // However, if (SLE) is true then (ULE) also needs to be true to ensure the exact same behavior. Otherwise, we wrongly
  // exit a loop that should not have been exited if we did not apply Partial Peeling. More formally, we need to ensure:
  //   (SLE) IMPLIES (ULE)
  // This indeed holds when (COND) is given:
  // - stride > 0:
  //     i >=  limit             // (SLE = SLE-positive)
  //     i >=  limit >= 0        // (COND)
  //     i >=u limit >= 0        // (LEMMA)
  //   which is the unsigned loop exit condition (ULE).
  // - stride < 0:
  //     i < 0                   // (SLE = SLE-negative)
  //     (uint) i >u MAX_INT     // (NEG) all negative values are greater than MAX_INT when converted to unsigned
  //     MAX_INT >=  limit >= 0  // (COND)
  //     MAX_INT >=u limit >= 0  // (LEMMA)
  //   and thus from (NEG) and (LEMMA):
  //     i >=u limit
  //   which is the unsigned loop exit condition (ULE).
  //
  //
  // After Partial Peeling, we have the following structure for stride > 0 (similar for stride < 0):
  //   <cloned peeled section>
  //   i >= limit (SLE-positive)
  // Loop:
  //   i >=u limit (ULE)
  //   <rest of unpeeled section>
  //   <peeled section>
  //   i >= limit (SLE-positive)
  //   goto Loop
  Node* rhs_cmpi;
  if (stride > 0) {
    limit = cmpu->in(2);
    rhs_cmpi = limit; // For i >= limit
  } else {
    limit = _igvn.makecon(TypeInt::ZERO);
    set_ctrl(limit, C->root());
    rhs_cmpi = _igvn.makecon(TypeInt::ZERO); // For i < 0
    set_ctrl(rhs_cmpi, C->root());
  }
  // Create a new region on the exit path
  RegionNode* reg = insert_region_before_proj(lp_exit);
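The (COND)/(LEMMA) argument above can be spot-checked mechanically. A minimal standalone sketch (plain C++, not HotSpot code) that compares the unsigned exit test against the split form for sampled values:

#include <cstdint>
#include <cstdio>

static bool unsigned_exit(int32_t i, int32_t limit) {
  return (uint32_t)i >= (uint32_t)limit;   // i >=u limit   (ULE)
}
static bool split_exit(int32_t i, int32_t limit) {
  return i < 0 || i >= limit;              // (SLE-full)
}

int main() {
  const int32_t samples[] = { INT32_MIN, -3, -2, -1, 0, 1, 2, 1000, INT32_MAX };
  for (int32_t limit : samples) {
    if (limit < 0) continue;  // (COND): equivalence is only claimed for limit >= 0
    for (int32_t i : samples) {
      if (unsigned_exit(i, limit) != split_exit(i, limit)) {
        printf("mismatch: i=%d limit=%d\n", i, limit);
        return 1;
      }
    }
  }
  printf("ULE == SLE-full for all sampled pairs with limit >= 0\n");
  // And the documented counterexample once (COND) is violated:
  printf("i=-3 limit=-2: split=%d unsigned=%d\n",
         split_exit(-3, -2), unsigned_exit(-3, -2));
  return 0;
}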
@@ -3059,7 +3194,7 @@ IfNode* PhaseIdealLoop::insert_cmpi_loop_exit(IfNode* if_cmpu, IdealLoopTree *lo

  // Clone the if-cmpu-true-false using a signed compare
  BoolTest::mask rel_i = stride > 0 ? bol->_test._test : BoolTest::ge;
  ProjNode* cmpi_exit = insert_if_before_proj(cmpu->in(1), Signed, rel_i, limit, lp_continue);
  ProjNode* cmpi_exit = insert_if_before_proj(cmpu->in(1), Signed, rel_i, rhs_cmpi, lp_continue);
  reg->add_req(cmpi_exit);

  // Clone the if-cmpu-true-false
@@ -870,6 +870,10 @@ public:
    assert(verify_jvms(jvms), "jvms must match");
    return in(_jvmadj + jvms->monitor_box_offset(idx));
  }
  Node* scalarized_obj(const JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(_jvmadj + jvms->scloff() + idx);
  }
  void set_local(const JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(_jvmadj + jvms->locoff() + idx, c);
@@ -2045,7 +2045,7 @@ void PhaseMacroExpand::mark_eliminated_box(Node* box, Node* obj) {

//-----------------------mark_eliminated_locking_nodes-----------------------
void PhaseMacroExpand::mark_eliminated_locking_nodes(AbstractLockNode *alock) {
  if (alock->box_node()->as_BoxLock()->is_unbalanced()) {
  if (!alock->is_balanced()) {
    return; // Can't do any more elimination for this locking region
  }
  if (EliminateNestedLocks) {
@@ -2984,6 +2984,9 @@ StoreNode* MergePrimitiveArrayStores::run() {
      type2aelembytes(bt) != _store->memory_size()) {
    return nullptr;
  }
  if (_store->is_unsafe_access()) {
    return nullptr;
  }

  // The _store must be the "last" store in a chain. If we find a use we could merge with
  // then that use or a store further down is the "last" store.
@@ -3017,11 +3020,13 @@ bool MergePrimitiveArrayStores::is_compatible_store(const StoreNode* other_store
  int opc = _store->Opcode();
  assert(opc == Op_StoreB || opc == Op_StoreC || opc == Op_StoreI, "precondition");
  assert(_store->adr_type()->isa_aryptr() != nullptr, "must be array store");
  assert(!_store->is_unsafe_access(), "no unsafe accesses");

  if (other_store == nullptr ||
      _store->Opcode() != other_store->Opcode() ||
      other_store->adr_type() == nullptr ||
      other_store->adr_type()->isa_aryptr() == nullptr) {
      other_store->adr_type()->isa_aryptr() == nullptr ||
      other_store->is_unsafe_access()) {
    return false;
  }
@@ -974,6 +974,27 @@ bool PhaseOutput::contains_as_owner(GrowableArray<MonitorValue*> *monarray, Obje
  return false;
}

// Determine if there is a scalar replaced object description represented by 'ov'.
bool PhaseOutput::contains_as_scalarized_obj(JVMState* jvms, MachSafePointNode* sfn,
                                             GrowableArray<ScopeValue*>* objs,
                                             ObjectValue* ov) const {
  for (int i = 0; i < jvms->scl_size(); i++) {
    Node* n = sfn->scalarized_obj(jvms, i);
    // Other kinds of nodes that we may encounter here, for instance constants
    // representing values of fields of objects scalarized, aren't relevant for
    // us, since they don't map to ObjectValue.
    if (!n->is_SafePointScalarObject()) {
      continue;
    }

    ObjectValue* other = (ObjectValue*) sv_for_node_id(objs, n->_idx);
    if (ov == other) {
      return true;
    }
  }
  return false;
}

//--------------------------Process_OopMap_Node--------------------------------
void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
  // Handle special safepoint nodes for synchronization
@@ -1137,7 +1158,10 @@ void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {

      for (int j = 0; j < merge->possible_objects()->length(); j++) {
        ObjectValue* ov = merge->possible_objects()->at(j)->as_ObjectValue();
        bool is_root = locarray->contains(ov) || exparray->contains(ov) || contains_as_owner(monarray, ov);
        bool is_root = locarray->contains(ov) ||
                       exparray->contains(ov) ||
                       contains_as_owner(monarray, ov) ||
                       contains_as_scalarized_obj(jvms, sfn, objs, ov);
        ov->set_root(is_root);
      }
    }
@@ -209,6 +209,9 @@ public:

  bool starts_bundle(const Node *n) const;
  bool contains_as_owner(GrowableArray<MonitorValue*> *monarray, ObjectValue *ov) const;
  bool contains_as_scalarized_obj(JVMState* jvms, MachSafePointNode* sfn,
                                  GrowableArray<ScopeValue*>* objs,
                                  ObjectValue* ov) const;

  // Dump formatted assembly
#if defined(SUPPORT_OPTO_ASSEMBLY)
@@ -1414,8 +1414,8 @@ const TypeFunc* OptoRuntime::intpoly_montgomeryMult_P256_Type() {

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = TypeInt::INT; // carry bits in output
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
  fields[TypeFunc::Parms + 0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}

@@ -1434,7 +1434,7 @@ const TypeFunc* OptoRuntime::intpoly_assign_Type() {

  // result type needed
  fields = TypeTuple::fields(1);
  fields[TypeFunc::Parms + 0] = NULL; // void
  fields[TypeFunc::Parms + 0] = nullptr; // void
  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
  return TypeFunc::make(domain, range);
}
@@ -3717,27 +3717,55 @@ void SuperWord::adjust_pre_loop_limit_to_align_main_loop_vectors() {
  TRACE_ALIGN_VECTOR_NODE(mask_AW);
  TRACE_ALIGN_VECTOR_NODE(adjust_pre_iter);

  // 4: Compute (3a, b):
  // 4: The computation of the new pre-loop limit could overflow (for 3a) or
  //    underflow (for 3b) the int range. This is problematic in combination
  //    with Range Check Elimination (RCE), which determines a "safe" range
  //    where a RangeCheck will always succeed. RCE adjusts the pre-loop limit
  //    such that we only enter the main-loop once we have reached the "safe"
  //    range, and adjusts the main-loop limit so that we exit the main-loop
  //    before we leave the "safe" range. After RCE, the range of the main-loop
  //    can only be safely narrowed, and should never be widened. Hence, the
  //    pre-loop limit can only be increased (for stride > 0), but an add
  //    overflow might decrease it, or decreased (for stride < 0), but a sub
  //    underflow might increase it. To prevent that, we perform the Sub / Add
  //    and Max / Min with long operations.
  old_limit       = new ConvI2LNode(old_limit);
  orig_limit      = new ConvI2LNode(orig_limit);
  adjust_pre_iter = new ConvI2LNode(adjust_pre_iter);
  phase()->register_new_node(old_limit, pre_ctrl);
  phase()->register_new_node(orig_limit, pre_ctrl);
  phase()->register_new_node(adjust_pre_iter, pre_ctrl);
  TRACE_ALIGN_VECTOR_NODE(old_limit);
  TRACE_ALIGN_VECTOR_NODE(orig_limit);
  TRACE_ALIGN_VECTOR_NODE(adjust_pre_iter);

  // 5: Compute (3a, b):
  //    new_limit = old_limit + adjust_pre_iter (stride > 0)
  //    new_limit = old_limit - adjust_pre_iter (stride < 0)
  //
  Node* new_limit = nullptr;
  if (stride < 0) {
    new_limit = new SubINode(old_limit, adjust_pre_iter);
    new_limit = new SubLNode(old_limit, adjust_pre_iter);
  } else {
    new_limit = new AddINode(old_limit, adjust_pre_iter);
    new_limit = new AddLNode(old_limit, adjust_pre_iter);
  }
  phase()->register_new_node(new_limit, pre_ctrl);
  TRACE_ALIGN_VECTOR_NODE(new_limit);

  // 5: Compute (15a, b):
  // 6: Compute (15a, b):
  //    Prevent pre-loop from going past the original limit of the loop.
  Node* constrained_limit =
    (stride > 0) ? (Node*) new MinINode(new_limit, orig_limit)
                 : (Node*) new MaxINode(new_limit, orig_limit);
    (stride > 0) ? (Node*) new MinLNode(phase()->C, new_limit, orig_limit)
                 : (Node*) new MaxLNode(phase()->C, new_limit, orig_limit);
  phase()->register_new_node(constrained_limit, pre_ctrl);
  TRACE_ALIGN_VECTOR_NODE(constrained_limit);

  // 6: Hack the pre-loop limit
  // 7: We know that the result is in the int range, there is never truncation
  constrained_limit = new ConvL2INode(constrained_limit);
  phase()->register_new_node(constrained_limit, pre_ctrl);
  TRACE_ALIGN_VECTOR_NODE(constrained_limit);

  // 8: Hack the pre-loop limit
  igvn().replace_input_of(pre_opaq, 1, constrained_limit);
}
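The overflow this comment guards against is easy to reproduce outside the compiler. A small standalone C++ illustration (plain integer arithmetic, not HotSpot node construction):

#include <cstdint>
#include <cstdio>

int main() {
  int32_t old_limit = 2147483600;  // near INT_MAX
  int32_t adjust    = 64;          // small positive pre-loop adjustment
  // A 32-bit add wraps and *decreases* the limit, which after RCE would
  // illegally widen the main-loop range (cast through uint32_t to avoid UB):
  int32_t narrow = (int32_t)((uint32_t)old_limit + (uint32_t)adjust);
  // The 64-bit add is exact, and the result can then be clamped against the
  // original limit (the MinL step above) before converting back to int:
  int64_t wide = (int64_t)old_limit + (int64_t)adjust;
  int64_t orig_limit = 2147483640;
  int64_t constrained = wide < orig_limit ? wide : orig_limit;
  printf("32-bit: %d, 64-bit: %lld, constrained: %lld\n",
         narrow, (long long)wide, (long long)constrained);
  return 0;
}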
@@ -2339,7 +2339,7 @@ JvmtiModuleClosure::get_all_modules(JvmtiEnv* env, jint* module_count_ptr, jobje
  }

  // Iterate over all the modules loaded to the system.
  ClassLoaderDataGraph::modules_do(&do_module);
  ClassLoaderDataGraph::modules_do_keepalive(&do_module);

  jint len = _tbl->length();
  guarantee(len > 0, "at least one module must be present");

@@ -105,7 +105,7 @@ JvmtiGetLoadedClasses::getLoadedClasses(JvmtiEnv *env, jint* classCountPtr, jcla
    // Iterate through all classes in ClassLoaderDataGraph
    // and collect them using the LoadedClassesClosure
    MutexLocker mcld(ClassLoaderDataGraph_lock);
    ClassLoaderDataGraph::loaded_classes_do(&closure);
    ClassLoaderDataGraph::loaded_classes_do_keepalive(&closure);
  }

  return closure.get_result(env, classCountPtr, classesPtr);
@@ -1174,7 +1174,7 @@ jvmtiError VM_RedefineClasses::compare_and_normalize_class_versions(
        }
      }
    }
    JFR_ONLY(k_new_method->copy_trace_flags(*k_old_method->trace_flags_addr());)
    JFR_ONLY(k_new_method->copy_trace_flags(k_old_method->trace_flags());)
    log_trace(redefine, class, normalize)
      ("Method matched: new: %s [%d] == old: %s [%d]",
       k_new_method->name_and_sig_as_C_string(), ni, k_old_method->name_and_sig_as_C_string(), oi);
@@ -947,7 +947,6 @@ void frame::oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool quer
  InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f);

  // process locals & expression stack
  ResourceMark rm(thread);
  InterpreterOopMap mask;
  if (query_oop_map_cache) {
    m->mask_for(m, bci, &mask);
@@ -1442,7 +1442,7 @@ bool ObjectMonitor::check_owner(TRAPS) {
static inline bool is_excluded(const Klass* monitor_klass) {
  assert(monitor_klass != nullptr, "invariant");
  NOT_JFR_RETURN_(false);
  JFR_ONLY(return vmSymbols::jfr_chunk_rotation_monitor() == monitor_klass->name();)
  JFR_ONLY(return vmSymbols::jdk_jfr_internal_HiddenWait() == monitor_klass->name();)
}

static void post_monitor_wait_event(EventJavaMonitorWait* event,
@@ -1333,7 +1333,7 @@ methodHandle SharedRuntime::resolve_helper(bool is_virtual, bool is_optimized, T

  if (invoke_code == Bytecodes::_invokestatic) {
    assert(callee_method->method_holder()->is_initialized() ||
           callee_method->method_holder()->is_init_thread(current),
           callee_method->method_holder()->is_reentrant_initialization(current),
           "invalid class initialization state for invoke_static");
    if (!VM_Version::supports_fast_class_init_checks() && callee_method->needs_clinit_barrier()) {
      // In order to keep class initialization check, do not patch call
@@ -821,6 +821,16 @@ int ObjectSynchronizer::wait(Handle obj, jlong millis, TRAPS) {
  return ret_code;
}

void ObjectSynchronizer::waitUninterruptibly(Handle obj, jlong millis, TRAPS) {
  if (millis < 0) {
    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "timeout value is negative");
  }
  ObjectSynchronizer::inflate(THREAD,
                              obj(),
                              inflate_cause_wait)->wait(millis, false, THREAD);
}

void ObjectSynchronizer::notify(Handle obj, TRAPS) {
  JavaThread* current = THREAD;

@@ -119,6 +119,11 @@ public:
  static bool quick_notify(oopDesc* obj, JavaThread* current, bool All);
  static bool quick_enter(oop obj, JavaThread* current, BasicLock* Lock);

  // Special internal-use-only method for use by JVM infrastructure
  // that needs to wait() on a java-level object but that can't risk
  // throwing unexpected InterruptedExceptions.
  static void waitUninterruptibly(Handle obj, jlong Millis, TRAPS);

  // Inflate light weight monitor to heavy weight monitor
  static ObjectMonitor* inflate(Thread* current, oop obj, const InflateCause cause);
  // Used to inflate a monitor as if it was done from the thread JavaThread.
@@ -225,6 +230,7 @@ class ObjectLocker : public StackObj {

  // Monitor behavior
  void wait(TRAPS) { ObjectSynchronizer::wait(_obj, 0, CHECK); } // wait forever
  void wait_uninterruptibly(TRAPS) { ObjectSynchronizer::waitUninterruptibly(_obj, 0, CHECK); } // wait forever
  void notify_all(TRAPS) { ObjectSynchronizer::notifyall(_obj, CHECK); }
};
@@ -205,8 +205,9 @@ void javaVFrame::print_lock_info_on(outputStream* st, int frame_count) {
    Klass* k = obj->klass();
    st->print_cr("\t- %s <" INTPTR_FORMAT "> (a %s)", "parking to wait for ", p2i(obj), k->external_name());
  }
  else if (thread()->osthread()->get_state() == CONDVAR_WAIT) {
    // We are waiting on the native class initialization monitor.
  else if (thread()->osthread()->get_state() == OBJECT_WAIT) {
    // We are waiting on an Object monitor but Object.wait() isn't the
    // top-frame, so we should be waiting on a Class initialization monitor.
    InstanceKlass* k = thread()->class_to_be_initialized();
    if (k != nullptr) {
      st->print_cr("\t- waiting on the Class initialization monitor for %s", k->external_name());
@@ -246,6 +246,7 @@
  nonstatic_field(InstanceKlass, _nonstatic_oop_map_size, int) \
  volatile_nonstatic_field(InstanceKlass, _init_state, InstanceKlass::ClassState) \
  volatile_nonstatic_field(InstanceKlass, _init_thread, JavaThread*) \
  nonstatic_field(InstanceKlass, _is_marked_dependent, bool) \
  nonstatic_field(InstanceKlass, _itable_len, int) \
  nonstatic_field(InstanceKlass, _nest_host_index, u2) \
  nonstatic_field(InstanceKlass, _reference_type, u1) \
@@ -2163,7 +2164,6 @@
  \
  declare_constant(InstanceKlass::allocated) \
  declare_constant(InstanceKlass::loaded) \
  declare_constant(InstanceKlass::being_linked) \
  declare_constant(InstanceKlass::linked) \
  declare_constant(InstanceKlass::being_initialized) \
  declare_constant(InstanceKlass::fully_initialized) \
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -128,6 +128,22 @@ bool ClassLoadingService::set_verbose(bool verbose) {
  return verbose;
}

bool ClassLoadingService::get_verbose() {
  for (LogTagSet* ts = LogTagSet::first(); ts != nullptr; ts = ts->next()) {
    // set_verbose looks for a non-exact match for class+load,
    // so look for all tag sets that match class+load*
    if (ts->contains(LogTag::_class) &&
        ts->contains(LogTag::_load)) {
      LogLevelType l = ts->level_for(LogConfiguration::StdoutLog);
      if (l != LogLevel::Info && l != LogLevel::Debug && l != LogLevel::Trace) {
        return false;
      }
    }
  }

  return true;
}

// Caller to this function must own Management_lock
void ClassLoadingService::reset_trace_class_unloading() {
  assert(Management_lock->owned_by_self(), "Must own the Management_lock");
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -53,6 +53,7 @@ class ClassLoadingService : public AllStatic {
public:
  static void init() NOT_MANAGEMENT_RETURN;
  static bool set_verbose(bool verbose) NOT_MANAGEMENT_RETURN_(false);
  static bool get_verbose() NOT_MANAGEMENT_RETURN_(false);
  static void reset_trace_class_unloading() NOT_MANAGEMENT_RETURN;
  static jlong loaded_class_count() NOT_MANAGEMENT_RETURN_(0L);
  static jlong unloaded_class_count() NOT_MANAGEMENT_RETURN_(0L);
@@ -63,7 +64,6 @@ class ClassLoadingService : public AllStatic {
  static jlong loaded_shared_class_bytes() NOT_MANAGEMENT_RETURN_(0L);
  static jlong unloaded_shared_class_bytes() NOT_MANAGEMENT_RETURN_(0L);
  static jlong class_method_data_size() NOT_MANAGEMENT_RETURN_(0L);
  static bool get_verbose() { return log_is_enabled(Info, class, load); }

  static void notify_class_loaded(InstanceKlass* k, bool shared_class)
      NOT_MANAGEMENT_RETURN;
@@ -1089,6 +1089,14 @@ u4 DumperSupport::get_static_fields_size(InstanceKlass* ik, u2& field_count) {
    }
  }

  // Also provide a pointer to the init_lock if present, so there aren't unreferenced int[0]
  // arrays.
  oop init_lock = ik->init_lock();
  if (init_lock != nullptr) {
    field_count++;
    size += sizeof(address);
  }

  // We write the value itself plus a name and a one byte type tag per field.
  return checked_cast<u4>(size + field_count * (sizeof(address) + 1));
}
@@ -1126,6 +1134,14 @@ void DumperSupport::dump_static_fields(AbstractDumpWriter* writer, Klass* k) {
      prev = prev->previous_versions();
    }
  }

  // Add init lock to the end if the class is not yet initialized
  oop init_lock = ik->init_lock();
  if (init_lock != nullptr) {
    writer->write_symbolID(vmSymbols::init_lock_name()); // name
    writer->write_u1(sig2tag(vmSymbols::int_array_signature())); // type
    writer->write_objectID(init_lock);
  }
}

// dump the raw values of the instance fields of the given object
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -202,6 +202,21 @@ bool MemoryService::set_verbose(bool verbose) {
  return verbose;
}

bool MemoryService::get_verbose() {
  for (LogTagSet* ts = LogTagSet::first(); ts != nullptr; ts = ts->next()) {
    // set_verbose only sets gc and not gc*, so check for an exact match
    const bool is_gc_exact_match = ts->contains(LogTag::_gc) && ts->ntags() == 1;
    if (is_gc_exact_match) {
      LogLevelType l = ts->level_for(LogConfiguration::StdoutLog);
      if (l == LogLevel::Info || l == LogLevel::Debug || l == LogLevel::Trace) {
        return true;
      }
    }
  }

  return false;
}

Handle MemoryService::create_MemoryUsage_obj(MemoryUsage usage, TRAPS) {
  InstanceKlass* ik = Management::java_lang_management_MemoryUsage_klass(CHECK_NH);
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -106,8 +106,8 @@ public:
                                   GCCause::Cause cause,
                                   bool allMemoryPoolsAffected, const char* notificationMessage = nullptr);

  static bool get_verbose() { return log_is_enabled(Info, gc); }
  static bool set_verbose(bool verbose);
  static bool get_verbose();

  // Create an instance of java/lang/management/MemoryUsage
  static Handle create_MemoryUsage_obj(MemoryUsage usage, TRAPS);
@@ -44,6 +44,9 @@
#include "utilities/events.hpp"
#include "utilities/exceptions.hpp"

// Limit exception message components to 64K (the same max as Symbols)
#define MAX_LEN 65535

// Implementation of ThreadShadow
void check_ThreadShadow() {
  const ByteSize offset1 = byte_offset_of(ThreadShadow, _pending_exception);
@@ -116,10 +119,11 @@ bool Exceptions::special_exception(JavaThread* thread, const char* file, int lin
    const char* exc_value = h_exception.not_null() ? h_exception->print_value_string() :
                            h_name != nullptr ? h_name->as_C_string() :
                            "null";
    log_info(exceptions)("Thread cannot call Java so instead of throwing exception <%s%s%s> (" PTR_FORMAT ") \n"
    log_info(exceptions)("Thread cannot call Java so instead of throwing exception <%.*s%s%.*s> (" PTR_FORMAT ") \n"
                         "at [%s, line %d]\nfor thread " PTR_FORMAT ",\n"
                         "throwing pre-allocated exception: %s",
                         exc_value, message ? ": " : "", message ? message : "",
                         MAX_LEN, exc_value, message ? ": " : "",
                         MAX_LEN, message ? message : "",
                         p2i(h_exception()), file, line, p2i(thread),
                         Universe::vm_exception()->print_value_string());
    // We do not care what kind of exception we get for a thread which
@@ -145,10 +149,11 @@ void Exceptions::_throw(JavaThread* thread, const char* file, int line, Handle h

  // tracing (do this up front - so it works during boot strapping)
  // Note, the print_value_string() argument is not called unless logging is enabled!
  log_info(exceptions)("Exception <%s%s%s> (" PTR_FORMAT ") \n"
  log_info(exceptions)("Exception <%.*s%s%.*s> (" PTR_FORMAT ") \n"
                       "thrown [%s, line %d]\nfor thread " PTR_FORMAT,
                       h_exception->print_value_string(),
                       message ? ": " : "", message ? message : "",
                       MAX_LEN, h_exception->print_value_string(),
                       message ? ": " : "",
                       MAX_LEN, message ? message : "",
                       p2i(h_exception()), file, line, p2i(thread));

  // for AbortVMOnException flag
@@ -568,13 +573,13 @@ void Exceptions::log_exception(Handle exception, const char* message) {
  ResourceMark rm;
  const char* detail_message = java_lang_Throwable::message_as_utf8(exception());
  if (detail_message != nullptr) {
    log_info(exceptions)("Exception <%s: %s>\n thrown in %s",
                         exception->print_value_string(),
                         detail_message,
                         message);
    log_info(exceptions)("Exception <%.*s: %.*s>\n thrown in %.*s",
                         MAX_LEN, exception->print_value_string(),
                         MAX_LEN, detail_message,
                         MAX_LEN, message);
  } else {
    log_info(exceptions)("Exception <%s>\n thrown in %s",
                         exception->print_value_string(),
                         message);
    log_info(exceptions)("Exception <%.*s>\n thrown in %.*s",
                         MAX_LEN, exception->print_value_string(),
                         MAX_LEN, message);
  }
}
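For readers less familiar with printf precision on strings: "%.*s" consumes an extra int argument that caps how many bytes are printed, which is how MAX_LEN bounds each component above. A tiny standalone demonstration:

#include <cstdio>

#define MAX_LEN 65535  // same cap as above

int main() {
  const char* msg = "a possibly very long exception detail message";
  printf("Exception <%.*s>\n", MAX_LEN, msg);  // prints at most MAX_LEN bytes
  printf("Exception <%.*s>\n", 9, msg);        // prints only "a possibl"
  return 0;
}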
@@ -24,6 +24,7 @@

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/utf8.hpp"
@@ -431,12 +432,16 @@ int UNICODE::utf8_size(jbyte c) {

template<typename T>
int UNICODE::utf8_length(const T* base, int length) {
  int result = 0;
  size_t result = 0;
  for (int index = 0; index < length; index++) {
    T c = base[index];
    result += utf8_size(c);
    int sz = utf8_size(c);
    if (result + sz > INT_MAX-1) {
      break;
    }
    result += sz;
  }
  return result;
  return checked_cast<int>(result);
}

template<typename T>
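The clamp stops accumulating before the running total can pass INT_MAX-1, so the final narrowing cast can never overflow. A standalone sketch of the same pattern (the per-character sizing rule below is a simplified modified-UTF-8 assumption, not the HotSpot template):

#include <climits>
#include <cstddef>
#include <cstdio>

static int utf8_length_clamped(const unsigned short* base, int length) {
  size_t result = 0;  // accumulate in a type wider than the return value
  for (int i = 0; i < length; i++) {
    unsigned short c = base[i];
    // 1..3 bytes per UTF-16 code unit in modified UTF-8 (c == 0 takes 2)
    size_t sz = (c != 0 && c < 0x80) ? 1 : (c < 0x800 ? 2 : 3);
    if (result + sz > (size_t)INT_MAX - 1) {
      break;  // clamp: drop the tail rather than overflowing int
    }
    result += sz;
  }
  return (int)result;  // safe: result <= INT_MAX - 1
}

int main() {
  unsigned short s[] = { 'J', 'V', 'M', 0x00E9 };  // "JVM" plus e-acute
  printf("utf8 length = %d\n", utf8_length_clamped(s, 4));
  return 0;
}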
@@ -237,6 +237,10 @@ public class Object {

    /**
     * {@return a string representation of the object}
     *
     * Satisfying this method's contract implies a non-{@code null}
     * result must be returned.
     *
     * @apiNote
     * In general, the
     * {@code toString} method returns a string that
@@ -86,7 +86,7 @@ import java.lang.classfile.instruction.TypeCheckInstruction;

import static java.util.Objects.requireNonNull;
import static jdk.internal.classfile.impl.BytecodeHelpers.handleDescToHandleInfo;
import jdk.internal.classfile.impl.TransformingCodeBuilder;

import jdk.internal.javac.PreviewFeature;

/**
@@ -171,7 +171,7 @@ public sealed interface CodeBuilder
    default CodeBuilder transforming(CodeTransform transform, Consumer<CodeBuilder> handler) {
        var resolved = transform.resolve(this);
        resolved.startHandler().run();
        handler.accept(new TransformingCodeBuilder(this, resolved.consumer()));
        handler.accept(new ChainedCodeBuilder(this, resolved.consumer()));
        resolved.endHandler().run();
        return this;
    }
@@ -1,6 +1,6 @@
<!doctype html>
<!--
Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.

This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
<html lang="en">
<head>
<title>Value-based Classes</title>
<link rel="stylesheet" type="text/css" href="../../../../stylesheet.css" title="Style">
</head>
<body>
<h1 id="ValueBased">{@index "Value-based Classes"}</h1>

@@ -1,6 +1,6 @@
<!doctype html>
<!--
Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.

This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
<html lang="en">
<head>
<title>Java Thread Primitive Deprecation</title>
<link rel="stylesheet" type="text/css" href="../../../../stylesheet.css" title="Style">
</head>
<body>
<h1>Java Thread Primitive Deprecation</h1>
@@ -222,7 +222,7 @@ import java.util.stream.Stream;
 * <pre>
 * MemoryLayout.structLayout(
 *     ValueLayout.JAVA_INT.withName("x"),
 *     MemoryLayout.paddingLayout(32),
 *     MemoryLayout.paddingLayout(4),
 *     ValueLayout.JAVA_LONG.withName("y")
 * );
 * </pre>
@@ -369,7 +369,7 @@ import jdk.internal.foreign.layout.UnionLayoutImpl;
 * int size = ...
 * MemorySegment points = ...
 * for (int i = 0 ; i < size ; i++) {
 *     ... POINT_ARR_X.get(segment, 0L, (long)i) ...
 *     ... POINT_ARR_X.get(points, 0L, (long)i) ...
 * }
 * }
 *
@@ -630,6 +630,12 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl {
* {@snippet lang=java :
* asSlice(offset, newSize, 1);
* }
* <p>
* If this segment is {@linkplain MemorySegment#isReadOnly() read-only},
* the returned segment is also {@linkplain MemorySegment#isReadOnly() read-only}.
* <p>
* The returned memory segment shares a region of backing memory with this segment.
* Hence, no memory will be allocated or freed by this method.
*
* @see #asSlice(long, long, long)
*
@@ -646,6 +652,12 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl {
* Returns a slice of this memory segment, at the given offset, with the provided
* alignment constraint. The returned segment's address is the address of this
* segment plus the given offset; its size is specified by the given argument.
* <p>
* If this segment is {@linkplain MemorySegment#isReadOnly() read-only},
* the returned segment is also {@linkplain MemorySegment#isReadOnly() read-only}.
* <p>
* The returned memory segment shares a region of backing memory with this segment.
* Hence, no memory will be allocated or freed by this method.
*
* @param offset The new segment base offset (relative to the address of this segment),
*               specified in bytes
@@ -670,6 +682,12 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl {
* {@snippet lang=java :
* asSlice(offset, layout.byteSize(), layout.byteAlignment());
* }
* <p>
* If this segment is {@linkplain MemorySegment#isReadOnly() read-only},
* the returned segment is also {@linkplain MemorySegment#isReadOnly() read-only}.
* <p>
* The returned memory segment shares a region of backing memory with this segment.
* Hence, no memory will be allocated or freed by this method.
*
* @see #asSlice(long, long, long)
*
@@ -693,6 +711,12 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl {
* {@snippet lang=java :
* asSlice(offset, byteSize() - offset);
* }
* <p>
* If this segment is {@linkplain MemorySegment#isReadOnly() read-only},
* the returned segment is also {@linkplain MemorySegment#isReadOnly() read-only}.
* <p>
* The returned memory segment shares a region of backing memory with this segment.
* Hence, no memory will be allocated or freed by this method.
*
* @see #asSlice(long, long)
*
@@ -706,6 +730,12 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl {
/**
* Returns a new memory segment that has the same address and scope as this segment,
* but with the provided size.
* <p>
* If this segment is {@linkplain MemorySegment#isReadOnly() read-only},
* the returned segment is also {@linkplain MemorySegment#isReadOnly() read-only}.
* <p>
* The returned memory segment shares a region of backing memory with this segment.
* Hence, no memory will be allocated or freed by this method.
*
* @param newSize the size of the returned segment
* @return a new memory segment that has the same address and scope as
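
The added wording documents two invariants of every asSlice overload: read-only-ness propagates to the slice, and slicing never copies or allocates. A minimal sketch:

import java.lang.foreign.Arena;
import java.lang.foreign.MemorySegment;

class SliceDemo {
    public static void main(String[] args) {
        try (Arena arena = Arena.ofConfined()) {
            MemorySegment seg = arena.allocate(16).asReadOnly();
            MemorySegment slice = seg.asSlice(8, 8); // same backing region, no allocation
            System.out.println(slice.isReadOnly()); // true: read-only-ness propagates
        }
    }
}
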
@@ -741,6 +771,12 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl {
* That is, the cleanup action receives a segment that is associated with the global
* scope, and is accessible from any thread. The size of the segment accepted by the
* cleanup action is {@link #byteSize()}.
* <p>
* If this segment is {@linkplain MemorySegment#isReadOnly() read-only},
* the returned segment is also {@linkplain MemorySegment#isReadOnly() read-only}.
* <p>
* The returned memory segment shares a region of backing memory with this segment.
* Hence, no memory will be allocated or freed by this method.
*
* @apiNote The cleanup action (if present) should take care not to leak the received
*          segment to external clients that might access the segment after its
@@ -786,6 +822,12 @@ public sealed interface MemorySegment permits AbstractMemorySegmentImpl {
* That is, the cleanup action receives a segment that is associated with the global
* scope, and is accessible from any thread. The size of the segment accepted by the
* cleanup action is {@code newSize}.
* <p>
* If this segment is {@linkplain MemorySegment#isReadOnly() read-only},
* the returned segment is also {@linkplain MemorySegment#isReadOnly() read-only}.
* <p>
* The returned memory segment shares a region of backing memory with this segment.
* Hence, no memory will be allocated or freed by this method.
*
* @apiNote The cleanup action (if present) should take care not to leak the received
*          segment to external clients that might access the segment after its

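The same two invariants are documented for reinterpret. A hedged sketch (reinterpret is a restricted method, so running this may require --enable-native-access; the cleanup action here only logs):

import java.lang.foreign.Arena;
import java.lang.foreign.MemorySegment;

class ReinterpretDemo {
    public static void main(String[] args) {
        try (Arena arena = Arena.ofConfined()) {
            MemorySegment base = arena.allocate(16).asReadOnly();
            // The view keeps the read-only flag; the cleanup action runs when the
            // arena closes and receives a global-scope view of the same region.
            MemorySegment view = base.reinterpret(16, arena,
                    s -> System.out.println("cleanup, size=" + s.byteSize()));
            System.out.println(view.isReadOnly()); // true
        }
    }
}
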
@@ -39,7 +39,6 @@ import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import java.util.Objects;
import jdk.internal.constant.ConstantUtils;
import jdk.internal.constant.MethodTypeDescImpl;
import jdk.internal.constant.ReferenceClassDescImpl;
import sun.security.action.GetBooleanAction;
@@ -47,10 +46,7 @@ import sun.security.action.GetBooleanAction;
import static java.lang.classfile.ClassFile.*;
import java.lang.classfile.attribute.StackMapFrameInfo;
import java.lang.classfile.attribute.StackMapTableAttribute;
import java.lang.constant.ConstantDescs;
import static java.lang.constant.ConstantDescs.*;
import java.lang.constant.DirectMethodHandleDesc;
import java.lang.constant.DynamicConstantDesc;

/**
* ProxyGenerator contains the code to generate a dynamic proxy class
@@ -65,7 +61,10 @@ final class ProxyGenerator {
ClassFile.of(ClassFile.StackMapsOption.DROP_STACK_MAPS);

private static final ClassDesc
CD_ClassLoader = ReferenceClassDescImpl.ofValidated("Ljava/lang/ClassLoader;"),
CD_Class_array = ReferenceClassDescImpl.ofValidated("[Ljava/lang/Class;"),
CD_ClassNotFoundException = ReferenceClassDescImpl.ofValidated("Ljava/lang/ClassNotFoundException;"),
CD_NoClassDefFoundError = ReferenceClassDescImpl.ofValidated("Ljava/lang/NoClassDefFoundError;"),
CD_IllegalAccessException = ReferenceClassDescImpl.ofValidated("Ljava/lang/IllegalAccessException;"),
CD_InvocationHandler = ReferenceClassDescImpl.ofValidated("Ljava/lang/reflect/InvocationHandler;"),
CD_Method = ReferenceClassDescImpl.ofValidated("Ljava/lang/reflect/Method;"),
@@ -81,8 +80,9 @@ final class ProxyGenerator {
MTD_void_String = MethodTypeDescImpl.ofValidated(CD_void, CD_String),
MTD_void_Throwable = MethodTypeDescImpl.ofValidated(CD_void, CD_Throwable),
MTD_Class = MethodTypeDescImpl.ofValidated(CD_Class),
MTD_Class_array = MethodTypeDescImpl.ofValidated(CD_Class_array),
MTD_Method_String_Class_array = MethodTypeDescImpl.ofValidated(CD_Method, ConstantDescs.CD_String, CD_Class_array),
MTD_Class_String_boolean_ClassLoader = MethodTypeDescImpl.ofValidated(CD_Class, CD_String, CD_boolean, CD_ClassLoader),
MTD_ClassLoader = MethodTypeDescImpl.ofValidated(CD_ClassLoader),
MTD_Method_String_Class_array = MethodTypeDescImpl.ofValidated(CD_Method, CD_String, CD_Class_array),
MTD_MethodHandles$Lookup = MethodTypeDescImpl.ofValidated(CD_MethodHandles_Lookup),
MTD_MethodHandles$Lookup_MethodHandles$Lookup = MethodTypeDescImpl.ofValidated(CD_MethodHandles_Lookup, CD_MethodHandles_Lookup),
MTD_Object_Object_Method_ObjectArray = MethodTypeDescImpl.ofValidated(CD_Object, CD_Object, CD_Method, CD_Object_array),
@@ -107,34 +107,33 @@ final class ProxyGenerator {
"jdk.proxy.ProxyGenerator.saveGeneratedFiles"));

/* Preloaded ProxyMethod objects for methods in java.lang.Object */
private static final ProxyMethod HASH_CODE_METHOD;
private static final ProxyMethod EQUALS_METHOD;
private static final ProxyMethod TO_STRING_METHOD;
private static final Method OBJECT_HASH_CODE_METHOD;
private static final Method OBJECT_EQUALS_METHOD;
private static final Method OBJECT_TO_STRING_METHOD;

static {
try {
HASH_CODE_METHOD = new ProxyMethod(Object.class.getMethod("hashCode"));
EQUALS_METHOD = new ProxyMethod(Object.class.getMethod("equals", Object.class));
TO_STRING_METHOD = new ProxyMethod(Object.class.getMethod("toString"));
OBJECT_HASH_CODE_METHOD = Object.class.getMethod("hashCode");
OBJECT_EQUALS_METHOD = Object.class.getMethod("equals", Object.class);
OBJECT_TO_STRING_METHOD = Object.class.getMethod("toString");
} catch (NoSuchMethodException e) {
throw new NoSuchMethodError(e.getMessage());
}
}

private final ConstantPoolBuilder cp;
private final List<StackMapFrameInfo.VerificationTypeInfo> throwableStack;
private final List<StackMapFrameInfo.VerificationTypeInfo> classLoaderLocal, throwableStack;
private final NameAndTypeEntry exInit;
private final ClassEntry object, proxy, ute;
private final ClassEntry objectCE, proxyCE, uteCE, classCE;
private final FieldRefEntry handlerField;
private final InterfaceMethodRefEntry invoke;
private final MethodRefEntry uteInit;
private final DirectMethodHandleDesc bsm;
private final InterfaceMethodRefEntry invocationHandlerInvoke;
private final MethodRefEntry uteInit, classGetMethod, classForName, throwableGetMessage;


/**
* Name of proxy class
* ClassEntry for this proxy class
*/
private ClassEntry classEntry;
private final ClassEntry thisClassCE;

/**
* Proxy interfaces
@@ -153,6 +152,12 @@ final class ProxyGenerator {
*/
private final Map<String, List<ProxyMethod>> proxyMethods = new LinkedHashMap<>();

/**
* Ordinal of next ProxyMethod object added to proxyMethods.
* Indexes are reserved for hashcode(0), equals(1), toString(2).
*/
private int proxyMethodCount = 3;

/**
* Construct a ProxyGenerator to generate a proxy class with the
* specified name and for the given interfaces.
@@ -163,18 +168,23 @@ final class ProxyGenerator {
private ProxyGenerator(ClassLoader loader, String className, List<Class<?>> interfaces,
int accessFlags) {
this.cp = ConstantPoolBuilder.of();
this.classEntry = cp.classEntry(ReferenceClassDescImpl.ofValidatedBinaryName(className));
this.thisClassCE = cp.classEntry(ReferenceClassDescImpl.ofValidatedBinaryName(className));
this.interfaces = interfaces;
this.accessFlags = accessFlags;
this.throwableStack = List.of(StackMapFrameInfo.ObjectVerificationTypeInfo.of(cp.classEntry(CD_Throwable)));
var throwable = cp.classEntry(CD_Throwable);
this.classLoaderLocal = List.of(StackMapFrameInfo.ObjectVerificationTypeInfo.of(cp.classEntry(CD_ClassLoader)));
this.throwableStack = List.of(StackMapFrameInfo.ObjectVerificationTypeInfo.of(throwable));
this.exInit = cp.nameAndTypeEntry(INIT_NAME, MTD_void_String);
this.object = cp.classEntry(CD_Object);
this.proxy = cp.classEntry(CD_Proxy);
this.handlerField = cp.fieldRefEntry(proxy, cp.nameAndTypeEntry(NAME_HANDLER_FIELD, CD_InvocationHandler));
this.invoke = cp.interfaceMethodRefEntry(CD_InvocationHandler, "invoke", MTD_Object_Object_Method_ObjectArray);
this.ute = cp.classEntry(CD_UndeclaredThrowableException);
this.uteInit = cp.methodRefEntry(ute, cp.nameAndTypeEntry(INIT_NAME, MTD_void_Throwable));
this.bsm = ConstantDescs.ofConstantBootstrap(classEntry.asSymbol(), "$getMethod", CD_Method, CD_Class, CD_String, CD_MethodType);
this.objectCE = cp.classEntry(CD_Object);
this.proxyCE = cp.classEntry(CD_Proxy);
this.classCE = cp.classEntry(CD_Class);
this.handlerField = cp.fieldRefEntry(proxyCE, cp.nameAndTypeEntry(NAME_HANDLER_FIELD, CD_InvocationHandler));
this.invocationHandlerInvoke = cp.interfaceMethodRefEntry(CD_InvocationHandler, "invoke", MTD_Object_Object_Method_ObjectArray);
this.uteCE = cp.classEntry(CD_UndeclaredThrowableException);
this.uteInit = cp.methodRefEntry(uteCE, cp.nameAndTypeEntry(INIT_NAME, MTD_void_Throwable));
this.classGetMethod = cp.methodRefEntry(classCE, cp.nameAndTypeEntry("getMethod", MTD_Method_String_Class_array));
this.classForName = cp.methodRefEntry(classCE, cp.nameAndTypeEntry("forName", MTD_Class_String_boolean_ClassLoader));
this.throwableGetMessage = cp.methodRefEntry(throwable, cp.nameAndTypeEntry("getMessage", MTD_String));
}

/**
@@ -441,9 +451,9 @@ final class ProxyGenerator {
* java.lang.Object take precedence over duplicate methods in the
* proxy interfaces.
*/
addProxyMethod(HASH_CODE_METHOD);
addProxyMethod(EQUALS_METHOD);
addProxyMethod(TO_STRING_METHOD);
addProxyMethod(new ProxyMethod(OBJECT_HASH_CODE_METHOD, "m0"));
addProxyMethod(new ProxyMethod(OBJECT_EQUALS_METHOD, "m1"));
addProxyMethod(new ProxyMethod(OBJECT_TO_STRING_METHOD, "m2"));

/*
* Accumulate all of the methods from the proxy interfaces.
@@ -464,20 +474,23 @@ final class ProxyGenerator {
checkReturnTypes(sigmethods);
}

return CF_CONTEXT.build(classEntry, cp, clb -> {
clb.withSuperclass(proxy);
return CF_CONTEXT.build(thisClassCE, cp, clb -> {
clb.withSuperclass(proxyCE);
clb.withFlags(accessFlags);
clb.withInterfaces(toClassEntries(cp, interfaces));
generateConstructor(clb);

for (List<ProxyMethod> sigmethods : proxyMethods.values()) {
for (ProxyMethod pm : sigmethods) {
// add static field for the Method object
clb.withField(pm.methodFieldName, CD_Method, ACC_PRIVATE | ACC_STATIC | ACC_FINAL);

// Generate code for proxy method
pm.generateMethod(this, clb);
pm.generateMethod(clb);
}
}

generateBootstrapMethod(clb);
generateStaticInitializer(clb);
generateLookupAccessor(clb);
});
}
@@ -520,7 +533,7 @@ final class ProxyGenerator {
}
}
sigmethods.add(new ProxyMethod(m, sig, m.getSharedParameterTypes(), returnType,
exceptionTypes, fromClass));
exceptionTypes, fromClass, "m" + proxyMethodCount++));
}

/**
@@ -542,32 +555,56 @@ final class ProxyGenerator {
clb.withMethodBody(INIT_NAME, MTD_void_InvocationHandler, ACC_PUBLIC, cob -> cob
.aload(0)
.aload(1)
.invokespecial(cp.methodRefEntry(proxy, cp.nameAndTypeEntry(INIT_NAME, MTD_void_InvocationHandler)))
.invokespecial(cp.methodRefEntry(proxyCE,
cp.nameAndTypeEntry(INIT_NAME, MTD_void_InvocationHandler)))
.return_());
}

/**
* Generate CONDY bootstrap method for the proxy class to retrieve {@link Method} instances.
* Generate the class initializer.
* Discussion: Currently, for Proxy to work with SecurityManager,
* we rely on the parameter classes of the methods to be computed
* from Proxy instead of via user code paths like bootstrap method
* lazy evaluation. That might change if we can pass in the live
* Method objects directly..
*/
private void generateBootstrapMethod(ClassBuilder clb) {
clb.withMethodBody(bsm.methodName(), bsm.invocationType(), ClassFile.ACC_PRIVATE | ClassFile.ACC_STATIC, cob -> {
cob.aload(3) //interface Class
.aload(4) //interface method name String
.aload(5) //interface MethodType
.invokevirtual(CD_MethodType, "parameterArray", MTD_Class_array)
.invokevirtual(ConstantDescs.CD_Class, "getMethod", MTD_Method_String_Class_array)
.areturn();
Label failLabel = cob.newBoundLabel();
ClassEntry nsme = cp.classEntry(CD_NoSuchMethodError);
cob.exceptionCatch(cob.startLabel(), failLabel, failLabel, CD_NoSuchMethodException)
.new_(nsme)
private void generateStaticInitializer(ClassBuilder clb) {
clb.withMethodBody(CLASS_INIT_NAME, MTD_void, ACC_STATIC, cob -> {
// Put ClassLoader at local variable index 0, used by
// Class.forName(String, boolean, ClassLoader) calls
cob.ldc(thisClassCE)
.invokevirtual(cp.methodRefEntry(classCE,
cp.nameAndTypeEntry("getClassLoader", MTD_ClassLoader)))
.astore(0);
var ts = cob.newBoundLabel();
for (List<ProxyMethod> sigmethods : proxyMethods.values()) {
for (ProxyMethod pm : sigmethods) {
pm.codeFieldInitialization(cob);
}
}
cob.return_();
var c1 = cob.newBoundLabel();
var nsmError = cp.classEntry(CD_NoSuchMethodError);
cob.exceptionCatch(ts, c1, c1, CD_NoSuchMethodException)
.new_(nsmError)
.dup_x1()
.swap()
.invokevirtual(cp.methodRefEntry(CD_Throwable, "getMessage", MTD_String))
.invokespecial(cp.methodRefEntry(nsme, exInit))
.athrow()
.with(StackMapTableAttribute.of(List.of(
StackMapFrameInfo.of(failLabel, List.of(), throwableStack))));
.invokevirtual(throwableGetMessage)
.invokespecial(cp.methodRefEntry(nsmError, exInit))
.athrow();
var c2 = cob.newBoundLabel();
var ncdfError = cp.classEntry(CD_NoClassDefFoundError);
cob.exceptionCatch(ts, c1, c2, CD_ClassNotFoundException)
.new_(ncdfError)
.dup_x1()
.swap()
.invokevirtual(throwableGetMessage)
.invokespecial(cp.methodRefEntry(ncdfError, exInit))
.athrow();
cob.with(StackMapTableAttribute.of(List.of(
StackMapFrameInfo.of(c1, classLoaderLocal, throwableStack),
StackMapFrameInfo.of(c2, classLoaderLocal, throwableStack))));

});
}

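The patch replaces lazy CONDY resolution with an eager class initializer. In Java-source terms, the generated <clinit> corresponds roughly to the sketch below; the class name ProxySketch and the resolved methods are illustrative, not what the generator actually emits:

import java.lang.reflect.Method;

final class ProxySketch {
    // One static Method field per proxy method (m0..mN in the generated class).
    private static final Method m0;
    private static final Method m3;

    static {
        try {
            // The proxy's own class loader sits in local slot 0 of the real <clinit>.
            ClassLoader cl = ProxySketch.class.getClassLoader();
            m0 = Class.forName("java.lang.Object", false, cl).getMethod("hashCode");
            m3 = Class.forName("java.lang.Runnable", false, cl).getMethod("run");
        } catch (NoSuchMethodException e) {
            throw new NoSuchMethodError(e.getMessage());
        } catch (ClassNotFoundException e) {
            throw new NoClassDefFoundError(e.getMessage());
        }
    }
}
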
@@ -587,7 +624,7 @@ final class ProxyGenerator {
ClassEntry iae = cp.classEntry(CD_IllegalAccessException);
cob.aload(cob.parameterSlot(0))
.invokevirtual(cp.methodRefEntry(mhl, cp.nameAndTypeEntry("lookupClass", MTD_Class)))
.ldc(proxy)
.ldc(proxyCE)
.if_acmpne(failLabel)
.aload(cob.parameterSlot(0))
.invokevirtual(cp.methodRefEntry(mhl, cp.nameAndTypeEntry("hasFullPrivilegeAccess", MTD_boolean)))
@@ -613,24 +650,29 @@ final class ProxyGenerator {
* being generated: a method whose implementation will encode and
* dispatch invocations to the proxy instance's invocation handler.
*/
private static class ProxyMethod {
private class ProxyMethod {

private final Method method;
private final String shortSignature;
private final Class<?> fromClass;
private final Class<?>[] parameterTypes;
private final Class<?> returnType;
private final String methodFieldName;
private Class<?>[] exceptionTypes;
private final FieldRefEntry methodField;

private ProxyMethod(Method method, String sig, Class<?>[] parameterTypes,
Class<?> returnType, Class<?>[] exceptionTypes,
Class<?> fromClass) {
Class<?> fromClass, String methodFieldName) {
this.method = method;
this.shortSignature = sig;
this.parameterTypes = parameterTypes;
this.returnType = returnType;
this.exceptionTypes = exceptionTypes;
this.fromClass = fromClass;
this.methodFieldName = methodFieldName;
this.methodField = cp.fieldRefEntry(thisClassCE,
cp.nameAndTypeEntry(methodFieldName, CD_Method));
}

/**
@@ -639,17 +681,16 @@ final class ProxyGenerator {
* @param method The method for which to create a proxy
* @param methodFieldName the fieldName to generate
*/
private ProxyMethod(Method method) {
private ProxyMethod(Method method, String methodFieldName) {
this(method, method.toShortSignature(),
method.getSharedParameterTypes(), method.getReturnType(),
method.getSharedExceptionTypes(), method.getDeclaringClass());
method.getSharedExceptionTypes(), method.getDeclaringClass(), methodFieldName);
}

/**
* Generate this method, including the code and exception table entry.
*/
private void generateMethod(ProxyGenerator pg, ClassBuilder clb) {
var cp = pg.cp;
private void generateMethod(ClassBuilder clb) {
var pTypes = new ClassDesc[parameterTypes.length];
for (int i = 0; i < pTypes.length; i++) {
pTypes[i] = toClassDesc(parameterTypes[i]);
@@ -661,17 +702,14 @@ final class ProxyGenerator {
clb.withMethod(method.getName(), desc, accessFlags, mb ->
mb.with(ExceptionsAttribute.of(toClassEntries(cp, List.of(exceptionTypes))))
.withCode(cob -> {
cob.aload(0)
.getfield(pg.handlerField)
.aload(0)
.ldc(DynamicConstantDesc.of(pg.bsm,
toClassDesc(fromClass),
method.getName(),
desc));
cob.aload(cob.receiverSlot())
.getfield(handlerField)
.aload(cob.receiverSlot())
.getstatic(methodField);
if (parameterTypes.length > 0) {
// Create an array and fill with the parameters converting primitives to wrappers
cob.loadConstant(parameterTypes.length)
.anewarray(pg.object);
.anewarray(objectCE);
for (int i = 0; i < parameterTypes.length; i++) {
cob.dup()
.loadConstant(i);
@@ -682,7 +720,7 @@ final class ProxyGenerator {
cob.aconst_null();
}

cob.invokeinterface(pg.invoke);
cob.invokeinterface(invocationHandlerInvoke);

if (returnType == void.class) {
cob.pop()
@@ -698,14 +736,14 @@ final class ProxyGenerator {
cob.athrow(); // just rethrow the exception
var c2 = cob.newBoundLabel();
cob.exceptionCatchAll(cob.startLabel(), c1, c2)
.new_(pg.ute)
.new_(uteCE)
.dup_x1()
.swap()
.invokespecial(pg.uteInit)
.invokespecial(uteInit)
.athrow()
.with(StackMapTableAttribute.of(List.of(
StackMapFrameInfo.of(c1, List.of(), pg.throwableStack),
StackMapFrameInfo.of(c2, List.of(), pg.throwableStack))));
StackMapFrameInfo.of(c1, List.of(), throwableStack),
StackMapFrameInfo.of(c2, List.of(), throwableStack))));
}
}));
}
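
A proxy method now loads its Method from a static field instead of a dynamic constant, then dispatches to the invocation handler. Roughly, in Java-source terms (the handler field and m0 are as in the sketch above; names illustrative):

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.UndeclaredThrowableException;

abstract class ProxyMethodSketch {
    protected InvocationHandler h; // the Proxy.h field the generated code reads
    private static Method m0;      // resolved once in <clinit>, see the sketch above

    @Override
    public int hashCode() {
        try {
            // getstatic m0, invokeinterface InvocationHandler.invoke, unbox the result
            return (Integer) h.invoke(this, m0, null);
        } catch (RuntimeException | Error e) {
            throw e; // unchecked and declared exceptions are rethrown as-is
        } catch (Throwable t) {
            throw new UndeclaredThrowableException(t);
        }
    }
}
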
@@ -720,7 +758,7 @@ final class ProxyGenerator {
if (type.isPrimitive()) {
cob.loadLocal(TypeKind.from(type).asLoadable(), slot);
PrimitiveTypeInfo prim = PrimitiveTypeInfo.get(type);
cob.invokestatic(prim.wrapperMethodRef(cob.constantPool()));
cob.invokestatic(prim.wrapperMethodRef(cp));
} else {
cob.aload(slot);
}
@@ -736,7 +774,7 @@ final class ProxyGenerator {
PrimitiveTypeInfo prim = PrimitiveTypeInfo.get(type);

cob.checkcast(prim.wrapperClass)
.invokevirtual(prim.unwrapMethodRef(cob.constantPool()))
.invokevirtual(prim.unwrapMethodRef(cp))
.return_(TypeKind.from(type).asLoadable());
} else {
cob.checkcast(toClassDesc(type))
@@ -744,6 +782,57 @@ final class ProxyGenerator {
}
}

/**
* Generate code for initializing the static field that stores
* the Method object for this proxy method. A class loader is
* anticipated at local variable index 0.
* The generated code must be run in an AccessController.doPrivileged
* block if a SecurityManager is present, as otherwise the code
* cannot pass {@code null} ClassLoader to forName.
*/
private void codeFieldInitialization(CodeBuilder cob) {
var cp = cob.constantPool();
codeClassForName(cob, fromClass);

cob.ldc(method.getName())
.loadConstant(parameterTypes.length)
.anewarray(classCE);

// Construct an array with the parameter types mapping primitives to Wrapper types
for (int i = 0; i < parameterTypes.length; i++) {
cob.dup()
.loadConstant(i);
if (parameterTypes[i].isPrimitive()) {
PrimitiveTypeInfo prim = PrimitiveTypeInfo.get(parameterTypes[i]);
cob.getstatic(prim.typeFieldRef(cp));
} else {
codeClassForName(cob, parameterTypes[i]);
}
cob.aastore();
}
// lookup the method
cob.invokevirtual(classGetMethod)
.putstatic(methodField);
}

/*
* =============== Code Generation Utility Methods ===============
*/

/**
* Generate code to invoke the Class.forName with the name of the given
* class to get its Class object at runtime. The code is written to
* the supplied stream. Note that the code generated by this method
* may cause the checked ClassNotFoundException to be thrown. A class
* loader is anticipated at local variable index 0.
*/
private void codeClassForName(CodeBuilder cob, Class<?> cl) {
cob.ldc(cl.getName())
.iconst_0() // false
.aload(0)// classLoader
.invokestatic(classForName);
}

@Override
public String toString() {
return method.toShortString();
@@ -810,5 +899,9 @@ final class ProxyGenerator {
public MethodRefEntry unwrapMethodRef(ConstantPoolBuilder cp) {
return cp.methodRefEntry(wrapperClass, unwrapMethodName, unwrapMethodType);
}

public FieldRefEntry typeFieldRef(ConstantPoolBuilder cp) {
return cp.fieldRefEntry(wrapperClass, "TYPE", CD_Class);
}
}
}

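codeFieldInitialization and codeClassForName emit bytecode equivalent to resolving each Method eagerly, with primitive parameter types mapped to their wrapper TYPE fields rather than Class.forName. A hedged source-level sketch (com.example.Foo and its method bar are hypothetical):

import java.lang.reflect.Method;

final class FieldInitSketch {
    // Resolves "int bar(String)" on com.example.Foo the way the generated
    // <clinit> does: primitives via Wrapper.TYPE, references via forName.
    static Method resolve(ClassLoader cl) throws ReflectiveOperationException {
        Class<?> decl = Class.forName("com.example.Foo", false, cl);
        Class<?>[] ptypes = { Integer.TYPE,
                              Class.forName("java.lang.String", false, cl) };
        return decl.getMethod("bar", ptypes);
    }
}
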
@@ -29,7 +29,6 @@ package java.nio;

import java.lang.foreign.MemorySegment;
import java.util.Objects;
import jdk.internal.util.ArraysSupport;

/**
#if[rw]
@@ -706,9 +705,6 @@ class Heap$Type$Buffer$RW$
addr, segment)));
}

public int hashCode() {
return ArraysSupport.hashCode(hb, ix(position()), remaining(), 1);
}

#end[byte]

@@ -737,9 +733,6 @@ class Heap$Type$Buffer$RW$
offset, segment);
}

public int hashCode() {
return ArraysSupport.hashCode(hb, ix(position()), remaining(), 1);
}
#end[char]

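The removed template methods delegated to jdk.internal.util.ArraysSupport.hashCode, presumably now inherited from the shared buffer superclass rather than duplicated per type. Functionally that call is the classic 31-based polynomial hash over a sub-range, seeded with an initial value of 1, as in this sketch:

final class RangeHash {
    // Equivalent loop for ArraysSupport.hashCode(hb, from, len, 1): hash the
    // sub-range [from, from + len) with the usual 31-multiplier recurrence.
    static int hashCode(byte[] a, int from, int len, int initial) {
        int h = initial;
        for (int i = from; i < from + len; i++) {
            h = 31 * h + a[i];
        }
        return h;
    }
}
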
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -293,7 +293,7 @@ public interface Gatherer<T, A, R> {
*
* @implSpec This method always returns the same instance.
*
* @see Gatherer#finisher()
* @see Gatherer#combiner()
* @return the instance of the default combiner
* @param <A> the type of the state of the returned combiner
*/

@@ -68,24 +68,19 @@ public abstract sealed class AbstractPoolEntry {
*/

private static final int TAG_SMEAR = 0x13C4B2D1;
private static final int INT_PHI = 0x9E3779B9;
static final int NON_ZERO = 0x40000000;

public static int hash1(int tag, int x1) {
return phiMix(tag * TAG_SMEAR + x1);
return (tag * TAG_SMEAR + x1) | NON_ZERO;
}

public static int hash2(int tag, int x1, int x2) {
return phiMix(tag * TAG_SMEAR + x1 + 31*x2);
return (tag * TAG_SMEAR + x1 + 31 * x2) | NON_ZERO;
}

// Ensure that hash is never zero
public static int hashString(int stringHash) {
return phiMix(stringHash | (1 << 30));
}

public static int phiMix(int x) {
int h = x * INT_PHI;
return h ^ (h >> 16);
return stringHash | NON_ZERO;
}

public static Utf8Entry rawUtf8EntryFromStandardAttributeName(String name) {

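The patch drops the phiMix scrambling pass in favor of a cheaper guarantee: OR-ing in bit 30 makes every hash provably non-zero, so zero can keep meaning "not yet computed". A self-contained sketch with TAG_SMEAR inlined:

final class NonZeroHash {
    static final int NON_ZERO = 0x40000000;

    // Bit 30 is always set in the result, so the hash can never be zero,
    // whatever the inputs; no second mixing step is needed for that property.
    static int hash1(int tag, int x1) {
        return (tag * 0x13C4B2D1 + x1) | NON_ZERO;
    }

    public static void main(String[] args) {
        System.out.println(hash1(0, 0) != 0); // true for any inputs
    }
}
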
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,13 +51,13 @@ public final class BlockCodeBuilderImpl

public void start() {
topLocal = topLocal(parent);
terminalMaxLocals = topLocal(terminal);
terminal.with((LabelTarget) startLabel);
terminalMaxLocals = terminal.curTopLocal();
parent.with((LabelTarget) startLabel);
}

public void end() {
terminal.with((LabelTarget) endLabel);
if (terminalMaxLocals != topLocal(terminal)) {
parent.with((LabelTarget) endLabel);
if (terminalMaxLocals != terminal.curTopLocal()) {
throw new IllegalStateException("Interference in local variable slot management");
}
}
@@ -73,10 +73,8 @@ public final class BlockCodeBuilderImpl
private int topLocal(CodeBuilder parent) {
return switch (parent) {
case BlockCodeBuilderImpl b -> b.topLocal;
case ChainedCodeBuilder b -> topLocal(b.terminal);
case DirectCodeBuilder b -> b.curTopLocal();
case BufferedCodeBuilder b -> b.curTopLocal();
case TransformingCodeBuilder b -> topLocal(b.delegate);
case ChainedCodeBuilder b -> b.terminal.curTopLocal();
case TerminalCodeBuilder b -> b.curTopLocal();
};
}

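Hoisting curTopLocal() into the sealed TerminalCodeBuilder interface lets the switch match one supertype instead of each implementation, while staying exhaustive. A sketch of the pattern, with illustrative names rather than the JDK's:

class TopLocalDemo {
    sealed interface Builder permits Block, Chained, Terminal {}
    record Block(int topLocal) implements Builder {}
    record Chained(Builder terminal) implements Builder {}
    record Terminal(int curTopLocal) implements Builder {}

    // Exhaustive without a default clause: the sealed hierarchy is closed,
    // and one Terminal case now covers what used to be several subtypes.
    static int topLocal(Builder b) {
        return switch (b) {
            case Block blk -> blk.topLocal();
            case Chained c -> topLocal(c.terminal());
            case Terminal t -> t.curTopLocal();
        };
    }
}
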
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -76,9 +76,7 @@ public final class BootstrapMethodEntryImpl implements BootstrapMethodEntry {

static int computeHashCode(MethodHandleEntryImpl handle,
List<? extends LoadableConstantEntry> arguments) {
int hash = handle.hashCode();
hash = 31 * hash + arguments.hashCode();
return AbstractPoolEntry.phiMix(hash);
return (31 * handle.hashCode() + arguments.hashCode()) | AbstractPoolEntry.NON_ZERO;
}

@Override

@@ -988,77 +988,77 @@ public abstract sealed class BoundAttribute<T extends Attribute<T>>
public static AttributeMapper<?> standardAttribute(Utf8Entry name) {
// critical bootstrap path, so no lambdas nor method handles here
return switch (name.hashCode()) {
case 0x78147009 ->
case 0x46699ff2 ->
name.equalsString(NAME_ANNOTATION_DEFAULT) ? annotationDefault() : null;
case 0x665e3a3a ->
case 0x5208e184 ->
name.equalsString(NAME_BOOTSTRAP_METHODS) ? bootstrapMethods() : null;
case 0xcb7e162 ->
case 0xcb60907a ->
name.equalsString(NAME_CHARACTER_RANGE_TABLE) ? characterRangeTable() : null;
case 0x21e41e7e ->
case 0x4020220d ->
name.equalsString(NAME_CODE) ? code() : null;
case 0x5a306b41 ->
case 0xc20dd1fe ->
name.equalsString(NAME_COMPILATION_ID) ? compilationId() : null;
case 0x3e191c7c ->
case 0xcab1940d ->
name.equalsString(NAME_CONSTANT_VALUE) ? constantValue() : null;
case 0x5e88ed0c ->
case 0x558641d3 ->
name.equalsString(NAME_DEPRECATED) ? deprecated() : null;
case 0x7284695e ->
case 0x51d443cd ->
name.equalsString(NAME_ENCLOSING_METHOD) ? enclosingMethod() : null;
case 0x21df25db ->
case 0x687c1624 ->
name.equalsString(NAME_EXCEPTIONS) ? exceptions() : null;
case 0x11392da9 ->
case 0x7adb2910 ->
name.equalsString(NAME_INNER_CLASSES) ? innerClasses() : null;
case 0x167536fc ->
case 0x653f0551 ->
name.equalsString(NAME_LINE_NUMBER_TABLE) ? lineNumberTable() : null;
case 0x46939abc ->
case 0x64c75927 ->
name.equalsString(NAME_LOCAL_VARIABLE_TABLE) ? localVariableTable() : null;
case 0x63ee67f4 ->
case 0x6697f98d ->
name.equalsString(NAME_LOCAL_VARIABLE_TYPE_TABLE) ? localVariableTypeTable() : null;
case 0x2b597e15 ->
case 0xdbb0cdcb ->
name.equalsString(NAME_METHOD_PARAMETERS) ? methodParameters() : null;
case 0x19f20ade ->
case 0xc9b0928c ->
name.equalsString(NAME_MODULE) ? module() : null;
case 0x47f6395e ->
case 0x41cd27e8 ->
name.equalsString(NAME_MODULE_HASHES) ? moduleHashes() : null;
case 0x54db809 ->
case 0x7deb0a13 ->
name.equalsString(NAME_MODULE_MAIN_CLASS) ? moduleMainClass() : null;
case 0x1abd1c2c ->
case 0x6706ff99 ->
name.equalsString(NAME_MODULE_PACKAGES) ? modulePackages() : null;
case 0x6ba46dd ->
case 0x60272858 ->
name.equalsString(NAME_MODULE_RESOLUTION) ? moduleResolution() : null;
case 0x46f7d91d ->
case 0x5646d73d ->
name.equalsString(NAME_MODULE_TARGET) ? moduleTarget() : null;
case 0x5137f53 ->
case 0x50336c40 ->
name.equalsString(NAME_NEST_HOST) ? nestHost() : null;
case 0x4a8fa3b6 ->
case 0x4735ab81 ->
name.equalsString(NAME_NEST_MEMBERS) ? nestMembers() : null;
case 0x55c73cb6 ->
case 0x7100d9fe ->
name.equalsString(NAME_PERMITTED_SUBCLASSES) ? permittedSubclasses() : null;
case 0x3fe76d4e ->
case 0xd1ab5871 ->
name.equalsString(NAME_RECORD) ? record() : null;
case 0x180d6925 ->
case 0x7588550f ->
name.equalsString(NAME_RUNTIME_INVISIBLE_ANNOTATIONS) ? runtimeInvisibleAnnotations() : null;
case 0x7be22752 ->
case 0xcc74da30 ->
name.equalsString(NAME_RUNTIME_INVISIBLE_PARAMETER_ANNOTATIONS) ? runtimeInvisibleParameterAnnotations() : null;
case 0x5299824 ->
case 0xf67697f5 ->
name.equalsString(NAME_RUNTIME_INVISIBLE_TYPE_ANNOTATIONS) ? runtimeInvisibleTypeAnnotations() : null;
case 0x3534786e ->
case 0xe0837d2a ->
name.equalsString(NAME_RUNTIME_VISIBLE_ANNOTATIONS) ? runtimeVisibleAnnotations() : null;
case 0xb4b4ac6 ->
case 0xc945a075 ->
name.equalsString(NAME_RUNTIME_VISIBLE_PARAMETER_ANNOTATIONS) ? runtimeVisibleParameterAnnotations() : null;
case 0x6926482 ->
case 0x611a3a90 ->
name.equalsString(NAME_RUNTIME_VISIBLE_TYPE_ANNOTATIONS) ? runtimeVisibleTypeAnnotations() : null;
case 0x16a42b7c ->
case 0xf76fb898 ->
name.equalsString(NAME_SIGNATURE) ? signature() : null;
case 0x400ab245 ->
case 0x6b41b047 ->
name.equalsString(NAME_SOURCE_DEBUG_EXTENSION) ? sourceDebugExtension() : null;
case 0x2af490d4 ->
case 0x748c2857 ->
name.equalsString(NAME_SOURCE_FILE) ? sourceFile() : null;
case 0x303e0c58 ->
case 0x6bf13a96 ->
name.equalsString(NAME_SOURCE_ID) ? sourceId() : null;
case 0x19c7d0cd ->
case 0xfa85ee5a ->
name.equalsString(NAME_STACK_MAP_TABLE) ? stackMapTable() : null;
case 0x3dc79b7a ->
case 0xf2670725 ->
name.equalsString(NAME_SYNTHETIC) ? synthetic() : null;
default -> null;
};

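Every case constant changed because the Utf8Entry hash formula changed (see the AbstractPoolEntry hunk above); the dispatch idiom itself is unchanged: switch on a precomputed hash, then confirm with an exact string comparison so collisions fall through to null. An illustrative sketch using String.hashCode in place of the pool hash:

final class HashSwitch {
    // Illustrative only: the JDK code switches on Utf8Entry.hashCode(), not
    // String.hashCode(), and covers all standard attribute names.
    static String classify(String name) {
        return switch (name.hashCode()) {
            case 2105869 -> name.equals("Code") ? "code-attribute" : null; // "Code".hashCode()
            default -> null;
        };
    }
}
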
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -43,7 +43,7 @@ import java.util.Optional;
import java.util.function.Consumer;

public final class BufferedCodeBuilder
implements TerminalCodeBuilder, LabelContext {
implements TerminalCodeBuilder {
private final SplitConstantPool constantPool;
private final ClassFileImpl context;
private final List<CodeElement> elements = new ArrayList<>();