Mirror of https://github.com/JetBrains/JetBrainsRuntime.git
Synced 2025-12-08 18:39:40 +01:00

Compare commits: main...jdk-25.0.1 (156 commits)
```text
78770bfaef 5100536d49 415f2adffb f92ad752ab 4f265785a9 6c48f4ed70
c5d85e09e1 408ae8637c 1962c746dc c02fce22ed 73c28c2e3d 1e2bf070f0
24936b9295 b5bec8db3f 9bdf9ebadd 99f80700d7 d30e89c381 1d92cd3517
6fcaf66539 7b69679175 bf31e50754 9fe2aa59ff 3db8262445 033a121c96
1867effcc0 3eee56e456 5dab0808b1 987af5af16 e8f2cd8f3d e599ee4a88
3a8e9dfe85 347084bfbd 5cc7a31b3f f1f6452e01 331adac38e e989c1d138
5129887dfe 69ea85ee12 93260d639e b67fb82a03 a626c1d92c 533211af73
07bb0e3e2f 60196a6b6f 0e6bf00550 e1926a6d0e 03a67a969b cf92877aa5
05bf5e3a50 cc2cf97834 121f5a72e4 52e1e739af 5ae719c8fc 3ec6eb6482
fae2345971 6e490a465a 2555b5a632 caac8172ad d1ea951d39 7aa3f31724
ce85123f3a 20fc8f74d5 db6230991b dd82a0922b 9f21845262 c5d0f1bc5e
c374ac6df4 98bc22a969 05dab283f2 8229274b2d 44f5dfef97 9adc480ec3
4d5211ccb0 e92f387ab5 96380509b3 9b99ed8b39 532b1c732e 1de8943731
50751da562 83d69cab8b 21cb2acda0 0e4422b284 1e985168d6 b8965318c1
afe6bd6910 5500a2d134 b3b5595362 5e716fd7d1 3e93b98baf 1ce41821b5
829742bcb4 9a73987f9b 622c743470 8707167ef3 e3bd9c6e1c 993215f3dd
8a98738f44 ab01396209 92268e17be 94b6b99ba4 a98a5e54fc b245c517e3
0a151c68d6 554e38dd5a 26d99e045a 16addb192b b5b0b3a33a 0dc9e8447b
12ffb0c131 eaaaae5be9 926c900efa 658f80e392 274a2dd729 a84946dde4
fdb3e37c71 e23c817521 0ad5402463 80cb773b7e a576952039 b89f364842
0694cc1d52 a3abaadc15 7cc1f82b84 636b56374e fe9efb75b0 ca6b165003
d5aa225451 79a85df074 41928aed7d 3f6b0c69c3 36b185a930 c832f001e4
e5ac75a35b b79ca5f03b ee45ba9138 5bcea92eaa cc4e9716ac 46cfc1e194
ae71782e77 753700182d eb727dcb51 b6cacfcbc8 d870a48880 2ea2f74f92
077ce2edc7 2a3294571a 3877746eb9 3bd80fe3ba 03232d4a5d 4111730845
74ea38e406 839a91e14b aa4f79eaec bff98e7d4d c7df72ff0f 80e066e733
```
.github/workflows/main.yml (4 changed lines, vendored):

```diff
@@ -310,7 +310,7 @@ jobs:
     uses: ./.github/workflows/build-windows.yml
     with:
       platform: windows-x64
-      msvc-toolset-version: '14.43'
+      msvc-toolset-version: '14.44'
       msvc-toolset-architecture: 'x86.x64'
       configure-arguments: ${{ github.event.inputs.configure-arguments }}
      make-arguments: ${{ github.event.inputs.make-arguments }}
@@ -322,7 +322,7 @@ jobs:
     uses: ./.github/workflows/build-windows.yml
     with:
       platform: windows-aarch64
-      msvc-toolset-version: '14.43'
+      msvc-toolset-version: '14.44'
       msvc-toolset-architecture: 'arm64'
       make-target: 'hotspot'
       extra-conf-options: '--openjdk-target=aarch64-unknown-cygwin'
```
```diff
@@ -1,7 +1,7 @@
 [general]
-project=jdk
+project=jdk-updates
 jbs=JDK
-version=25
+version=25.0.1
 
 [checks]
 error=author,committer,reviewers,merge,issues,executable,symlink,message,hg-tag,whitespace,problemlists,copyright
```
make/autoconf/configure (4 changed lines, vendored):

```diff
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -366,7 +366,7 @@ EOT
 
 # Print additional help, e.g. a list of toolchains and JVM features.
 # This must be done by the autoconf script.
-( CONFIGURE_PRINT_ADDITIONAL_HELP=true . $generated_script PRINTF=printf )
+( CONFIGURE_PRINT_ADDITIONAL_HELP=true . $generated_script PRINTF=printf ECHO=echo )
 
 cat <<EOT
 
```
```diff
@@ -28,15 +28,15 @@
 
 DEFAULT_VERSION_FEATURE=25
 DEFAULT_VERSION_INTERIM=0
-DEFAULT_VERSION_UPDATE=0
+DEFAULT_VERSION_UPDATE=1
 DEFAULT_VERSION_PATCH=0
 DEFAULT_VERSION_EXTRA1=0
 DEFAULT_VERSION_EXTRA2=0
 DEFAULT_VERSION_EXTRA3=0
-DEFAULT_VERSION_DATE=2025-09-16
+DEFAULT_VERSION_DATE=2025-10-21
 DEFAULT_VERSION_CLASSFILE_MAJOR=69 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
 DEFAULT_VERSION_CLASSFILE_MINOR=0
 DEFAULT_VERSION_DOCS_API_SINCE=11
 DEFAULT_ACCEPTABLE_BOOT_VERSIONS="24 25"
 DEFAULT_JDK_SOURCE_TARGET_VERSION=25
-DEFAULT_PROMOTED_VERSION_PRE=ea
+DEFAULT_PROMOTED_VERSION_PRE=
```
```diff
@@ -542,10 +542,10 @@ class Bundle {
         if (pattern != null) {
             // Perform date-time format pattern conversion which is
             // applicable to both SimpleDateFormat and j.t.f.DateTimeFormatter.
-            String transPattern = translateDateFormatLetters(calendarType, pattern, this::convertDateTimePatternLetter);
+            String transPattern = translateDateFormatLetters(calendarType, key, pattern, this::convertDateTimePatternLetter);
             dateTimePatterns.add(i, transPattern);
             // Additionally, perform SDF specific date-time format pattern conversion
-            sdfPatterns.add(i, translateDateFormatLetters(calendarType, transPattern, this::convertSDFLetter));
+            sdfPatterns.add(i, translateDateFormatLetters(calendarType, key, transPattern, this::convertSDFLetter));
         } else {
             dateTimePatterns.add(i, null);
             sdfPatterns.add(i, null);
@@ -568,7 +568,7 @@ class Bundle {
         }
     }
 
-    private String translateDateFormatLetters(CalendarType calendarType, String cldrFormat, ConvertDateTimeLetters converter) {
+    private String translateDateFormatLetters(CalendarType calendarType, String patternKey, String cldrFormat, ConvertDateTimeLetters converter) {
         String pattern = cldrFormat;
         int length = pattern.length();
         boolean inQuote = false;
@@ -587,7 +587,7 @@ class Bundle {
                 if (nextc == '\'') {
                     i++;
                     if (count != 0) {
-                        converter.convert(calendarType, lastLetter, count, jrePattern);
+                        converter.convert(calendarType, patternKey, lastLetter, count, jrePattern);
                         lastLetter = 0;
                         count = 0;
                     }
@@ -597,7 +597,7 @@ class Bundle {
             }
             if (!inQuote) {
                 if (count != 0) {
-                    converter.convert(calendarType, lastLetter, count, jrePattern);
+                    converter.convert(calendarType, patternKey, lastLetter, count, jrePattern);
                     lastLetter = 0;
                     count = 0;
                 }
@@ -614,7 +614,7 @@ class Bundle {
             }
             if (!(c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z')) {
                 if (count != 0) {
-                    converter.convert(calendarType, lastLetter, count, jrePattern);
+                    converter.convert(calendarType, patternKey, lastLetter, count, jrePattern);
                     lastLetter = 0;
                     count = 0;
                 }
@@ -627,7 +627,7 @@ class Bundle {
                 count++;
                 continue;
             }
-            converter.convert(calendarType, lastLetter, count, jrePattern);
+            converter.convert(calendarType, patternKey, lastLetter, count, jrePattern);
             lastLetter = c;
             count = 1;
         }
@@ -637,7 +637,7 @@ class Bundle {
         }
 
         if (count != 0) {
-            converter.convert(calendarType, lastLetter, count, jrePattern);
+            converter.convert(calendarType, patternKey, lastLetter, count, jrePattern);
         }
         if (cldrFormat.contentEquals(jrePattern)) {
             return cldrFormat;
@@ -661,7 +661,7 @@ class Bundle {
      * on the support given by the SimpleDateFormat and the j.t.f.DateTimeFormatter
      * for date-time formatting.
      */
-    private void convertDateTimePatternLetter(CalendarType calendarType, char cldrLetter, int count, StringBuilder sb) {
+    private void convertDateTimePatternLetter(CalendarType calendarType, String patternKey, char cldrLetter, int count, StringBuilder sb) {
         switch (cldrLetter) {
             case 'u':
             case 'U':
@@ -683,7 +683,7 @@ class Bundle {
      * Perform a conversion of CLDR date-time format pattern letter which is
      * specific to the SimpleDateFormat.
      */
-    private void convertSDFLetter(CalendarType calendarType, char cldrLetter, int count, StringBuilder sb) {
+    private void convertSDFLetter(CalendarType calendarType, String patternKey, char cldrLetter, int count, StringBuilder sb) {
         switch (cldrLetter) {
             case 'G':
                 if (calendarType != CalendarType.GREGORIAN) {
@@ -722,6 +722,17 @@ class Bundle {
                 appendN('z', count, sb);
                 break;
 
+            case 'y':
+                // If the style is FULL/LONG for a Japanese Calendar, make the
+                // count == 4 for Gan-nen
+                if (calendarType == CalendarType.JAPANESE &&
+                        (patternKey.contains("full-") ||
+                         patternKey.contains("long-"))) {
+                    count = 4;
+                }
+                appendN(cldrLetter, count, sb);
+                break;
+
             case 'Z':
                 if (count == 4 || count == 5) {
                     sb.append("XXX");
@@ -767,6 +778,7 @@ class Bundle {
                 .collect(Collectors.toMap(
                     e -> calendarPrefix + e.getKey(),
                     e -> translateDateFormatLetters(calendarType,
+                                                    e.getKey(),
                                                     (String)e.getValue(),
                                                     this::convertDateTimePatternLetter)
                 ))
@@ -775,7 +787,7 @@ class Bundle {
 
     @FunctionalInterface
     private interface ConvertDateTimeLetters {
-        void convert(CalendarType calendarType, char cldrLetter, int count, StringBuilder sb);
+        void convert(CalendarType calendarType, String patternKey, char cldrLetter, int count, StringBuilder sb);
     }
 
     /**
```
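The `patternKey` parameter threaded through `translateDateFormatLetters` and the converters above lets `convertSDFLetter` recognize FULL/LONG Japanese-calendar pattern keys and widen `y` to four letters; with a four-letter year field, `SimpleDateFormat` can render year 1 of an era as 元年 (Gan-nen). A small sketch of the user-visible behavior this converter change feeds into, using only standard JDK APIs (the exact output depends on the JDK's CLDR data, so treat the printed string as an expectation, not a guarantee):

```java
import java.text.DateFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.Locale;

public class GannenDemo {
    public static void main(String[] args) {
        // Japanese imperial calendar locale; May 1, 2019 is Reiwa year 1.
        Locale japaneseCalendar = Locale.forLanguageTag("ja-JP-u-ca-japanese");
        DateFormat longStyle = DateFormat.getDateInstance(DateFormat.LONG, japaneseCalendar);
        Date firstReiwaDay = new GregorianCalendar(2019, Calendar.MAY, 1).getTime();
        // With Gan-nen handling in effect this should print 令和元年5月1日
        // rather than 令和1年5月1日.
        System.out.println(longStyle.format(firstReiwaDay));
    }
}
```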
```diff
@@ -456,13 +456,13 @@ SliderDemo.horizontal=Horizontal
 SliderDemo.vertical=Vertikal
 SliderDemo.plain=Einfach
 SliderDemo.a_plain_slider=Ein einfacher Schieberegler
-SliderDemo.majorticks=Grobteilungen
-SliderDemo.majorticksdescription=Ein Schieberegler mit Grobteilungsmarkierungen
-SliderDemo.ticks=Feinteilungen, Teilungen zum Einrasten und Labels
-SliderDemo.minorticks=Feinteilungen
-SliderDemo.minorticksdescription=Ein Schieberegler mit Grob- und Feinteilungen, mit Teilungen, in die der Schieberegler einrastet, wobei einige Teilungen mit einem sichtbaren Label versehen sind
+SliderDemo.majorticks=Hauptteilstriche
+SliderDemo.majorticksdescription=Ein Schieberegler mit Hauptteilstrichen
+SliderDemo.ticks=Hilfsteilstriche, zum Einrasten und Beschriften
+SliderDemo.minorticks=Hilfsteilstriche
+SliderDemo.minorticksdescription=Ein Schieberegler mit Haupt- und Hilfsteilstrichen, in die der Schieberegler einrastet, wobei einige Teilstriche mit einer sichtbaren Beschriftung versehen sind
 SliderDemo.disabled=Deaktiviert
-SliderDemo.disableddescription=Ein Schieberegler mit Grob- und Feinteilungen, der nicht aktiviert ist (kann nicht bearbeitet werden)
+SliderDemo.disableddescription=Ein Schieberegler mit Haupt- und Hilfsteilstrichen, der nicht aktiviert ist (kann nicht bearbeitet werden)
 
 ### SplitPane Demo ###
```
```diff
@@ -292,7 +292,8 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
   } else {
     assert(is_phantom, "only remaining strength");
     assert(!is_narrow, "phantom access cannot be narrow");
-    __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom));
+    // AOT saved adapters need relocation for this call.
+    __ lea(lr, RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom)));
   }
   __ blr(lr);
   __ mov(rscratch1, r0);
```
```diff
@@ -8888,13 +8888,8 @@ instruct TailCalljmpInd(IPRegP jump_target, inline_cache_regP method_ptr) %{
   match(TailCall jump_target method_ptr);
 
   ins_cost(CALL_COST);
-  format %{ "MOV    Rexception_pc, LR\n\t"
-            "jump   $jump_target  \t! $method_ptr holds method" %}
+  format %{ "jump   $jump_target  \t! $method_ptr holds method" %}
   ins_encode %{
-    __ mov(Rexception_pc, LR);   // this is used only to call
-                                 // StubRoutines::forward_exception_entry()
-                                 // which expects PC of exception in
-                                 // R5. FIXME?
     __ jump($jump_target$$Register);
   %}
   ins_pipe(tail_call);
@@ -8939,8 +8934,10 @@ instruct ForwardExceptionjmp()
   match(ForwardException);
   ins_cost(CALL_COST);
 
-  format %{ "b    forward_exception_stub" %}
+  format %{ "MOV    Rexception_pc, LR\n\t"
+            "b      forward_exception_entry" %}
   ins_encode %{
+    __ mov(Rexception_pc, LR);
     // OK to trash Rtemp, because Rtemp is used by stub
     __ jump(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type, Rtemp);
   %}
```
```diff
@@ -3928,8 +3928,10 @@ void MacroAssembler::kernel_crc32_vpmsum_aligned(Register crc, Register buf, Reg
   Label L_outer_loop, L_inner_loop, L_last;
 
   // Set DSCR pre-fetch to deepest.
-  load_const_optimized(t0, VM_Version::_dscr_val | 7);
-  mtdscr(t0);
+  if (VM_Version::has_mfdscr()) {
+    load_const_optimized(t0, VM_Version::_dscr_val | 7);
+    mtdscr(t0);
+  }
 
   mtvrwz(VCRC, crc); // crc lives in VCRC, now
 
@@ -4073,8 +4075,10 @@ void MacroAssembler::kernel_crc32_vpmsum_aligned(Register crc, Register buf, Reg
   // ********** Main loop end **********
 
   // Restore DSCR pre-fetch value.
-  load_const_optimized(t0, VM_Version::_dscr_val);
-  mtdscr(t0);
+  if (VM_Version::has_mfdscr()) {
+    load_const_optimized(t0, VM_Version::_dscr_val);
+    mtdscr(t0);
+  }
 
   // ********** Simple loop for remaining 16 byte blocks **********
   {
```
```diff
@@ -952,8 +952,10 @@ class StubGenerator: public StubCodeGenerator {
     address start_pc = __ pc();
     Register tmp1 = R6_ARG4;
     // probably copy stub would have changed value reset it.
-    __ load_const_optimized(tmp1, VM_Version::_dscr_val);
-    __ mtdscr(tmp1);
+    if (VM_Version::has_mfdscr()) {
+      __ load_const_optimized(tmp1, VM_Version::_dscr_val);
+      __ mtdscr(tmp1);
+    }
     __ li(R3_RET, 0); // return 0
     __ blr();
     return start_pc;
@@ -1070,9 +1072,10 @@ class StubGenerator: public StubCodeGenerator {
       __ dcbt(R3_ARG1, 0);
 
       // If supported set DSCR pre-fetch to deepest.
-      __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
-      __ mtdscr(tmp2);
-
+      if (VM_Version::has_mfdscr()) {
+        __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
+        __ mtdscr(tmp2);
+      }
       __ li(tmp1, 16);
 
       // Backbranch target aligned to 32-byte. Not 16-byte align as
@@ -1092,8 +1095,10 @@ class StubGenerator: public StubCodeGenerator {
      __ bdnz(l_10);                      // Dec CTR and loop if not zero.
 
       // Restore DSCR pre-fetch value.
-      __ load_const_optimized(tmp2, VM_Version::_dscr_val);
-      __ mtdscr(tmp2);
+      if (VM_Version::has_mfdscr()) {
+        __ load_const_optimized(tmp2, VM_Version::_dscr_val);
+        __ mtdscr(tmp2);
+      }
 
     } // FasterArrayCopy
 
@@ -1344,8 +1349,10 @@ class StubGenerator: public StubCodeGenerator {
       __ dcbt(R3_ARG1, 0);
 
       // If supported set DSCR pre-fetch to deepest.
-      __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
-      __ mtdscr(tmp2);
+      if (VM_Version::has_mfdscr()) {
+        __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
+        __ mtdscr(tmp2);
+      }
       __ li(tmp1, 16);
 
       // Backbranch target aligned to 32-byte. It's not aligned 16-byte
@@ -1365,8 +1372,11 @@ class StubGenerator: public StubCodeGenerator {
       __ bdnz(l_9);                       // Dec CTR and loop if not zero.
 
       // Restore DSCR pre-fetch value.
-      __ load_const_optimized(tmp2, VM_Version::_dscr_val);
-      __ mtdscr(tmp2);
+      if (VM_Version::has_mfdscr()) {
+        __ load_const_optimized(tmp2, VM_Version::_dscr_val);
+        __ mtdscr(tmp2);
+      }
 
     } // FasterArrayCopy
     __ bind(l_6);
@@ -1527,9 +1537,10 @@ class StubGenerator: public StubCodeGenerator {
       __ dcbt(R3_ARG1, 0);
 
       // Set DSCR pre-fetch to deepest.
-      __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
-      __ mtdscr(tmp2);
-
+      if (VM_Version::has_mfdscr()) {
+        __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
+        __ mtdscr(tmp2);
+      }
       __ li(tmp1, 16);
 
       // Backbranch target aligned to 32-byte. Not 16-byte align as
@@ -1549,9 +1560,10 @@ class StubGenerator: public StubCodeGenerator {
       __ bdnz(l_7);                       // Dec CTR and loop if not zero.
 
       // Restore DSCR pre-fetch value.
-      __ load_const_optimized(tmp2, VM_Version::_dscr_val);
-      __ mtdscr(tmp2);
-
+      if (VM_Version::has_mfdscr()) {
+        __ load_const_optimized(tmp2, VM_Version::_dscr_val);
+        __ mtdscr(tmp2);
+      }
 
     } // FasterArrayCopy
 
@@ -1672,9 +1684,10 @@ class StubGenerator: public StubCodeGenerator {
       __ dcbt(R3_ARG1, 0);
 
       // Set DSCR pre-fetch to deepest.
-      __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
-      __ mtdscr(tmp2);
-
+      if (VM_Version::has_mfdscr()) {
+        __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
+        __ mtdscr(tmp2);
+      }
       __ li(tmp1, 16);
 
       // Backbranch target aligned to 32-byte. Not 16-byte align as
@@ -1694,8 +1707,10 @@ class StubGenerator: public StubCodeGenerator {
       __ bdnz(l_4);
 
       // Restore DSCR pre-fetch value.
-      __ load_const_optimized(tmp2, VM_Version::_dscr_val);
-      __ mtdscr(tmp2);
+      if (VM_Version::has_mfdscr()) {
+        __ load_const_optimized(tmp2, VM_Version::_dscr_val);
+        __ mtdscr(tmp2);
+      }
 
       __ cmpwi(CR0, R5_ARG3, 0);
       __ beq(CR0, l_6);
@@ -1788,9 +1803,10 @@ class StubGenerator: public StubCodeGenerator {
       __ dcbt(R3_ARG1, 0);
 
       // Set DSCR pre-fetch to deepest.
-      __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
-      __ mtdscr(tmp2);
-
+      if (VM_Version::has_mfdscr()) {
+        __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
+        __ mtdscr(tmp2);
+      }
       __ li(tmp1, 16);
 
       // Backbranch target aligned to 32-byte. Not 16-byte align as
@@ -1810,8 +1826,10 @@ class StubGenerator: public StubCodeGenerator {
       __ bdnz(l_5); // Dec CTR and loop if not zero.
 
       // Restore DSCR pre-fetch value.
-      __ load_const_optimized(tmp2, VM_Version::_dscr_val);
-      __ mtdscr(tmp2);
+      if (VM_Version::has_mfdscr()) {
+        __ load_const_optimized(tmp2, VM_Version::_dscr_val);
+        __ mtdscr(tmp2);
+      }
 
     } // FasterArrayCopy
 
@@ -1910,9 +1928,10 @@ class StubGenerator: public StubCodeGenerator {
       __ dcbt(R3_ARG1, 0);
 
      // Set DSCR pre-fetch to deepest.
-      __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
-      __ mtdscr(tmp2);
-
+      if (VM_Version::has_mfdscr()) {
+        __ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
+        __ mtdscr(tmp2);
+      }
       __ li(tmp1, 16);
 
       // Backbranch target aligned to 32-byte. Not 16-byte align as
@@ -1932,8 +1951,10 @@ class StubGenerator: public StubCodeGenerator {
       __ bdnz(l_4);
 
       // Restore DSCR pre-fetch value.
-      __ load_const_optimized(tmp2, VM_Version::_dscr_val);
-      __ mtdscr(tmp2);
+      if (VM_Version::has_mfdscr()) {
+        __ load_const_optimized(tmp2, VM_Version::_dscr_val);
+        __ mtdscr(tmp2);
+      }
 
       __ cmpwi(CR0, R5_ARG3, 0);
       __ beq(CR0, l_1);
```
```diff
@@ -80,7 +80,9 @@ void VM_Version::initialize() {
          "%zu on this machine", PowerArchitecturePPC64);
 
   // Power 8: Configure Data Stream Control Register.
-  config_dscr();
+  if (VM_Version::has_mfdscr()) {
+    config_dscr();
+  }
 
   if (!UseSIGTRAP) {
     MSG(TrapBasedICMissChecks);
@@ -170,7 +172,8 @@ void VM_Version::initialize() {
   // Create and print feature-string.
   char buf[(num_features+1) * 16]; // Max 16 chars per feature.
   jio_snprintf(buf, sizeof(buf),
-               "ppc64 sha aes%s%s",
+               "ppc64 sha aes%s%s%s",
+               (has_mfdscr() ? " mfdscr" : ""),
                (has_darn() ? " darn" : ""),
                (has_brw() ? " brw" : "")
                // Make sure number of %s matches num_features!
@@ -488,6 +491,7 @@ void VM_Version::determine_features() {
   uint32_t *code = (uint32_t *)a->pc();
   // Keep R3_ARG1 unmodified, it contains &field (see below).
   // Keep R4_ARG2 unmodified, it contains offset = 0 (see below).
+  a->mfdscr(R0);
   a->darn(R7);
   a->brw(R5, R6);
   a->blr();
@@ -524,6 +528,7 @@ void VM_Version::determine_features() {
 
   // determine which instructions are legal.
   int feature_cntr = 0;
+  if (code[feature_cntr++]) features |= mfdscr_m;
   if (code[feature_cntr++]) features |= darn_m;
   if (code[feature_cntr++]) features |= brw_m;
 
```
```diff
@@ -32,12 +32,14 @@
 class VM_Version: public Abstract_VM_Version {
 protected:
   enum Feature_Flag {
+    mfdscr,
     darn,
     brw,
     num_features // last entry to count features
   };
   enum Feature_Flag_Set {
     unknown_m  = 0,
+    mfdscr_m   = (1 << mfdscr ),
     darn_m     = (1 << darn   ),
     brw_m      = (1 << brw    ),
     all_features_m = (unsigned long)-1
@@ -67,8 +69,9 @@ public:
 
   static bool is_determine_features_test_running() { return _is_determine_features_test_running; }
   // CPU instruction support
-  static bool has_darn() { return (_features & darn_m) != 0; }
-  static bool has_brw() { return (_features & brw_m) != 0; }
+  static bool has_mfdscr() { return (_features & mfdscr_m) != 0; } // Power8, but may be unavailable (QEMU)
+  static bool has_darn()   { return (_features & darn_m) != 0; }
+  static bool has_brw()    { return (_features & brw_m) != 0; }
 
   // Assembler testing
   static void allow_all();
```
```diff
@@ -2170,15 +2170,13 @@ void C2_MacroAssembler::enc_cmove_cmp_fp(int cmpFlag, FloatRegister op1, FloatRe
       cmov_cmp_fp_le(op1, op2, dst, src, is_single);
       break;
     case BoolTest::ge:
-      assert(false, "Should go to BoolTest::le case");
-      ShouldNotReachHere();
+      cmov_cmp_fp_ge(op1, op2, dst, src, is_single);
       break;
     case BoolTest::lt:
       cmov_cmp_fp_lt(op1, op2, dst, src, is_single);
       break;
     case BoolTest::gt:
-      assert(false, "Should go to BoolTest::lt case");
-      ShouldNotReachHere();
+      cmov_cmp_fp_gt(op1, op2, dst, src, is_single);
       break;
     default:
       assert(false, "unsupported compare condition");
```
```diff
@@ -1268,12 +1268,19 @@ void MacroAssembler::cmov_gtu(Register cmp1, Register cmp2, Register dst, Regist
 }
 
 // ----------- cmove, compare float -----------
+//
+// For CmpF/D + CMoveI/L, ordered ones are quite straight and simple,
+// so, just list behaviour of unordered ones as follow.
+//
+// Set dst (CMoveI (Binary cop (CmpF/D op1 op2)) (Binary dst src))
+// (If one or both inputs to the compare are NaN, then)
+// 1. (op1 lt op2) => true  => CMove: dst = src
+// 2. (op1 le op2) => true  => CMove: dst = src
+// 3. (op1 gt op2) => false => CMove: dst = dst
+// 4. (op1 ge op2) => false => CMove: dst = dst
+// 5. (op1 eq op2) => false => CMove: dst = dst
+// 6. (op1 ne op2) => true  => CMove: dst = src
 
-// Move src to dst only if cmp1 == cmp2,
-// otherwise leave dst unchanged, including the case where one of them is NaN.
-// Clarification:
-//   java code      : cmp1 != cmp2 ? dst : src
-//   transformed to : CMove dst, (cmp1 eq cmp2), dst, src
 void MacroAssembler::cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
   if (UseZicond) {
     if (is_single) {
@@ -1289,7 +1296,7 @@ void MacroAssembler::cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Regi
   Label no_set;
   if (is_single) {
     // jump if cmp1 != cmp2, including the case of NaN
-    // not jump (i.e. move src to dst) if cmp1 == cmp2
+    // fallthrough (i.e. move src to dst) if cmp1 == cmp2
     float_bne(cmp1, cmp2, no_set);
   } else {
     double_bne(cmp1, cmp2, no_set);
@@ -1298,11 +1305,6 @@ void MacroAssembler::cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Regi
   bind(no_set);
 }
 
-// Keep dst unchanged only if cmp1 == cmp2,
-// otherwise move src to dst, including the case where one of them is NaN.
-// Clarification:
-//   java code      : cmp1 == cmp2 ? dst : src
-//   transformed to : CMove dst, (cmp1 ne cmp2), dst, src
 void MacroAssembler::cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
   if (UseZicond) {
     if (is_single) {
@@ -1318,7 +1320,7 @@ void MacroAssembler::cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Regi
   Label no_set;
   if (is_single) {
     // jump if cmp1 == cmp2
-    // not jump (i.e. move src to dst) if cmp1 != cmp2, including the case of NaN
+    // fallthrough (i.e. move src to dst) if cmp1 != cmp2, including the case of NaN
     float_beq(cmp1, cmp2, no_set);
   } else {
     double_beq(cmp1, cmp2, no_set);
@@ -1327,14 +1329,6 @@ void MacroAssembler::cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Regi
   bind(no_set);
 }
 
-// When cmp1 <= cmp2 or any of them is NaN then dst = src, otherwise, dst = dst
-// Clarification
-// scenario 1:
-//   java code      : cmp2 < cmp1 ? dst : src
-//   transformed to : CMove dst, (cmp1 le cmp2), dst, src
-// scenario 2:
-//   java code      : cmp1 > cmp2 ? dst : src
-//   transformed to : CMove dst, (cmp1 le cmp2), dst, src
 void MacroAssembler::cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
   if (UseZicond) {
     if (is_single) {
@@ -1350,7 +1344,7 @@ void MacroAssembler::cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Regi
   Label no_set;
   if (is_single) {
     // jump if cmp1 > cmp2
-    // not jump (i.e. move src to dst) if cmp1 <= cmp2 or either is NaN
+    // fallthrough (i.e. move src to dst) if cmp1 <= cmp2 or either is NaN
     float_bgt(cmp1, cmp2, no_set);
   } else {
     double_bgt(cmp1, cmp2, no_set);
@@ -1359,14 +1353,30 @@ void MacroAssembler::cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Regi
   bind(no_set);
 }
 
-// When cmp1 < cmp2 or any of them is NaN then dst = src, otherwise, dst = dst
-// Clarification
-// scenario 1:
-//   java code      : cmp2 <= cmp1 ? dst : src
-//   transformed to : CMove dst, (cmp1 lt cmp2), dst, src
-// scenario 2:
-//   java code      : cmp1 >= cmp2 ? dst : src
-//   transformed to : CMove dst, (cmp1 lt cmp2), dst, src
+void MacroAssembler::cmov_cmp_fp_ge(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
+  if (UseZicond) {
+    if (is_single) {
+      fle_s(t0, cmp2, cmp1);
+    } else {
+      fle_d(t0, cmp2, cmp1);
+    }
+    czero_nez(dst, dst, t0);
+    czero_eqz(t0 , src, t0);
+    orr(dst, dst, t0);
+    return;
+  }
+  Label no_set;
+  if (is_single) {
+    // jump if cmp1 < cmp2 or either is NaN
+    // fallthrough (i.e. move src to dst) if cmp1 >= cmp2
+    float_blt(cmp1, cmp2, no_set, false, true);
+  } else {
+    double_blt(cmp1, cmp2, no_set, false, true);
+  }
+  mv(dst, src);
+  bind(no_set);
+}
+
 void MacroAssembler::cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
   if (UseZicond) {
     if (is_single) {
@@ -1382,7 +1392,7 @@ void MacroAssembler::cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Regi
   Label no_set;
   if (is_single) {
     // jump if cmp1 >= cmp2
-    // not jump (i.e. move src to dst) if cmp1 < cmp2 or either is NaN
+    // fallthrough (i.e. move src to dst) if cmp1 < cmp2 or either is NaN
     float_bge(cmp1, cmp2, no_set);
   } else {
     double_bge(cmp1, cmp2, no_set);
@@ -1391,6 +1401,30 @@ void MacroAssembler::cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Regi
   bind(no_set);
 }
 
+void MacroAssembler::cmov_cmp_fp_gt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
+  if (UseZicond) {
+    if (is_single) {
+      flt_s(t0, cmp2, cmp1);
+    } else {
+      flt_d(t0, cmp2, cmp1);
+    }
+    czero_nez(dst, dst, t0);
+    czero_eqz(t0 , src, t0);
+    orr(dst, dst, t0);
+    return;
+  }
+  Label no_set;
+  if (is_single) {
+    // jump if cmp1 <= cmp2 or either is NaN
+    // fallthrough (i.e. move src to dst) if cmp1 > cmp2
+    float_ble(cmp1, cmp2, no_set, false, true);
+  } else {
+    double_ble(cmp1, cmp2, no_set, false, true);
+  }
+  mv(dst, src);
+  bind(no_set);
+}
+
 // Float compare branch instructions
 
 #define INSN(NAME, FLOATCMP, BRANCH) \
@@ -5310,42 +5344,6 @@ void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, R
   add(final_dest_hi, dest_hi, carry);
 }
 
-/**
- * Multiply 32 bit by 32 bit first loop.
- */
-void MacroAssembler::multiply_32_x_32_loop(Register x, Register xstart, Register x_xstart,
-                                           Register y, Register y_idx, Register z,
-                                           Register carry, Register product,
-                                           Register idx, Register kdx) {
-  //  jlong carry, x[], y[], z[];
-  //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
-  //    long product = y[idx] * x[xstart] + carry;
-  //    z[kdx] = (int)product;
-  //    carry = product >>> 32;
-  //  }
-  //  z[xstart] = (int)carry;
-
-  Label L_first_loop, L_first_loop_exit;
-  blez(idx, L_first_loop_exit);
-
-  shadd(t0, xstart, x, t0, LogBytesPerInt);
-  lwu(x_xstart, Address(t0, 0));
-
-  bind(L_first_loop);
-  subiw(idx, idx, 1);
-  shadd(t0, idx, y, t0, LogBytesPerInt);
-  lwu(y_idx, Address(t0, 0));
-  mul(product, x_xstart, y_idx);
-  add(product, product, carry);
-  srli(carry, product, 32);
-  subiw(kdx, kdx, 1);
-  shadd(t0, kdx, z, t0, LogBytesPerInt);
-  sw(product, Address(t0, 0));
-  bgtz(idx, L_first_loop);
-
-  bind(L_first_loop_exit);
-}
-
 /**
  * Multiply 64 bit by 64 bit first loop.
  */
@@ -5562,77 +5560,16 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Regi
   const Register carry = tmp5;
   const Register product = xlen;
   const Register x_xstart = tmp0;
+  const Register jdx = tmp1;
 
   mv(idx, ylen);         // idx = ylen;
   addw(kdx, xlen, ylen); // kdx = xlen+ylen;
   mv(carry, zr);         // carry = 0;
 
-  Label L_multiply_64_x_64_loop, L_done;
-
+  Label L_done;
   subiw(xstart, xlen, 1);
   bltz(xstart, L_done);
 
-  const Register jdx = tmp1;
-
-  if (AvoidUnalignedAccesses) {
-    int base_offset = arrayOopDesc::base_offset_in_bytes(T_INT);
-    assert((base_offset % (UseCompactObjectHeaders ? 4 :
-                           (UseCompressedClassPointers ? 8 : 4))) == 0, "Must be");
-
-    if ((base_offset % 8) == 0) {
-      // multiply_64_x_64_loop emits 8-byte load/store to access two elements
-      // at a time from int arrays x and y. When base_offset is 8 bytes, these
-      // accesses are naturally aligned if both xlen and ylen are even numbers.
-      orr(t0, xlen, ylen);
-      test_bit(t0, t0, 0);
-      beqz(t0, L_multiply_64_x_64_loop);
-    }
-
-    Label L_second_loop_unaligned, L_third_loop, L_third_loop_exit;
-
-    multiply_32_x_32_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
-    shadd(t0, xstart, z, t0, LogBytesPerInt);
-    sw(carry, Address(t0, 0));
-
-    bind(L_second_loop_unaligned);
-    mv(carry, zr);
-    mv(jdx, ylen);
-    subiw(xstart, xstart, 1);
-    bltz(xstart, L_done);
-
-    subi(sp, sp, 2 * wordSize);
-    sd(z, Address(sp, 0));
-    sd(zr, Address(sp, wordSize));
-    shadd(t0, xstart, z, t0, LogBytesPerInt);
-    addi(z, t0, 4);
-    shadd(t0, xstart, x, t0, LogBytesPerInt);
-    lwu(product, Address(t0, 0));
-
-    blez(jdx, L_third_loop_exit);
-
-    bind(L_third_loop);
-    subiw(jdx, jdx, 1);
-    shadd(t0, jdx, y, t0, LogBytesPerInt);
-    lwu(t0, Address(t0, 0));
-    mul(t1, t0, product);
-    add(t0, t1, carry);
-    shadd(tmp6, jdx, z, t1, LogBytesPerInt);
-    lwu(t1, Address(tmp6, 0));
-    add(t0, t0, t1);
-    sw(t0, Address(tmp6, 0));
-    srli(carry, t0, 32);
-    bgtz(jdx, L_third_loop);
-
-    bind(L_third_loop_exit);
-    ld(z, Address(sp, 0));
-    addi(sp, sp, 2 * wordSize);
-    shadd(t0, xstart, z, t0, LogBytesPerInt);
-    sw(carry, Address(t0, 0));
-
-    j(L_second_loop_unaligned);
-  }
-
-  bind(L_multiply_64_x_64_loop);
   multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
 
   Label L_second_loop_aligned;
```
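The unordered-case table in the new comment block falls straight out of Java's floating-point comparison semantics: every relational comparison involving NaN is false and only `!=` is true, so a branch-free CMove lowered from, say, `cmp1 > cmp2 ? dst : src` has to treat the unordered case as selecting `src`. A quick illustration in plain Java (this snippet is mine, not part of the patch):

```java
public class NaNCompare {
    public static void main(String[] args) {
        float nan = Float.NaN;
        // Per the JLS, all relational comparisons with NaN are false ...
        System.out.println(nan <  1.0f); // false
        System.out.println(nan <= 1.0f); // false
        System.out.println(nan >  1.0f); // false
        System.out.println(nan >= 1.0f); // false
        System.out.println(nan == nan);  // false
        // ... and != is the single comparison that is true.
        System.out.println(nan != nan);  // true

        // cmp1 > cmp2 ? dst : src -- with a NaN input the comparison is
        // false, so a conditional move compiled from this expression must
        // yield src (the "le => dst = src" unordered case in the table).
        float cmp1 = Float.NaN, cmp2 = 0.0f;
        int dst = 1, src = 2;
        System.out.println(cmp1 > cmp2 ? dst : src); // prints 2
    }
}
```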
```diff
@@ -660,7 +660,9 @@ class MacroAssembler: public Assembler {
   void cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
   void cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
   void cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
+  void cmov_cmp_fp_ge(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
   void cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
+  void cmov_cmp_fp_gt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
 
 public:
   // We try to follow risc-v asm menomics.
@@ -1382,10 +1384,6 @@ public:
   void adc(Register dst, Register src1, Register src2, Register carry);
   void add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
                        Register src1, Register src2, Register carry);
-  void multiply_32_x_32_loop(Register x, Register xstart, Register x_xstart,
-                             Register y, Register y_idx, Register z,
-                             Register carry, Register product,
-                             Register idx, Register kdx);
   void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                              Register y, Register y_idx, Register z,
                              Register carry, Register product,
```
```diff
@@ -8431,6 +8431,17 @@ instruct castVV(vReg dst)
   ins_pipe(pipe_class_empty);
 %}
 
+instruct castVVMask(vRegMask dst)
+%{
+  match(Set dst (CastVV dst));
+
+  size(0);
+  format %{ "# castVV of $dst" %}
+  ins_encode(/* empty encoding */);
+  ins_cost(0);
+  ins_pipe(pipe_class_empty);
+%}
+
 // ============================================================================
 // Convert Instructions
```
```diff
@@ -203,15 +203,15 @@ void VM_Version::common_initialize() {
     }
   }
 
-  // Misc Intrinsics could depend on RVV
+  // Misc Intrinsics that could depend on RVV.
 
-  if (UseZba || UseRVV) {
+  if (!AvoidUnalignedAccesses && (UseZba || UseRVV)) {
     if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
       FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
     }
   } else {
     if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
-      warning("CRC32 intrinsic requires Zba or RVV instructions (not available on this CPU)");
+      warning("CRC32 intrinsic are not available on this CPU.");
     }
     FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
   }
@@ -325,20 +325,40 @@ void VM_Version::c2_initialize() {
     FLAG_SET_DEFAULT(UseMulAddIntrinsic, true);
   }
 
-  if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
-    FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, true);
+  if (!AvoidUnalignedAccesses) {
+    if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
+      FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, true);
+    }
+  } else if (UseMultiplyToLenIntrinsic) {
+    warning("Intrinsics for BigInteger.multiplyToLen() not available on this CPU.");
+    FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, false);
   }
 
-  if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
-    FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, true);
+  if (!AvoidUnalignedAccesses) {
+    if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
+      FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, true);
+    }
+  } else if (UseSquareToLenIntrinsic) {
+    warning("Intrinsics for BigInteger.squareToLen() not available on this CPU.");
+    FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, false);
   }
 
-  if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
-    FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, true);
+  if (!AvoidUnalignedAccesses) {
+    if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
+      FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, true);
+    }
+  } else if (UseMontgomeryMultiplyIntrinsic) {
+    warning("Intrinsics for BigInteger.montgomeryMultiply() not available on this CPU.");
+    FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, false);
   }
 
-  if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
-    FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, true);
+  if (!AvoidUnalignedAccesses) {
+    if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
+      FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, true);
+    }
+  } else if (UseMontgomerySquareIntrinsic) {
+    warning("Intrinsics for BigInteger.montgomerySquare() not available on this CPU.");
+    FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, false);
   }
 
   // Adler32
```
```diff
@@ -15681,6 +15681,8 @@ void Assembler::pusha_uncached() { // 64bit
     // Push pair of original stack pointer along with remaining registers
     // at 16B aligned boundary.
     push2p(rax, r31);
+    // Restore the original contents of RAX register.
+    movq(rax, Address(rax));
     push2p(r30, r29);
     push2p(r28, r27);
     push2p(r26, r25);
```
```diff
@@ -4655,6 +4655,7 @@ static void convertF2I_slowpath(C2_MacroAssembler& masm, C2GeneralStub<Register,
   __ subptr(rsp, 8);
   __ movdbl(Address(rsp), src);
   __ call(RuntimeAddress(target));
+  // APX REX2 encoding for pop(dst) increases the stub size by 1 byte.
   __ pop(dst);
   __ jmp(stub.continuation());
 #undef __
@@ -4687,7 +4688,9 @@ void C2_MacroAssembler::convertF2I(BasicType dst_bt, BasicType src_bt, Register
     }
   }
 
-  auto stub = C2CodeStub::make<Register, XMMRegister, address>(dst, src, slowpath_target, 23, convertF2I_slowpath);
+  // Using the APX extended general purpose registers increases the instruction encoding size by 1 byte.
+  int max_size = 23 + (UseAPX ? 1 : 0);
+  auto stub = C2CodeStub::make<Register, XMMRegister, address>(dst, src, slowpath_target, max_size, convertF2I_slowpath);
   jcc(Assembler::equal, stub->entry());
   bind(stub->continuation());
 }
```
```diff
@@ -353,7 +353,7 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
 
   // The rest is saved with the optimized path
 
-  uint num_saved_regs = 4 + (dst != rax ? 1 : 0) + 4;
+  uint num_saved_regs = 4 + (dst != rax ? 1 : 0) + 4 + (UseAPX ? 16 : 0);
   __ subptr(rsp, num_saved_regs * wordSize);
   uint slot = num_saved_regs;
   if (dst != rax) {
@@ -367,6 +367,25 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
   __ movptr(Address(rsp, (--slot) * wordSize), r9);
   __ movptr(Address(rsp, (--slot) * wordSize), r10);
   __ movptr(Address(rsp, (--slot) * wordSize), r11);
+  // Save APX extended registers r16–r31 if enabled
+  if (UseAPX) {
+    __ movptr(Address(rsp, (--slot) * wordSize), r16);
+    __ movptr(Address(rsp, (--slot) * wordSize), r17);
+    __ movptr(Address(rsp, (--slot) * wordSize), r18);
+    __ movptr(Address(rsp, (--slot) * wordSize), r19);
+    __ movptr(Address(rsp, (--slot) * wordSize), r20);
+    __ movptr(Address(rsp, (--slot) * wordSize), r21);
+    __ movptr(Address(rsp, (--slot) * wordSize), r22);
+    __ movptr(Address(rsp, (--slot) * wordSize), r23);
+    __ movptr(Address(rsp, (--slot) * wordSize), r24);
+    __ movptr(Address(rsp, (--slot) * wordSize), r25);
+    __ movptr(Address(rsp, (--slot) * wordSize), r26);
+    __ movptr(Address(rsp, (--slot) * wordSize), r27);
+    __ movptr(Address(rsp, (--slot) * wordSize), r28);
+    __ movptr(Address(rsp, (--slot) * wordSize), r29);
+    __ movptr(Address(rsp, (--slot) * wordSize), r30);
+    __ movptr(Address(rsp, (--slot) * wordSize), r31);
+  }
   // r12-r15 are callee saved in all calling conventions
   assert(slot == 0, "must use all slots");
@@ -398,6 +417,25 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
     __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), arg0, arg1);
   }
 
+  // Restore APX extended registers r31–r16 if previously saved
+  if (UseAPX) {
+    __ movptr(r31, Address(rsp, (slot++) * wordSize));
+    __ movptr(r30, Address(rsp, (slot++) * wordSize));
+    __ movptr(r29, Address(rsp, (slot++) * wordSize));
+    __ movptr(r28, Address(rsp, (slot++) * wordSize));
+    __ movptr(r27, Address(rsp, (slot++) * wordSize));
+    __ movptr(r26, Address(rsp, (slot++) * wordSize));
+    __ movptr(r25, Address(rsp, (slot++) * wordSize));
+    __ movptr(r24, Address(rsp, (slot++) * wordSize));
+    __ movptr(r23, Address(rsp, (slot++) * wordSize));
+    __ movptr(r22, Address(rsp, (slot++) * wordSize));
+    __ movptr(r21, Address(rsp, (slot++) * wordSize));
+    __ movptr(r20, Address(rsp, (slot++) * wordSize));
+    __ movptr(r19, Address(rsp, (slot++) * wordSize));
+    __ movptr(r18, Address(rsp, (slot++) * wordSize));
+    __ movptr(r17, Address(rsp, (slot++) * wordSize));
+    __ movptr(r16, Address(rsp, (slot++) * wordSize));
+  }
   __ movptr(r11, Address(rsp, (slot++) * wordSize));
   __ movptr(r10, Address(rsp, (slot++) * wordSize));
   __ movptr(r9, Address(rsp, (slot++) * wordSize));
```
```diff
@@ -30,7 +30,7 @@
     do_arch_blob,                                                       \
     do_arch_entry,                                                      \
     do_arch_entry_init)                                                 \
-  do_arch_blob(initial, 20000 WINDOWS_ONLY(+1000))                      \
+  do_arch_blob(initial, PRODUCT_ONLY(20000) NOT_PRODUCT(21000) WINDOWS_ONLY(+1000)) \
   do_stub(initial, verify_mxcsr)                                        \
   do_arch_entry(x86, initial, verify_mxcsr, verify_mxcsr_entry,         \
                 verify_mxcsr_entry)                                     \
@@ -239,7 +239,7 @@
     do_arch_blob,                                                       \
     do_arch_entry,                                                      \
     do_arch_entry_init)                                                 \
-  do_arch_blob(final, 31000                                             \
+  do_arch_blob(final, 33000                                             \
                WINDOWS_ONLY(+22000) ZGC_ONLY(+20000))                   \
 
 #endif // CPU_X86_STUBDECLARATIONS_HPP
```
```diff
@@ -46,6 +46,12 @@
 //
 /******************************************************************************/
 
+/* Represents 0x7FFFFFFFFFFFFFFF double precision in lower 64 bits*/
+ATTRIBUTE_ALIGNED(16) static const juint _ABS_MASK[] =
+{
+    4294967295, 2147483647, 0, 0
+};
+
 ATTRIBUTE_ALIGNED(4) static const juint _SIG_MASK[] =
 {
     0, 1032192
@@ -188,10 +194,10 @@ address StubGenerator::generate_libmCbrt() {
   StubCodeMark mark(this, stub_id);
   address start = __ pc();
 
-  Label L_2TAG_PACKET_0_0_1, L_2TAG_PACKET_1_0_1, L_2TAG_PACKET_2_0_1, L_2TAG_PACKET_3_0_1;
-  Label L_2TAG_PACKET_4_0_1, L_2TAG_PACKET_5_0_1, L_2TAG_PACKET_6_0_1;
+  Label L_2TAG_PACKET_0_0_1, L_2TAG_PACKET_1_0_1, L_2TAG_PACKET_2_0_1;
   Label B1_1, B1_2, B1_4;
 
+  address ABS_MASK = (address)_ABS_MASK;
   address SIG_MASK = (address)_SIG_MASK;
   address EXP_MASK = (address)_EXP_MASK;
   address EXP_MSK2 = (address)_EXP_MSK2;
@@ -208,8 +214,12 @@ address StubGenerator::generate_libmCbrt() {
   __ enter(); // required for proper stackwalking of RuntimeStub frame
 
   __ bind(B1_1);
   __ subq(rsp, 24);
   __ movsd(Address(rsp), xmm0);
+  __ ucomisd(xmm0, ExternalAddress(ZERON), r11 /*rscratch*/);
+  __ jcc(Assembler::equal, L_2TAG_PACKET_1_0_1); // Branch only if x is +/- zero or NaN
+  __ movq(xmm1, xmm0);
+  __ andpd(xmm1, ExternalAddress(ABS_MASK), r11 /*rscratch*/);
+  __ ucomisd(xmm1, ExternalAddress(INF), r11 /*rscratch*/);
+  __ jcc(Assembler::equal, B1_4); // Branch only if x is +/- INF
 
   __ bind(B1_2);
   __ movq(xmm7, xmm0);
@@ -228,8 +238,6 @@ address StubGenerator::generate_libmCbrt() {
   __ andl(rdx, rax);
   __ cmpl(rdx, 0);
   __ jcc(Assembler::equal, L_2TAG_PACKET_0_0_1); // Branch only if |x| is denormalized
-  __ cmpl(rdx, 524032);
-  __ jcc(Assembler::equal, L_2TAG_PACKET_1_0_1); // Branch only if |x| is INF or NaN
   __ shrl(rdx, 8);
   __ shrq(r9, 8);
   __ andpd(xmm2, xmm0);
@@ -297,8 +305,6 @@ address StubGenerator::generate_libmCbrt() {
   __ andl(rdx, rax);
   __ shrl(rdx, 8);
   __ shrq(r9, 8);
-  __ cmpl(rdx, 0);
-  __ jcc(Assembler::equal, L_2TAG_PACKET_3_0_1); // Branch only if |x| is zero
   __ andpd(xmm2, xmm0);
   __ andpd(xmm0, xmm5);
   __ orpd(xmm3, xmm2);
@@ -322,41 +328,10 @@ address StubGenerator::generate_libmCbrt() {
   __ psllq(xmm7, 52);
   __ jmp(L_2TAG_PACKET_2_0_1);
 
-  __ bind(L_2TAG_PACKET_3_0_1);
-  __ cmpq(r9, 0);
-  __ jcc(Assembler::notEqual, L_2TAG_PACKET_4_0_1); // Branch only if x is negative zero
-  __ xorpd(xmm0, xmm0);
-  __ jmp(B1_4);
-
-  __ bind(L_2TAG_PACKET_4_0_1);
-  __ movsd(xmm0, ExternalAddress(ZERON), r11 /*rscratch*/);
-  __ jmp(B1_4);
-
   __ bind(L_2TAG_PACKET_1_0_1);
-  __ movl(rax, Address(rsp, 4));
-  __ movl(rdx, Address(rsp));
-  __ movl(rcx, rax);
-  __ andl(rcx, 2147483647);
-  __ cmpl(rcx, 2146435072);
-  __ jcc(Assembler::above, L_2TAG_PACKET_5_0_1); // Branch only if |x| is NaN
-  __ cmpl(rdx, 0);
-  __ jcc(Assembler::notEqual, L_2TAG_PACKET_5_0_1); // Branch only if |x| is NaN
-  __ cmpl(rax, 2146435072);
-  __ jcc(Assembler::notEqual, L_2TAG_PACKET_6_0_1); // Branch only if x is negative INF
-  __ movsd(xmm0, ExternalAddress(INF), r11 /*rscratch*/);
-  __ jmp(B1_4);
-
-  __ bind(L_2TAG_PACKET_6_0_1);
-  __ movsd(xmm0, ExternalAddress(NEG_INF), r11 /*rscratch*/);
-  __ jmp(B1_4);
-
-  __ bind(L_2TAG_PACKET_5_0_1);
   __ movsd(xmm0, Address(rsp));
   __ addsd(xmm0, xmm0);
   __ movq(Address(rsp, 8), xmm0);
 
   __ bind(B1_4);
   __ addq(rsp, 24);
   __ leave(); // required for proper stackwalking of RuntimeStub frame
   __ ret(0);
```
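The new `ucomisd` checks at the top of the cbrt stub route ±0, NaN and ±infinity out of the main computation early; the values those paths must preserve are pinned down by the `Math.cbrt` specification. For reference, the edge cases exercised by this change, written as ordinary library calls (this snippet is illustrative, not part of the patch):

```java
public class CbrtEdgeCases {
    public static void main(String[] args) {
        System.out.println(Math.cbrt(Double.NaN));               // NaN
        System.out.println(Math.cbrt(0.0));                      // 0.0
        System.out.println(Math.cbrt(-0.0));                     // -0.0 (sign of zero preserved)
        System.out.println(Math.cbrt(Double.POSITIVE_INFINITY)); // Infinity
        System.out.println(Math.cbrt(Double.NEGATIVE_INFINITY)); // -Infinity
        System.out.println(Math.cbrt(-8.0));                     // -2.0 (odd root keeps the sign)
    }
}
```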
```diff
@@ -440,7 +440,6 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
     __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits apx_f
     __ jcc(Assembler::equal, vector_save_restore);
 
-#ifndef PRODUCT
     bool save_apx = UseAPX;
     VM_Version::set_apx_cpuFeatures();
     UseAPX = true;
@@ -457,7 +456,6 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
     __ movq(Address(rsi, 8), r31);
 
     UseAPX = save_apx;
-#endif
     __ bind(vector_save_restore);
     //
     // Check if OS has enabled XGETBV instruction to access XCR0
@@ -1022,8 +1020,6 @@ void VM_Version::get_processor_features() {
   if (UseAPX && !apx_supported) {
     warning("UseAPX is not supported on this CPU, setting it to false");
     FLAG_SET_DEFAULT(UseAPX, false);
-  } else if (FLAG_IS_DEFAULT(UseAPX)) {
-    FLAG_SET_DEFAULT(UseAPX, apx_supported ? true : false);
   }
 
   if (!UseAPX) {
@@ -2111,7 +2107,7 @@ bool VM_Version::is_intel_cascade_lake() {
 // has improved implementation of 64-byte load/stores and so the default
 // threshold is set to 0 for these platforms.
 int VM_Version::avx3_threshold() {
-  return (is_intel_family_core() &&
+  return (is_intel_server_family() &&
           supports_serialize() &&
           FLAG_IS_DEFAULT(AVX3Threshold)) ? 0 : AVX3Threshold;
 }
@@ -3151,17 +3147,11 @@ bool VM_Version::os_supports_apx_egprs() {
   if (!supports_apx_f()) {
     return false;
   }
-  // Enable APX support for product builds after
-  // completion of planned features listed in JDK-8329030.
-#if !defined(PRODUCT)
   if (_cpuid_info.apx_save[0] != egpr_test_value() ||
       _cpuid_info.apx_save[1] != egpr_test_value()) {
     return false;
   }
   return true;
-#else
-  return false;
-#endif
 }
 
 uint VM_Version::cores_per_cpu() {
```
```diff
@@ -10527,7 +10527,8 @@ instruct xorI_rReg_im1_ndd(rRegI dst, rRegI src, immI_M1 imm)
 // Xor Register with Immediate
 instruct xorI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
 %{
-  predicate(!UseAPX);
+  // Strict predicate check to make selection of xorI_rReg_im1 cost agnostic if immI src is -1.
+  predicate(!UseAPX && n->in(2)->bottom_type()->is_int()->get_con() != -1);
   match(Set dst (XorI dst src));
   effect(KILL cr);
   flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
@@ -10541,7 +10542,8 @@ instruct xorI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
 
 instruct xorI_rReg_rReg_imm_ndd(rRegI dst, rRegI src1, immI src2, rFlagsReg cr)
 %{
-  predicate(UseAPX);
+  // Strict predicate check to make selection of xorI_rReg_im1_ndd cost agnostic if immI src2 is -1.
+  predicate(UseAPX && n->in(2)->bottom_type()->is_int()->get_con() != -1);
   match(Set dst (XorI src1 src2));
   effect(KILL cr);
   flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
@@ -10559,6 +10561,7 @@ instruct xorI_rReg_mem_imm_ndd(rRegI dst, memory src1, immI src2, rFlagsReg cr)
   predicate(UseAPX);
   match(Set dst (XorI (LoadI src1) src2));
   effect(KILL cr);
+  ins_cost(150);
   flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
 
   format %{ "exorl $dst, $src1, $src2\t# int ndd" %}
@@ -11201,7 +11204,8 @@ instruct xorL_rReg_im1_ndd(rRegL dst,rRegL src, immL_M1 imm)
 // Xor Register with Immediate
 instruct xorL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
 %{
-  predicate(!UseAPX);
+  // Strict predicate check to make selection of xorL_rReg_im1 cost agnostic if immL32 src is -1.
+  predicate(!UseAPX && n->in(2)->bottom_type()->is_long()->get_con() != -1L);
   match(Set dst (XorL dst src));
   effect(KILL cr);
   flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
@@ -11215,7 +11219,8 @@ instruct xorL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
 
 instruct xorL_rReg_rReg_imm(rRegL dst, rRegL src1, immL32 src2, rFlagsReg cr)
 %{
-  predicate(UseAPX);
+  // Strict predicate check to make selection of xorL_rReg_im1_ndd cost agnostic if immL32 src2 is -1.
+  predicate(UseAPX && n->in(2)->bottom_type()->is_long()->get_con() != -1L);
   match(Set dst (XorL src1 src2));
   effect(KILL cr);
   flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
@@ -11234,6 +11239,7 @@ instruct xorL_rReg_mem_imm(rRegL dst, memory src1, immL32 src2, rFlagsReg cr)
   match(Set dst (XorL (LoadL src1) src2));
   effect(KILL cr);
   flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
+  ins_cost(150);
 
   format %{ "exorq $dst, $src1, $src2\t# long ndd" %}
   ins_encode %{
```
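The tightened predicates above exist because xor with an all-ones immediate is just bitwise NOT, and the dedicated `im1` rules (which match the -1 immediate and can emit x86 `not`) should be selected deterministically rather than by cost comparison; excluding -1 here keeps the generic xor rules out of that shape. The underlying identity, in plain Java (illustration only):

```java
public class XorNot {
    public static void main(String[] args) {
        int x = 0b1010;
        // x ^ -1 flips every bit of x, which is exactly ~x; this is the
        // source shape the dedicated im1 match rules are reserved for.
        System.out.println((x ^ -1) == ~x);   // true
        long y = 0xDEADBEEFL;
        System.out.println((y ^ -1L) == ~y);  // true
    }
}
```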
```diff
@@ -2623,7 +2623,6 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
       return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
     }
 
-#if !defined(PRODUCT)
     if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
         VM_Version::is_cpuinfo_segv_addr_apx(pc)) {
       // Verify that OS save/restore APX registers.
@@ -2631,7 +2630,6 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
       return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr_apx());
     }
-#endif
 #endif
 
 #ifdef CAN_SHOW_REGISTERS_ON_ASSERT
   if (VMError::was_assert_poison_crash(exception_record)) {
```
```diff
@@ -429,13 +429,11 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
       stub = VM_Version::cpuinfo_cont_addr();
     }
 
-#if !defined(PRODUCT) && defined(_LP64)
     if ((sig == SIGSEGV || sig == SIGBUS) && VM_Version::is_cpuinfo_segv_addr_apx(pc)) {
       // Verify that OS save/restore APX registers.
       stub = VM_Version::cpuinfo_cont_addr_apx();
       VM_Version::clear_apx_test_state();
     }
-#endif
 
     // We test if stub is already set (by the stack overflow code
     // above) so it is not overwritten by the code that follows. This
```
```diff
@@ -255,13 +255,11 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
       stub = VM_Version::cpuinfo_cont_addr();
     }
 
-#if !defined(PRODUCT) && defined(_LP64)
     if ((sig == SIGSEGV) && VM_Version::is_cpuinfo_segv_addr_apx(pc)) {
       // Verify that OS save/restore APX registers.
       stub = VM_Version::cpuinfo_cont_addr_apx();
       VM_Version::clear_apx_test_state();
     }
-#endif
 
     if (thread->thread_state() == _thread_in_Java) {
       // Java thread running in Java code => find exception handler if any
```
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -187,7 +187,13 @@ class ValueNumberingVisitor: public InstructionVisitor {
void do_Convert (Convert* x) { /* nothing to do */ }
void do_NullCheck (NullCheck* x) { /* nothing to do */ }
void do_TypeCast (TypeCast* x) { /* nothing to do */ }
void do_NewInstance (NewInstance* x) { /* nothing to do */ }
void do_NewInstance (NewInstance* x) {
ciInstanceKlass* c = x->klass();
if (c != nullptr && !c->is_initialized() &&
(!c->is_loaded() || c->has_class_initializer())) {
kill_memory();
}
}
void do_NewTypeArray (NewTypeArray* x) { /* nothing to do */ }
void do_NewObjectArray (NewObjectArray* x) { /* nothing to do */ }
void do_NewMultiArray (NewMultiArray* x) { /* nothing to do */ }
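The do_NewInstance change above makes c1's value numbering drop its cached memory state whenever an allocation may trigger class initialization, since a <clinit> can write fields behind the optimizer's back. A minimal standalone sketch of that invalidation rule, using hypothetical toy types rather than HotSpot's real ValueMap classes:

#include <string>
#include <unordered_map>

// Toy value-numbering table: caches field loads until something
// invalidates memory, the way c1's ValueMap does.
struct ToyValueMap {
    std::unordered_map<std::string, int> cached_loads;

    // A new instance whose class may still run <clinit> can execute
    // arbitrary Java code, so every cached memory value may be stale.
    void on_new_instance(bool klass_initialized, bool klass_loaded,
                         bool has_class_initializer) {
        if (!klass_initialized && (!klass_loaded || has_class_initializer)) {
            kill_memory();
        }
    }

    void kill_memory() { cached_loads.clear(); }
};

int main() {
    ToyValueMap vm;
    vm.cached_loads["A.f"] = 42;           // load of A.f was value-numbered
    vm.on_new_instance(false, true, true); // allocating B may run B.<clinit>
    return vm.cached_loads.empty() ? 0 : 1; // the cached load must be gone
}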
@@ -147,7 +147,7 @@
product(bool, AOTVerifyTrainingData, trueInDebug, DIAGNOSTIC, \
"Verify archived training data") \
\
product(bool, AOTCompileEagerly, false, DIAGNOSTIC, \
product(bool, AOTCompileEagerly, false, EXPERIMENTAL, \
"Compile methods as soon as possible") \
\
/* AOT Code flags */ \

@@ -837,11 +837,10 @@ void MetaspaceShared::preload_and_dump(TRAPS) {
struct stat st;
if (os::stat(AOTCache, &st) != 0) {
tty->print_cr("AOTCache creation failed: %s", AOTCache);
vm_exit(0);
} else {
tty->print_cr("AOTCache creation is complete: %s " INT64_FORMAT " bytes", AOTCache, (int64_t)(st.st_size));
vm_exit(0);
}
vm_direct_exit(0);
}
}
}
@@ -549,6 +549,11 @@ bool ciInstanceKlass::compute_has_trusted_loader() {
return java_lang_ClassLoader::is_trusted_loader(loader_oop);
}

bool ciInstanceKlass::has_class_initializer() {
VM_ENTRY_MARK;
return get_instanceKlass()->class_initializer() != nullptr;
}

// ------------------------------------------------------------------
// ciInstanceKlass::find_method
//

@@ -231,6 +231,8 @@ public:
ciInstanceKlass* unique_concrete_subklass();
bool has_finalizable_subclass();

bool has_class_initializer();

bool contains_field_offset(int offset);

// Get the instance of java.lang.Class corresponding to
@@ -3738,6 +3738,7 @@ void ClassFileParser::apply_parsed_class_metadata(
_cp->set_pool_holder(this_klass);
this_klass->set_constants(_cp);
this_klass->set_fieldinfo_stream(_fieldinfo_stream);
this_klass->set_fieldinfo_search_table(_fieldinfo_search_table);
this_klass->set_fields_status(_fields_status);
this_klass->set_methods(_methods);
this_klass->set_inner_classes(_inner_classes);
@@ -3747,6 +3748,8 @@ void ClassFileParser::apply_parsed_class_metadata(
this_klass->set_permitted_subclasses(_permitted_subclasses);
this_klass->set_record_components(_record_components);

DEBUG_ONLY(FieldInfoStream::validate_search_table(_cp, _fieldinfo_stream, _fieldinfo_search_table));

// Delay the setting of _local_interfaces and _transitive_interfaces until after
// initialize_supers() in fill_instance_klass(). It is because the _local_interfaces could
// be shared with _transitive_interfaces and _transitive_interfaces may be shared with
@@ -5054,6 +5057,7 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik,
// note that is not safe to use the fields in the parser from this point on
assert(nullptr == _cp, "invariant");
assert(nullptr == _fieldinfo_stream, "invariant");
assert(nullptr == _fieldinfo_search_table, "invariant");
assert(nullptr == _fields_status, "invariant");
assert(nullptr == _methods, "invariant");
assert(nullptr == _inner_classes, "invariant");
@@ -5274,6 +5278,7 @@ ClassFileParser::ClassFileParser(ClassFileStream* stream,
_super_klass(),
_cp(nullptr),
_fieldinfo_stream(nullptr),
_fieldinfo_search_table(nullptr),
_fields_status(nullptr),
_methods(nullptr),
_inner_classes(nullptr),
@@ -5350,6 +5355,7 @@ void ClassFileParser::clear_class_metadata() {
// deallocated if classfile parsing returns an error.
_cp = nullptr;
_fieldinfo_stream = nullptr;
_fieldinfo_search_table = nullptr;
_fields_status = nullptr;
_methods = nullptr;
_inner_classes = nullptr;
@@ -5372,6 +5378,7 @@ ClassFileParser::~ClassFileParser() {
if (_fieldinfo_stream != nullptr) {
MetadataFactory::free_array<u1>(_loader_data, _fieldinfo_stream);
}
MetadataFactory::free_array<u1>(_loader_data, _fieldinfo_search_table);

if (_fields_status != nullptr) {
MetadataFactory::free_array<FieldStatus>(_loader_data, _fields_status);
@@ -5772,6 +5779,7 @@ void ClassFileParser::post_process_parsed_stream(const ClassFileStream* const st
_fieldinfo_stream =
FieldInfoStream::create_FieldInfoStream(_temp_field_info, _java_fields_count,
injected_fields_count, loader_data(), CHECK);
_fieldinfo_search_table = FieldInfoStream::create_search_table(_cp, _fieldinfo_stream, _loader_data, CHECK);
_fields_status =
MetadataFactory::new_array<FieldStatus>(_loader_data, _temp_field_info->length(),
FieldStatus(0), CHECK);
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -123,6 +123,7 @@ class ClassFileParser {
const InstanceKlass* _super_klass;
ConstantPool* _cp;
Array<u1>* _fieldinfo_stream;
Array<u1>* _fieldinfo_search_table;
Array<FieldStatus>* _fields_status;
Array<Method*>* _methods;
Array<u2>* _inner_classes;
@@ -301,7 +301,7 @@ void FieldLayout::reconstruct_layout(const InstanceKlass* ik, bool& has_instance
BasicType last_type;
int last_offset = -1;
while (ik != nullptr) {
for (AllFieldStream fs(ik->fieldinfo_stream(), ik->constants()); !fs.done(); fs.next()) {
for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
BasicType type = Signature::basic_type(fs.signature());
// distinction between static and non-static fields is missing
if (fs.access_flags().is_static()) continue;
@@ -461,7 +461,7 @@ void FieldLayout::print(outputStream* output, bool is_static, const InstanceKlas
bool found = false;
const InstanceKlass* ik = super;
while (!found && ik != nullptr) {
for (AllFieldStream fs(ik->fieldinfo_stream(), ik->constants()); !fs.done(); fs.next()) {
for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
if (fs.offset() == b->offset()) {
output->print_cr(" @%d \"%s\" %s %d/%d %s",
b->offset(),

@@ -967,6 +967,13 @@ void java_lang_Class::fixup_mirror(Klass* k, TRAPS) {
Array<u1>* new_fis = FieldInfoStream::create_FieldInfoStream(fields, java_fields, injected_fields, k->class_loader_data(), CHECK);
ik->set_fieldinfo_stream(new_fis);
MetadataFactory::free_array<u1>(k->class_loader_data(), old_stream);

Array<u1>* old_table = ik->fieldinfo_search_table();
Array<u1>* search_table = FieldInfoStream::create_search_table(ik->constants(), new_fis, k->class_loader_data(), CHECK);
ik->set_fieldinfo_search_table(search_table);
MetadataFactory::free_array<u1>(k->class_loader_data(), old_table);

DEBUG_ONLY(FieldInfoStream::validate_search_table(ik->constants(), new_fis, search_table));
}
}
@@ -132,8 +132,16 @@ bool StackMapTable::match_stackmap(
}

void StackMapTable::check_jump_target(
StackMapFrame* frame, int32_t target, TRAPS) const {
StackMapFrame* frame, int bci, int offset, TRAPS) const {
ErrorContext ctx;
// Jump targets must be within the method and the method size is limited. See JVMS 4.11
int min_offset = -1 * max_method_code_size;
if (offset < min_offset || offset > max_method_code_size) {
frame->verifier()->verify_error(ErrorContext::bad_stackmap(bci, frame),
"Illegal target of jump or branch (bci %d + offset %d)", bci, offset);
return;
}
int target = bci + offset;
bool match = match_stackmap(
frame, target, true, false, &ctx, CHECK_VERIFY(frame->verifier()));
if (!match || (target < 0 || target >= _code_length)) {

@@ -67,7 +67,7 @@ class StackMapTable : public StackObj {

// Check jump instructions. Make sure there are no uninitialized
// instances on backward branch.
void check_jump_target(StackMapFrame* frame, int32_t target, TRAPS) const;
void check_jump_target(StackMapFrame* frame, int bci, int offset, TRAPS) const;

// The following methods are only used inside this class.
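The reworked check_jump_target takes the raw bci and offset separately so the offset can be range-checked before the addition, which keeps bci + offset from overflowing a 32-bit int. A self-contained sketch of the same guard, assuming only the JVMS 65535-byte method size limit:

#include <cstdio>

// JVMS limits method bytecode to 65535 bytes, so any legal branch
// offset must fall in [-65535, 65535]; checking the offset first
// keeps bci + offset from overflowing int.
static const int kMaxMethodCodeSize = 65535;

bool check_jump(int bci, int offset, int code_length, int* target_out) {
    if (offset < -kMaxMethodCodeSize || offset > kMaxMethodCodeSize) {
        return false;                  // reject before doing any arithmetic
    }
    int target = bci + offset;         // now provably overflow-free
    if (target < 0 || target >= code_length) {
        return false;                  // target must land inside the method
    }
    *target_out = target;
    return true;
}

int main() {
    int t;
    // A forged 32-bit goto_w offset like 0x7fffffff is rejected up front.
    return check_jump(10, 0x7fffffff, 100, &t) ? 1 : 0;
}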
@@ -32,6 +32,7 @@
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/vmClasses.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.hpp"
@@ -115,6 +116,7 @@ OopStorage* StringTable::_oop_storage;

static size_t _current_size = 0;
static volatile size_t _items_count = 0;
DEBUG_ONLY(static bool _disable_interning_during_cds_dump = false);

volatile bool _alt_hash = false;

@@ -346,6 +348,10 @@ bool StringTable::has_work() {
return Atomic::load_acquire(&_has_work);
}

size_t StringTable::items_count_acquire() {
return Atomic::load_acquire(&_items_count);
}

void StringTable::trigger_concurrent_work() {
// Avoid churn on ServiceThread
if (!has_work()) {
@@ -504,6 +510,9 @@ oop StringTable::intern(const char* utf8_string, TRAPS) {
}

oop StringTable::intern(const StringWrapper& name, TRAPS) {
assert(!Atomic::load_acquire(&_disable_interning_during_cds_dump),
"All threads that may intern strings should have been stopped before CDS starts copying the interned string table");

// shared table always uses java_lang_String::hash_code
unsigned int hash = hash_wrapped_string(name);
oop found_string = lookup_shared(name, hash);
@@ -793,7 +802,7 @@ void StringTable::verify() {
}

// Verification and comp
class VerifyCompStrings : StackObj {
class StringTable::VerifyCompStrings : StackObj {
static unsigned string_hash(oop const& str) {
return java_lang_String::hash_code_noupdate(str);
}
@@ -805,7 +814,7 @@ class VerifyCompStrings : StackObj {
string_hash, string_equals> _table;
public:
size_t _errors;
VerifyCompStrings() : _table(unsigned(_items_count / 8) + 1, 0 /* do not resize */), _errors(0) {}
VerifyCompStrings() : _table(unsigned(items_count_acquire() / 8) + 1, 0 /* do not resize */), _errors(0) {}
bool operator()(WeakHandle* val) {
oop s = val->resolve();
if (s == nullptr) {
@@ -939,20 +948,31 @@ oop StringTable::lookup_shared(const jchar* name, int len) {
return _shared_table.lookup(wrapped_name, java_lang_String::hash_code(name, len), 0);
}

// This is called BEFORE we enter the CDS safepoint. We can allocate heap objects.
// This should be called when we know no more strings will be added (which will be easy
// to guarantee because CDS runs with a single Java thread. See JDK-8253495.)
// This is called BEFORE we enter the CDS safepoint. We can still allocate Java object arrays to
// be used by the shared strings table.
void StringTable::allocate_shared_strings_array(TRAPS) {
if (!CDSConfig::is_dumping_heap()) {
return;
}
assert(CDSConfig::allow_only_single_java_thread(), "No more interned strings can be added");

if (_items_count > (size_t)max_jint) {
fatal("Too many strings to be archived: %zu", _items_count);
CompileBroker::wait_for_no_active_tasks();

precond(CDSConfig::allow_only_single_java_thread());

// At this point, no more strings will be added:
// - There's only a single Java thread (this thread). It no longer executes Java bytecodes
// so JIT compilation will eventually stop.
// - CompileBroker has no more active tasks, so all JIT requests have been processed.

// This flag will be cleared after intern table dumping has completed, so we can run the
// compiler again (for future AOT method compilation, etc).
DEBUG_ONLY(Atomic::release_store(&_disable_interning_during_cds_dump, true));

if (items_count_acquire() > (size_t)max_jint) {
fatal("Too many strings to be archived: %zu", items_count_acquire());
}

int total = (int)_items_count;
int total = (int)items_count_acquire();
size_t single_array_size = objArrayOopDesc::object_size(total);

log_info(aot)("allocated string table for %d strings", total);
@@ -972,7 +992,7 @@ void StringTable::allocate_shared_strings_array(TRAPS) {
// This can only happen if you have an extremely large number of classes that
// refer to more than 16384 * 16384 = 26M interned strings! Not a practical concern
// but bail out for safety.
log_error(aot)("Too many strings to be archived: %zu", _items_count);
log_error(aot)("Too many strings to be archived: %zu", items_count_acquire());
MetaspaceShared::unrecoverable_writing_error();
}

@@ -1070,7 +1090,7 @@ oop StringTable::init_shared_strings_array() {

void StringTable::write_shared_table() {
_shared_table.reset();
CompactHashtableWriter writer((int)_items_count, ArchiveBuilder::string_stats());
CompactHashtableWriter writer((int)items_count_acquire(), ArchiveBuilder::string_stats());

int index = 0;
auto copy_into_shared_table = [&] (WeakHandle* val) {
@@ -1084,6 +1104,8 @@ void StringTable::write_shared_table() {
};
_local_table->do_safepoint_scan(copy_into_shared_table);
writer.dump(&_shared_table, "string");

DEBUG_ONLY(Atomic::release_store(&_disable_interning_during_cds_dump, false));
}

void StringTable::set_shared_strings_array_index(int root_index) {
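items_count_acquire() pairs an acquire load with the release stores done wherever the counter changes, so a reader that observes a given count also observes the writes published before it. A minimal std::atomic analogue of that pattern (standard C++, not HotSpot's Atomic API):

#include <atomic>
#include <cstddef>

// Minimal analogue of items_count_acquire(): readers use an acquire
// load so they observe everything published before the matching
// release store (HotSpot's Atomic::load_acquire / release_store).
static std::atomic<size_t> g_items_count{0};

void item_added()   { g_items_count.fetch_add(1, std::memory_order_release); }
void item_removed() { g_items_count.fetch_sub(1, std::memory_order_release); }

size_t items_count_acquire() {
    return g_items_count.load(std::memory_order_acquire);
}

int main() {
    item_added();
    return items_count_acquire() == 1 ? 0 : 1;
}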
@@ -40,7 +40,7 @@ class StringTableConfig;

class StringTable : AllStatic {
friend class StringTableConfig;

class VerifyCompStrings;
static volatile bool _has_work;

// Set if one bucket is out of balance due to hash algorithm deficiency
@@ -74,6 +74,7 @@ private:

static void item_added();
static void item_removed();
static size_t items_count_acquire();

static oop intern(const StringWrapper& name, TRAPS);
static oop do_intern(const StringWrapper& name, uintx hash, TRAPS);
@@ -781,7 +781,6 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {

// Merge with the next instruction
{
int target;
VerificationType type, type2;
VerificationType atype;

@@ -1606,9 +1605,8 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
case Bytecodes::_ifle:
current_frame.pop_stack(
VerificationType::integer_type(), CHECK_VERIFY(this));
target = bcs.dest();
stackmap_table.check_jump_target(
&current_frame, target, CHECK_VERIFY(this));
&current_frame, bcs.bci(), bcs.get_offset_s2(), CHECK_VERIFY(this));
no_control_flow = false; break;
case Bytecodes::_if_acmpeq :
case Bytecodes::_if_acmpne :
@@ -1619,19 +1617,16 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
case Bytecodes::_ifnonnull :
current_frame.pop_stack(
VerificationType::reference_check(), CHECK_VERIFY(this));
target = bcs.dest();
stackmap_table.check_jump_target
(&current_frame, target, CHECK_VERIFY(this));
(&current_frame, bcs.bci(), bcs.get_offset_s2(), CHECK_VERIFY(this));
no_control_flow = false; break;
case Bytecodes::_goto :
target = bcs.dest();
stackmap_table.check_jump_target(
&current_frame, target, CHECK_VERIFY(this));
&current_frame, bcs.bci(), bcs.get_offset_s2(), CHECK_VERIFY(this));
no_control_flow = true; break;
case Bytecodes::_goto_w :
target = bcs.dest_w();
stackmap_table.check_jump_target(
&current_frame, target, CHECK_VERIFY(this));
&current_frame, bcs.bci(), bcs.get_offset_s4(), CHECK_VERIFY(this));
no_control_flow = true; break;
case Bytecodes::_tableswitch :
case Bytecodes::_lookupswitch :
@@ -2280,15 +2275,14 @@ void ClassVerifier::verify_switch(
}
}
}
int target = bci + default_offset;
stackmap_table->check_jump_target(current_frame, target, CHECK_VERIFY(this));
stackmap_table->check_jump_target(current_frame, bci, default_offset, CHECK_VERIFY(this));
for (int i = 0; i < keys; i++) {
// Because check_jump_target() may safepoint, the bytecode could have
// moved, which means 'aligned_bcp' is no good and needs to be recalculated.
aligned_bcp = align_up(bcs->bcp() + 1, jintSize);
target = bci + (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
int offset = (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
stackmap_table->check_jump_target(
current_frame, target, CHECK_VERIFY(this));
current_frame, bci, offset, CHECK_VERIFY(this));
}
NOT_PRODUCT(aligned_bcp = nullptr); // no longer valid at this point
}
@@ -2549,7 +2543,12 @@ bool ClassVerifier::ends_in_athrow(u4 start_bc_offset) {

case Bytecodes::_goto:
case Bytecodes::_goto_w: {
int target = (opcode == Bytecodes::_goto ? bcs.dest() : bcs.dest_w());
int offset = (opcode == Bytecodes::_goto ? bcs.get_offset_s2() : bcs.get_offset_s4());
int min_offset = -1 * max_method_code_size;
// Check offset for overflow
if (offset < min_offset || offset > max_method_code_size) return false;

int target = bci + offset;
if (visited_branches->contains(bci)) {
if (bci_stack->is_empty()) {
if (handler_stack->is_empty()) {
@@ -2607,7 +2606,10 @@ bool ClassVerifier::ends_in_athrow(u4 start_bc_offset) {

// Push the switch alternatives onto the stack.
for (int i = 0; i < keys; i++) {
int target = bci + (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
int min_offset = -1 * max_method_code_size;
int offset = (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
if (offset < min_offset || offset > max_method_code_size) return false;
int target = bci + offset;
if (target > code_length) return false;
bci_stack->push(target);
}
@@ -344,6 +344,7 @@ AOTCodeCache::~AOTCodeCache() {
_store_buffer = nullptr;
}
if (_table != nullptr) {
MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
delete _table;
_table = nullptr;
}
@@ -774,6 +775,9 @@ bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind
// we need to take a lock to prevent race between compiler threads generating AOT code
// and the main thread generating adapter
MutexLocker ml(Compile_lock);
if (!is_on()) {
return false; // AOT code cache was already dumped and closed.
}
if (!cache->align_write()) {
return false;
}
@@ -1434,6 +1438,9 @@ AOTCodeAddressTable::~AOTCodeAddressTable() {
if (_extrs_addr != nullptr) {
FREE_C_HEAP_ARRAY(address, _extrs_addr);
}
if (_stubs_addr != nullptr) {
FREE_C_HEAP_ARRAY(address, _stubs_addr);
}
if (_shared_blobs_addr != nullptr) {
FREE_C_HEAP_ARRAY(address, _shared_blobs_addr);
}
@@ -1485,6 +1492,7 @@ void AOTCodeCache::load_strings() {

int AOTCodeCache::store_strings() {
if (_C_strings_used > 0) {
MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
uint offset = _write_position;
uint length = 0;
uint* lengths = (uint *)reserve_bytes(sizeof(uint) * _C_strings_used);
@@ -1510,15 +1518,17 @@ int AOTCodeCache::store_strings() {

const char* AOTCodeCache::add_C_string(const char* str) {
if (is_on_for_dump() && str != nullptr) {
return _cache->_table->add_C_string(str);
MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
AOTCodeAddressTable* table = addr_table();
if (table != nullptr) {
return table->add_C_string(str);
}
}
return str;
}

const char* AOTCodeAddressTable::add_C_string(const char* str) {
if (_extrs_complete) {
LogStreamHandle(Trace, aot, codecache, stringtable) log; // ctor outside lock
MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
// Check previous strings address
for (int i = 0; i < _C_strings_count; i++) {
if (_C_strings_in[i] == str) {
@@ -1535,9 +1545,7 @@ const char* AOTCodeAddressTable::add_C_string(const char* str) {
_C_strings_in[_C_strings_count] = str;
const char* dup = os::strdup(str);
_C_strings[_C_strings_count++] = dup;
if (log.is_enabled()) {
log.print_cr("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup);
}
log_trace(aot, codecache, stringtable)("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup);
return dup;
} else {
assert(false, "Number of C strings >= MAX_STR_COUNT");

@@ -136,6 +136,7 @@ private:
public:
AOTCodeAddressTable() :
_extrs_addr(nullptr),
_stubs_addr(nullptr),
_shared_blobs_addr(nullptr),
_C1_blobs_addr(nullptr),
_extrs_length(0),
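add_C_string now takes AOTCodeCStrings_lock in the caller, and the table scans earlier entries before duplicating, so the same source pointer always yields the same stable copy. A simplified standalone model of that dedup-under-lock idea (std::mutex standing in for the VM mutex, POSIX strdup for os::strdup):

#include <cstring>
#include <mutex>
#include <vector>

// Simplified model of AOTCodeAddressTable::add_C_string(): under one
// lock, return the existing duplicate if the same pointer was added
// before, otherwise strdup() and remember both original and copy.
static std::mutex g_lock;
static std::vector<const char*> g_in;   // caller-owned originals
static std::vector<const char*> g_dup;  // our stable copies

const char* add_c_string(const char* str) {
    std::lock_guard<std::mutex> guard(g_lock);
    for (size_t i = 0; i < g_in.size(); i++) {
        if (g_in[i] == str) {
            return g_dup[i];            // same source pointer: reuse copy
        }
    }
    g_in.push_back(str);
    g_dup.push_back(strdup(str));       // stable copy owned by the table
    return g_dup.back();
}

int main() {
    const char* s = "adapter_name";
    return add_c_string(s) == add_c_string(s) ? 0 : 1; // dedup by pointer
}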
@@ -160,7 +160,7 @@ CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size
}
} else {
// We need unique and valid not null address
assert(_mutable_data = blob_end(), "sanity");
assert(_mutable_data == blob_end(), "sanity");
}

set_oop_maps(oop_maps);
@@ -177,6 +177,7 @@ CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t heade
_code_offset(_content_offset),
_data_offset(size),
_frame_size(0),
_mutable_data_size(0),
S390_ONLY(_ctable_offset(0) COMMA)
_header_size(header_size),
_frame_complete_offset(CodeOffsets::frame_never_safe),
@@ -185,7 +186,7 @@ CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t heade
{
assert(is_aligned(size, oopSize), "unaligned size");
assert(is_aligned(header_size, oopSize), "unaligned size");
assert(_mutable_data = blob_end(), "sanity");
assert(_mutable_data == blob_end(), "sanity");
}

void CodeBlob::restore_mutable_data(address reloc_data) {
@@ -195,8 +196,11 @@ void CodeBlob::restore_mutable_data(address reloc_data) {
if (_mutable_data == nullptr) {
vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
}
} else {
_mutable_data = blob_end(); // default value
}
if (_relocation_size > 0) {
assert(_mutable_data_size > 0, "relocation is part of mutable data section");
memcpy((address)relocation_begin(), reloc_data, relocation_size());
}
}
@@ -206,6 +210,8 @@ void CodeBlob::purge() {
if (_mutable_data != blob_end()) {
os::free(_mutable_data);
_mutable_data = blob_end(); // Valid not null address
_mutable_data_size = 0;
_relocation_size = 0;
}
if (_oop_maps != nullptr) {
delete _oop_maps;

@@ -247,7 +247,7 @@ public:
// Sizes
int size() const { return _size; }
int header_size() const { return _header_size; }
int relocation_size() const { return pointer_delta_as_int((address) relocation_end(), (address) relocation_begin()); }
int relocation_size() const { return _relocation_size; }
int content_size() const { return pointer_delta_as_int(content_end(), content_begin()); }
int code_size() const { return pointer_delta_as_int(code_end(), code_begin()); }
@@ -28,7 +28,6 @@
#include "code/dependencies.hpp"
#include "code/nativeInst.hpp"
#include "code/nmethod.inline.hpp"
#include "code/relocInfo.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compilationLog.hpp"
@@ -1653,10 +1652,6 @@ void nmethod::maybe_print_nmethod(const DirectiveSet* directive) {
}

void nmethod::print_nmethod(bool printmethod) {
// Enter a critical section to prevent a race with deopts that patch code and updates the relocation info.
// Unfortunately, we have to lock the NMethodState_lock before the tty lock due to the deadlock rules and
// cannot lock in a more finely grained manner.
ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
ttyLocker ttyl; // keep the following output all in one block
if (xtty != nullptr) {
xtty->begin_head("print_nmethod");
@@ -2046,17 +2041,6 @@ bool nmethod::make_not_entrant(const char* reason) {
// cache call.
NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
SharedRuntime::get_handle_wrong_method_stub());

// Update the relocation info for the patched entry.
// First, get the old relocation info...
RelocIterator iter(this, verified_entry_point(), verified_entry_point() + 8);
if (iter.next() && iter.addr() == verified_entry_point()) {
Relocation* old_reloc = iter.reloc();
// ...then reset the iterator to update it.
RelocIterator iter(this, verified_entry_point(), verified_entry_point() + 8);
relocInfo::change_reloc_info_for_address(&iter, verified_entry_point(), old_reloc->type(),
relocInfo::relocType::runtime_call_type);
}
}

if (update_recompile_counts()) {
@@ -2182,6 +2166,7 @@ void nmethod::purge(bool unregister_nmethod) {
}
CodeCache::unregister_old_nmethod(this);

JVMCI_ONLY( _metadata_size = 0; )
CodeBlob::purge();
}
|
||||
}
|
||||
}
|
||||
|
||||
void CompileBroker::wait_for_no_active_tasks() {
|
||||
CompileTask::wait_for_no_active_tasks();
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize compiler thread(s) + compiler object(s). The postcondition
|
||||
* of this function is that the compiler runtimes are initialized and that
|
||||
|
||||
@@ -383,6 +383,9 @@ public:
|
||||
static bool is_compilation_disabled_forever() {
|
||||
return _should_compile_new_jobs == shutdown_compilation;
|
||||
}
|
||||
|
||||
static void wait_for_no_active_tasks();
|
||||
|
||||
static void handle_full_code_cache(CodeBlobType code_blob_type);
|
||||
// Ensures that warning is only printed once.
|
||||
static bool should_print_compiler_warning() {
|
||||
|
||||
@@ -37,12 +37,13 @@
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
|
||||
CompileTask* CompileTask::_task_free_list = nullptr;
|
||||
int CompileTask::_active_tasks = 0;
|
||||
|
||||
/**
|
||||
* Allocate a CompileTask, from the free list if possible.
|
||||
*/
|
||||
CompileTask* CompileTask::allocate() {
|
||||
MutexLocker locker(CompileTaskAlloc_lock);
|
||||
MonitorLocker locker(CompileTaskAlloc_lock);
|
||||
CompileTask* task = nullptr;
|
||||
|
||||
if (_task_free_list != nullptr) {
|
||||
@@ -56,6 +57,7 @@ CompileTask* CompileTask::allocate() {
|
||||
}
|
||||
assert(task->is_free(), "Task must be free.");
|
||||
task->set_is_free(false);
|
||||
_active_tasks++;
|
||||
return task;
|
||||
}
|
||||
|
||||
@@ -63,7 +65,7 @@ CompileTask* CompileTask::allocate() {
|
||||
* Add a task to the free list.
|
||||
*/
|
||||
void CompileTask::free(CompileTask* task) {
|
||||
MutexLocker locker(CompileTaskAlloc_lock);
|
||||
MonitorLocker locker(CompileTaskAlloc_lock);
|
||||
if (!task->is_free()) {
|
||||
if ((task->_method_holder != nullptr && JNIHandles::is_weak_global_handle(task->_method_holder))) {
|
||||
JNIHandles::destroy_weak_global(task->_method_holder);
|
||||
@@ -79,6 +81,17 @@ void CompileTask::free(CompileTask* task) {
|
||||
task->set_is_free(true);
|
||||
task->set_next(_task_free_list);
|
||||
_task_free_list = task;
|
||||
_active_tasks--;
|
||||
if (_active_tasks == 0) {
|
||||
locker.notify_all();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void CompileTask::wait_for_no_active_tasks() {
|
||||
MonitorLocker locker(CompileTaskAlloc_lock);
|
||||
while (_active_tasks > 0) {
|
||||
locker.wait();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
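Switching the CompileTaskAlloc_lock users from MutexLocker to MonitorLocker is what lets free() notify a waiter once the last active task is returned. A minimal condition-variable analogue of the same wait/notify protocol:

#include <condition_variable>
#include <mutex>
#include <thread>

// Minimal analogue of CompileTask's active-task accounting: the last
// free() notifies, and wait_for_no_active_tasks() blocks in a loop
// (guarding against spurious wakeups) until the count reaches zero.
static std::mutex g_lock;
static std::condition_variable g_cv;
static int g_active_tasks = 0;

void allocate_task() {
    std::lock_guard<std::mutex> guard(g_lock);
    g_active_tasks++;
}

void free_task() {
    std::lock_guard<std::mutex> guard(g_lock);
    if (--g_active_tasks == 0) {
        g_cv.notify_all();              // wake any dump-time waiter
    }
}

void wait_for_no_active_tasks() {
    std::unique_lock<std::mutex> guard(g_lock);
    g_cv.wait(guard, [] { return g_active_tasks == 0; });
}

int main() {
    allocate_task();
    std::thread t(free_task);
    wait_for_no_active_tasks();         // returns once the task is freed
    t.join();
    return 0;
}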
@@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -83,6 +83,7 @@ class CompileTask : public CHeapObj<mtCompiler> {

private:
static CompileTask* _task_free_list;
static int _active_tasks;
int _compile_id;
Method* _method;
jobject _method_holder;
@@ -123,6 +124,7 @@ class CompileTask : public CHeapObj<mtCompiler> {

static CompileTask* allocate();
static void free(CompileTask* task);
static void wait_for_no_active_tasks();

int compile_id() const { return _compile_id; }
Method* method() const { return _method; }
@@ -625,6 +625,34 @@ void ShenandoahBarrierC2Support::verify(RootNode* root) {
}
#endif

bool ShenandoahBarrierC2Support::is_anti_dependent_load_at_control(PhaseIdealLoop* phase, Node* maybe_load, Node* store,
Node* control) {
return maybe_load->is_Load() && phase->C->can_alias(store->adr_type(), phase->C->get_alias_index(maybe_load->adr_type())) &&
phase->ctrl_or_self(maybe_load) == control;
}

void ShenandoahBarrierC2Support::maybe_push_anti_dependent_loads(PhaseIdealLoop* phase, Node* maybe_store, Node* control, Unique_Node_List &wq) {
if (!maybe_store->is_Store() && !maybe_store->is_LoadStore()) {
return;
}
Node* mem = maybe_store->in(MemNode::Memory);
for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
Node* u = mem->fast_out(i);
if (is_anti_dependent_load_at_control(phase, u, maybe_store, control)) {
wq.push(u);
}
}
}

void ShenandoahBarrierC2Support::push_data_inputs_at_control(PhaseIdealLoop* phase, Node* n, Node* ctrl, Unique_Node_List &wq) {
for (uint i = 0; i < n->req(); i++) {
Node* in = n->in(i);
if (in != nullptr && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
wq.push(in);
}
}
}

bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
// That both nodes have the same control is not sufficient to prove
// domination, verify that there's no path from d to n
@@ -639,22 +667,9 @@ bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node*
if (m->is_Phi() && m->in(0)->is_Loop()) {
assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
} else {
if (m->is_Store() || m->is_LoadStore()) {
// Take anti-dependencies into account
Node* mem = m->in(MemNode::Memory);
for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
Node* u = mem->fast_out(i);
if (u->is_Load() && phase->C->can_alias(m->adr_type(), phase->C->get_alias_index(u->adr_type())) &&
phase->ctrl_or_self(u) == c) {
wq.push(u);
}
}
}
for (uint i = 0; i < m->req(); i++) {
if (m->in(i) != nullptr && phase->ctrl_or_self(m->in(i)) == c) {
wq.push(m->in(i));
}
}
// Take anti-dependencies into account
maybe_push_anti_dependent_loads(phase, m, c, wq);
push_data_inputs_at_control(phase, m, c, wq);
}
}
return true;
@@ -1006,7 +1021,20 @@ void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* lo
phase->register_new_node(val, ctrl);
}

void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
void ShenandoahBarrierC2Support::collect_nodes_above_barrier(Unique_Node_List &nodes_above_barrier, PhaseIdealLoop* phase, Node* ctrl, Node* init_raw_mem) {
nodes_above_barrier.clear();
if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
nodes_above_barrier.push(init_raw_mem);
}
for (uint next = 0; next < nodes_above_barrier.size(); next++) {
Node* n = nodes_above_barrier.at(next);
// Take anti-dependencies into account
maybe_push_anti_dependent_loads(phase, n, ctrl, nodes_above_barrier);
push_data_inputs_at_control(phase, n, ctrl, nodes_above_barrier);
}
}

void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& nodes_above_barrier, uint last, PhaseIdealLoop* phase) {
Node* ctrl = phase->get_ctrl(barrier);
Node* init_raw_mem = fixer.find_mem(ctrl, barrier);

@@ -1017,30 +1045,17 @@ void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const Mem
// control will be after the expanded barrier. The raw memory (if
// its memory is control dependent on the barrier's input control)
// must stay above the barrier.
uses_to_ignore.clear();
if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
uses_to_ignore.push(init_raw_mem);
}
for (uint next = 0; next < uses_to_ignore.size(); next++) {
Node *n = uses_to_ignore.at(next);
for (uint i = 0; i < n->req(); i++) {
Node* in = n->in(i);
if (in != nullptr && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
uses_to_ignore.push(in);
}
}
}
collect_nodes_above_barrier(nodes_above_barrier, phase, ctrl, init_raw_mem);
for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
Node* u = ctrl->fast_out(i);
if (u->_idx < last &&
u != barrier &&
!u->depends_only_on_test() && // preserve dependency on test
!uses_to_ignore.member(u) &&
!nodes_above_barrier.member(u) &&
(u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
(ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
Node* old_c = phase->ctrl_or_self(u);
Node* c = old_c;
if (c != ctrl ||
if (old_c != ctrl ||
is_dominator_same_ctrl(old_c, barrier, u, phase) ||
ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
phase->igvn().rehash_node_delayed(u);
@@ -1315,7 +1330,7 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {

// Expand load-reference-barriers
MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
Unique_Node_List uses_to_ignore;
Unique_Node_List nodes_above_barriers;
for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
uint last = phase->C->unique();
@@ -1410,7 +1425,7 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
Node* out_val = val_phi;
phase->register_new_node(val_phi, region);

fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);
fix_ctrl(lrb, region, fixer, uses, nodes_above_barriers, last, phase);

ctrl = orig_ctrl;
@@ -62,8 +62,12 @@ private:
PhaseIdealLoop* phase, int flags);
static void call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr,
DecoratorSet decorators, PhaseIdealLoop* phase);

static void collect_nodes_above_barrier(Unique_Node_List &nodes_above_barrier, PhaseIdealLoop* phase, Node* ctrl,
Node* init_raw_mem);

static void test_in_cset(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase);
static void fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase);
static void fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& nodes_above_barrier, uint last, PhaseIdealLoop* phase);

static Node* get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* lrb);
public:
@@ -76,6 +80,11 @@ public:
static bool expand(Compile* C, PhaseIterGVN& igvn);
static void pin_and_expand(PhaseIdealLoop* phase);

static void push_data_inputs_at_control(PhaseIdealLoop* phase, Node* n, Node* ctrl,
Unique_Node_List &wq);
static bool is_anti_dependent_load_at_control(PhaseIdealLoop* phase, Node* maybe_load, Node* store, Node* control);

static void maybe_push_anti_dependent_loads(PhaseIdealLoop* phase, Node* maybe_store, Node* control, Unique_Node_List &wq);
#ifdef ASSERT
static void verify(RootNode* root);
#endif
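The new helpers factor out one rule: a node kept above a barrier drags along any load that reads a possibly-aliasing memory slice at the same control, because the load anti-depends on the store. A toy standalone model of maybe_push_anti_dependent_loads, with alias checking reduced to an index compare and the user list attached directly to the store for brevity:

#include <vector>

// Toy model: when walking nodes that must stay above a barrier, a
// store pushes any load of a possibly-aliasing slice at the same
// control onto the worklist, since the load must execute first.
struct Node {
    bool is_load = false;
    int  alias_idx = 0;
    int  ctrl = 0;
    std::vector<Node*> mem_outs;        // users of this node's memory input
};

void push_anti_dependent_loads(const Node& store, int control,
                               std::vector<Node*>& worklist) {
    for (Node* u : store.mem_outs) {
        if (u->is_load && u->alias_idx == store.alias_idx && u->ctrl == control) {
            worklist.push_back(u);      // keep the load above the barrier too
        }
    }
}

int main() {
    Node load{true, 1, 7, {}};
    Node store{false, 1, 7, {&load}};
    std::vector<Node*> wq;
    push_anti_dependent_loads(store, 7, wq);
    return wq.size() == 1 ? 0 : 1;
}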
@@ -415,10 +415,6 @@ void ShenandoahConcurrentGC::entry_reset() {
msg);
op_reset();
}

if (heap->mode()->is_generational()) {
heap->old_generation()->card_scan()->mark_read_table_as_clean();
}
}

void ShenandoahConcurrentGC::entry_scan_remembered_set() {
@@ -644,6 +640,10 @@ void ShenandoahConcurrentGC::op_reset() {
} else {
_generation->prepare_gc();
}

if (heap->mode()->is_generational()) {
heap->old_generation()->card_scan()->mark_read_table_as_clean();
}
}

class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {

@@ -136,9 +136,15 @@ void ShenandoahDegenGC::op_degenerated() {
heap->set_unload_classes(_generation->heuristics()->can_unload_classes() &&
(!heap->mode()->is_generational() || _generation->is_global()));

if (heap->mode()->is_generational() && _generation->is_young()) {
// Swap remembered sets for young
_generation->swap_card_tables();
if (heap->mode()->is_generational()) {
// Clean the read table before swapping it. The end goal here is to have a clean
// write table, and to have the read table updated with the previous write table.
heap->old_generation()->card_scan()->mark_read_table_as_clean();

if (_generation->is_young()) {
// Swap remembered sets for young
_generation->swap_card_tables();
}
}

case _degenerated_roots:
@@ -183,6 +183,29 @@ void ShenandoahGenerationalHeap::stop() {
regulator_thread()->stop();
}

bool ShenandoahGenerationalHeap::requires_barriers(stackChunkOop obj) const {
if (is_idle()) {
return false;
}

if (is_concurrent_young_mark_in_progress() && is_in_young(obj) && !marking_context()->allocated_after_mark_start(obj)) {
// We are marking young, this object is in young, and it is below the TAMS
return true;
}

if (is_in_old(obj)) {
// Card marking barriers are required for objects in the old generation
return true;
}

if (has_forwarded_objects()) {
// Object may have pointers that need to be updated
return true;
}

return false;
}

void ShenandoahGenerationalHeap::evacuate_collection_set(bool concurrent) {
ShenandoahRegionIterator regions;
ShenandoahGenerationalEvacuationTask task(this, &regions, concurrent, false /* only promote regions */);

@@ -128,6 +128,8 @@ public:

void stop() override;

bool requires_barriers(stackChunkOop obj) const override;

// Used for logging the result of a region transfer outside the heap lock
struct TransferResult {
bool success;
@@ -1452,27 +1452,23 @@ void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
}
}

size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) const {
assert(start->is_humongous_start(), "reclaim regions starting with the first one");

oop humongous_obj = cast_to_oop(start->bottom());
size_t size = humongous_obj->size();
size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
size_t index = start->index() + required_regions - 1;

assert(!start->has_live(), "liveness must be zero");

for(size_t i = 0; i < required_regions; i++) {
// Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
// as it expects that every region belongs to a humongous region starting with a humongous start region.
ShenandoahHeapRegion* region = get_region(index --);

assert(region->is_humongous(), "expect correct humongous start or continuation");
// Do not try to get the size of this humongous object. STW collections will
// have already unloaded classes, so an unmarked object may have a bad klass pointer.
ShenandoahHeapRegion* region = start;
size_t index = region->index();
do {
assert(region->is_humongous(), "Expect correct humongous start or continuation");
assert(!region->is_cset(), "Humongous region should not be in collection set");

region->make_trash_immediate();
}
return required_regions;
region = get_region(++index);
} while (region != nullptr && region->is_humongous_continuation());

// Return number of regions trashed
return index - start->index();
}

class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {

@@ -828,7 +828,7 @@ public:
static inline void atomic_clear_oop(narrowOop* addr, oop compare);
static inline void atomic_clear_oop(narrowOop* addr, narrowOop compare);

size_t trash_humongous_region_at(ShenandoahHeapRegion *r);
size_t trash_humongous_region_at(ShenandoahHeapRegion *r) const;

static inline void increase_object_age(oop obj, uint additional_age);
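The rewritten trash_humongous_region_at no longer derives the region count from the object's size (whose klass may already be unloaded in a STW cycle); it just walks forward from the start region while continuations follow. A toy sketch of that walk:

#include <cstddef>
#include <vector>

// Toy model of the rewritten loop: walk forward from the humongous
// start region and trash continuations until the run ends, never
// consulting the (possibly unloadable) object's size.
enum RegionKind { FREE, HUMONGOUS_START, HUMONGOUS_CONT };

size_t trash_humongous_run(std::vector<RegionKind>& regions, size_t start) {
    size_t index = start;
    do {
        regions[index] = FREE;          // make_trash_immediate()
        ++index;
    } while (index < regions.size() && regions[index] == HUMONGOUS_CONT);
    return index - start;               // number of regions trashed
}

int main() {
    std::vector<RegionKind> heap = {HUMONGOUS_START, HUMONGOUS_CONT,
                                    HUMONGOUS_CONT, FREE};
    return trash_humongous_run(heap, 0) == 3 ? 0 : 1;
}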
@@ -624,7 +624,7 @@ void ShenandoahDirectCardMarkRememberedSet::swap_card_tables() {

#ifdef ASSERT
CardValue* start_bp = &(_card_table->write_byte_map())[0];
CardValue* end_bp = &(new_ptr)[_card_table->last_valid_index()];
CardValue* end_bp = &(start_bp[_card_table->last_valid_index()]);

while (start_bp <= end_bp) {
assert(*start_bp == CardTable::clean_card_val(), "Should be clean: " PTR_FORMAT, p2i(start_bp));
@@ -1936,7 +1936,7 @@ void ZPageAllocator::cleanup_failed_commit_multi_partition(ZMultiPartitionAlloca
}

const size_t committed = allocation->committed_capacity();
const ZVirtualMemory non_harvested_vmem = vmem.last_part(allocation->harvested());
const ZVirtualMemory non_harvested_vmem = partial_vmem.last_part(allocation->harvested());
const ZVirtualMemory committed_vmem = non_harvested_vmem.first_part(committed);
const ZVirtualMemory non_committed_vmem = non_harvested_vmem.last_part(committed);

@@ -214,9 +214,20 @@ void ZPhysicalMemoryManager::free(const ZVirtualMemory& vmem, uint32_t numa_id)
});
}

static size_t inject_commit_limit(const ZVirtualMemory& vmem) {
// To facilitate easier interoperability with multi partition allocations we
// divide by ZNUMA::count(). Users of ZFailLargerCommits need to be aware of
// this when writing tests. In the future we could probe the VirtualMemoryManager
// and condition this division on whether the vmem is in the multi partition
// address space.
return align_up(MIN2(ZFailLargerCommits / ZNUMA::count(), vmem.size()), ZGranuleSize);
}

size_t ZPhysicalMemoryManager::commit(const ZVirtualMemory& vmem, uint32_t numa_id) {
zbacking_index* const pmem = _physical_mappings.addr(vmem.start());
const size_t size = vmem.size();
const size_t size = ZFailLargerCommits > 0
? inject_commit_limit(vmem)
: vmem.size();

size_t total_committed = 0;
@@ -118,6 +118,11 @@
develop(bool, ZVerifyOops, false, \
"Verify accessed oops") \
\
develop(size_t, ZFailLargerCommits, 0, \
"Commits larger than ZFailLargerCommits will be truncated, " \
"used to stress page allocation commit failure paths " \
"(0: Disabled)") \
\
develop(uint, ZFakeNUMA, 1, \
"ZFakeNUMA is used to test the internal NUMA memory support " \
"without the need for UseNUMA") \
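ZFailLargerCommits truncates commit requests to exercise the failure paths; the helper above divides the limit by the partition count and keeps the result granule-aligned. A sketch of the arithmetic with made-up sizes (2M standing in for ZGranuleSize, which is an assumption about the build):

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Sketch of inject_commit_limit(): cap the commit at
// ZFailLargerCommits / numa_count, but keep it a multiple of the
// granule and never larger than the original request.
static size_t align_up(size_t v, size_t a) { return (v + a - 1) & ~(a - 1); }

size_t inject_commit_limit(size_t request, size_t fail_larger, size_t numa_count) {
    const size_t granule = 2 * 1024 * 1024;   // assumed ZGranuleSize
    return align_up(std::min(fail_larger / numa_count, request), granule);
}

int main() {
    // A 64M request with a 16M limit on a 2-partition machine yields 8M.
    printf("%zu\n", inject_commit_limit(64u << 20, 16u << 20, 2));
    return 0;
}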
@@ -100,8 +100,23 @@ class BaseBytecodeStream: StackObj {
void set_next_bci(int bci) { assert(0 <= bci && bci <= method()->code_size(), "illegal bci"); _next_bci = bci; }

// Bytecode-specific attributes
int dest() const { return bci() + bytecode().get_offset_s2(raw_code()); }
int dest_w() const { return bci() + bytecode().get_offset_s4(raw_code()); }
int get_offset_s2() const { return bytecode().get_offset_s2(raw_code()); }
int get_offset_s4() const { return bytecode().get_offset_s4(raw_code()); }

// These methods are not safe to use before or during verification as they may
// have large offsets and cause overflows
int dest() const {
int min_offset = -1 * max_method_code_size;
int offset = bytecode().get_offset_s2(raw_code());
guarantee(offset >= min_offset && offset <= max_method_code_size, "must be");
return bci() + offset;
}
int dest_w() const {
int min_offset = -1 * max_method_code_size;
int offset = bytecode().get_offset_s4(raw_code());
guarantee(offset >= min_offset && offset <= max_method_code_size, "must be");
return bci() + offset;
}

// One-byte indices.
u1 get_index_u1() const { assert_raw_index_size(1); return *(jubyte*)(bcp()+1); }
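get_offset_s2 and get_offset_s4 expose the raw signed branch operand so callers can validate it before computing a destination. Branch operands are stored big-endian after the opcode; a standalone decoder for both widths:

#include <cstdint>
#include <cstdio>

// Bytecode branch operands are big-endian and signed: goto carries a
// 16-bit offset, goto_w a 32-bit one (mirroring get_offset_s2/s4).
int get_offset_s2(const uint8_t* bcp) {
    return (int16_t)((bcp[1] << 8) | bcp[2]);     // sign-extend 16 bits
}

int get_offset_s4(const uint8_t* bcp) {
    return (int32_t)(((uint32_t)bcp[1] << 24) | ((uint32_t)bcp[2] << 16) |
                     ((uint32_t)bcp[3] << 8)  |  (uint32_t)bcp[4]);
}

int main() {
    const uint8_t wide_goto[] = {0xc8, 0xff, 0xff, 0xff, 0xfe}; // goto_w -2
    printf("%d\n", get_offset_s4(wide_goto));                   // prints -2
    return 0;
}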
@@ -132,14 +132,14 @@ InstanceKlass* JfrClassTransformer::create_new_instance_klass(InstanceKlass* ik,
}

// Redefining / retransforming?
const Klass* JfrClassTransformer::find_existing_klass(const InstanceKlass* ik, JavaThread* thread) {
const InstanceKlass* JfrClassTransformer::find_existing_klass(const InstanceKlass* ik, JavaThread* thread) {
assert(ik != nullptr, "invariant");
assert(thread != nullptr, "invariant");
JvmtiThreadState* const state = thread->jvmti_thread_state();
return state != nullptr ? klass_being_redefined(ik, state) : nullptr;
}

const Klass* JfrClassTransformer::klass_being_redefined(const InstanceKlass* ik, JvmtiThreadState* state) {
const InstanceKlass* JfrClassTransformer::klass_being_redefined(const InstanceKlass* ik, JvmtiThreadState* state) {
assert(ik != nullptr, "invariant");
assert(state != nullptr, "invariant");
const GrowableArray<Klass*>* const redef_klasses = state->get_classes_being_redefined();
@@ -149,9 +149,10 @@ const Klass* JfrClassTransformer::klass_being_redefined(const InstanceKlass* ik,
for (int i = 0; i < redef_klasses->length(); ++i) {
const Klass* const existing_klass = redef_klasses->at(i);
assert(existing_klass != nullptr, "invariant");
assert(existing_klass->is_instance_klass(), "invariant");
if (ik->name() == existing_klass->name() && ik->class_loader_data() == existing_klass->class_loader_data()) {
// 'ik' is a scratch klass. Return the klass being redefined.
return existing_klass;
return InstanceKlass::cast(existing_klass);
}
}
return nullptr;

@@ -38,10 +38,10 @@ class InstanceKlass;
class JfrClassTransformer : AllStatic {
private:
static InstanceKlass* create_new_instance_klass(InstanceKlass* ik, ClassFileStream* stream, TRAPS);
static const Klass* klass_being_redefined(const InstanceKlass* ik, JvmtiThreadState* state);
static const InstanceKlass* klass_being_redefined(const InstanceKlass* ik, JvmtiThreadState* state);

public:
static const Klass* find_existing_klass(const InstanceKlass* ik, JavaThread* thread);
static const InstanceKlass* find_existing_klass(const InstanceKlass* ik, JavaThread* thread);
static InstanceKlass* create_instance_klass(InstanceKlass*& ik, ClassFileStream* stream, bool is_initial_load, JavaThread* thread);
static void copy_traceid(const InstanceKlass* ik, const InstanceKlass* new_ik);
static void transfer_cached_class_file_data(InstanceKlass* ik, InstanceKlass* new_ik, const ClassFileParser& parser, JavaThread* thread);
@@ -36,6 +36,7 @@
#include "jfr/support/jfrResolution.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "jfr/support/methodtracer/jfrMethodTracer.hpp"
#include "jfr/support/methodtracer/jfrTraceTagging.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/klass.hpp"
@@ -88,12 +89,10 @@ void Jfr::on_klass_creation(InstanceKlass*& ik, ClassFileParser& parser, TRAPS)
}
}

void Jfr::on_klass_redefinition(const InstanceKlass* ik, Thread* thread) {
assert(JfrMethodTracer::in_use(), "invariant");
JfrMethodTracer::on_klass_redefinition(ik, thread);
void Jfr::on_klass_redefinition(const InstanceKlass* ik, const InstanceKlass* scratch_klass) {
JfrTraceTagging::on_klass_redefinition(ik, scratch_klass);
}

bool Jfr::is_excluded(Thread* t) {
return JfrJavaSupport::is_excluded(t);
}

@@ -61,7 +61,7 @@ class Jfr : AllStatic {
static void include_thread(Thread* thread);
static void exclude_thread(Thread* thread);
static void on_klass_creation(InstanceKlass*& ik, ClassFileParser& parser, TRAPS);
static void on_klass_redefinition(const InstanceKlass* ik, Thread* thread);
static void on_klass_redefinition(const InstanceKlass* ik, const InstanceKlass* scratch_klass);
static void on_thread_start(Thread* thread);
static void on_thread_exit(Thread* thread);
static void on_resolution(const CallInfo& info, TRAPS);
@@ -170,9 +170,15 @@ NO_TRANSITION(jboolean, jfr_set_throttle(JNIEnv* env, jclass jvm, jlong event_ty
return JNI_TRUE;
NO_TRANSITION_END

JVM_ENTRY_NO_ENV(void, jfr_set_cpu_throttle(JNIEnv* env, jclass jvm, jdouble rate, jboolean auto_adapt))
JVM_ENTRY_NO_ENV(void, jfr_set_cpu_rate(JNIEnv* env, jclass jvm, jdouble rate))
JfrEventSetting::set_enabled(JfrCPUTimeSampleEvent, rate > 0);
JfrCPUTimeThreadSampling::set_rate(rate, auto_adapt == JNI_TRUE);
JfrCPUTimeThreadSampling::set_rate(rate);
JVM_END

JVM_ENTRY_NO_ENV(void, jfr_set_cpu_period(JNIEnv* env, jclass jvm, jlong period_nanos))
assert(period_nanos >= 0, "invariant");
JfrEventSetting::set_enabled(JfrCPUTimeSampleEvent, period_nanos > 0);
JfrCPUTimeThreadSampling::set_period(period_nanos);
JVM_END

NO_TRANSITION(void, jfr_set_miscellaneous(JNIEnv* env, jclass jvm, jlong event_type_id, jlong value))

@@ -129,7 +129,9 @@ jlong JNICALL jfr_get_unloaded_event_classes_count(JNIEnv* env, jclass jvm);

jboolean JNICALL jfr_set_throttle(JNIEnv* env, jclass jvm, jlong event_type_id, jlong event_sample_size, jlong period_ms);

void JNICALL jfr_set_cpu_throttle(JNIEnv* env, jclass jvm, jdouble rate, jboolean auto_adapt);
void JNICALL jfr_set_cpu_rate(JNIEnv* env, jclass jvm, jdouble rate);

void JNICALL jfr_set_cpu_period(JNIEnv* env, jclass jvm, jlong period_nanos);

void JNICALL jfr_set_miscellaneous(JNIEnv* env, jclass jvm, jlong id, jlong value);

@@ -83,7 +83,8 @@ JfrJniMethodRegistration::JfrJniMethodRegistration(JNIEnv* env) {
(char*)"getUnloadedEventClassCount", (char*)"()J", (void*)jfr_get_unloaded_event_classes_count,
(char*)"setMiscellaneous", (char*)"(JJ)V", (void*)jfr_set_miscellaneous,
(char*)"setThrottle", (char*)"(JJJ)Z", (void*)jfr_set_throttle,
(char*)"setCPUThrottle", (char*)"(DZ)V", (void*)jfr_set_cpu_throttle,
(char*)"setCPURate", (char*)"(D)V", (void*)jfr_set_cpu_rate,
(char*)"setCPUPeriod", (char*)"(J)V", (void*)jfr_set_cpu_period,
(char*)"emitOldObjectSamples", (char*)"(JZZ)V", (void*)jfr_emit_old_object_samples,
(char*)"shouldRotateDisk", (char*)"()Z", (void*)jfr_should_rotate_disk,
(char*)"exclude", (char*)"(Ljava/lang/Thread;)V", (void*)jfr_exclude_thread,
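The registration table pairs each Java-visible method name with a JNI descriptor and a C entry point, so renaming setCPUThrottle to setCPURate also changes the descriptor from "(DZ)V" to "(D)V" (one double, void return). A hedged sketch of how such a row is wired up; the helper name and the idea of calling it from JNI_OnLoad are illustrative assumptions, since HotSpot registers these natives from inside the VM rather than from a loadable library:

#include <jni.h>

// "(D)V" means one double parameter and a void return, so the native
// signature is (JNIEnv*, jclass, jdouble). This function is a stand-in,
// not HotSpot's jfr_set_cpu_rate.
static void JNICALL example_set_cpu_rate(JNIEnv*, jclass, jdouble rate) {
    (void)rate;  // a real implementation would forward to the sampler
}

// Hypothetical helper: register the row against a given class.
static jint register_natives(JNIEnv* env, jclass jvm_class) {
    static const JNINativeMethod methods[] = {
        {(char*)"setCPURate", (char*)"(D)V", (void*)example_set_cpu_rate},
    };
    return env->RegisterNatives(jvm_class, methods, 1);
}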
@@ -948,22 +948,24 @@
|
||||
<Field type="long" contentType="bytes" name="freeSize" label="Free Size" description="Free swap space" />
|
||||
</Event>
|
||||
|
||||
<Event name="ExecutionSample" category="Java Virtual Machine, Profiling" label="Method Profiling Sample" description="Snapshot of a threads state"
|
||||
<Event name="ExecutionSample" category="Java Virtual Machine, Profiling" label="Java Execution Sample"
|
||||
description="Snapshot of a thread executing Java code. Threads that are not executing Java code, including those waiting or executing native code, are not included."
|
||||
period="everyChunk">
|
||||
<Field type="Thread" name="sampledThread" label="Thread" />
|
||||
<Field type="StackTrace" name="stackTrace" label="Stack Trace" />
|
||||
<Field type="ThreadState" name="state" label="Thread State" />
|
||||
</Event>
|
||||
|
||||
<Event name="NativeMethodSample" category="Java Virtual Machine, Profiling" label="Method Profiling Sample Native" description="Snapshot of a threads state when in native"
|
||||
<Event name="NativeMethodSample" category="Java Virtual Machine, Profiling" label="Native Sample"
|
||||
description="Snapshot of a thread in native code, executing or waiting. Threads that are executing Java code are not included."
|
||||
period="everyChunk">
|
||||
<Field type="Thread" name="sampledThread" label="Thread" />
|
||||
<Field type="StackTrace" name="stackTrace" label="Stack Trace" />
|
||||
<Field type="ThreadState" name="state" label="Thread State" />
|
||||
</Event>
|
||||
|
||||
<Event name="CPUTimeSample" category="Java Virtual Machine, Profiling" label="CPU Time Method Sample"
|
||||
description="Snapshot of a threads state from the CPU time sampler. The throttle can be either an upper bound for the event emission rate, e.g. 100/s, or the cpu-time period, e.g. 10ms, with s, ms, us and ns supported as time units."
|
||||
<Event name="CPUTimeSample" category="Java Virtual Machine, Profiling" label="CPU Time Sample"
|
||||
description="Snapshot of a threads state from the CPU time sampler, both threads executing native and Java code are included. The throttle setting can be either an upper bound for the event emission rate, e.g. 100/s, or the cpu-time period, e.g. 10ms, with s, ms, us and ns supported as time units."
|
||||
throttle="true" thread="false" experimental="true" startTime="false">
|
||||
<Field type="StackTrace" name="stackTrace" label="Stack Trace" />
|
||||
<Field type="Thread" name="eventThread" label="Thread" />
|
||||
@@ -972,7 +974,7 @@
|
||||
<Field type="boolean" name="biased" label="Biased" description="The sample is safepoint-biased" />
|
||||
</Event>
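
The CPUTimeSample metadata above fixes the throttle grammar: a value such as "100/s" is an upper bound on the event emission rate, while a value such as "10ms" is a fixed CPU-time period (with s, ms, us and ns as units). As a rough sketch of that grammar only (the actual parsing lives in the JDK's Java-level event-settings code, and every name below is hypothetical):

#include <cstdint>
#include <string>

// Hypothetical sketch, not the JDK parser: "100/s" selects a rate bound,
// "10ms" (or s/us/ns) selects a fixed CPU-time period in nanoseconds.
struct ParsedThrottle {
  bool is_rate;
  double rate_per_s;      // meaningful when is_rate
  uint64_t period_nanos;  // meaningful when !is_rate
};

static ParsedThrottle parse_throttle(const std::string& s) {
  ParsedThrottle t = {};
  const size_t slash = s.find('/');
  if (slash != std::string::npos) {  // e.g. "100/s"
    t.is_rate = true;
    t.rate_per_s = std::stod(s.substr(0, slash));
    return t;
  }
  size_t pos = 0;
  const double value = std::stod(s, &pos);  // e.g. the "10" of "10ms"
  const std::string unit = s.substr(pos);
  uint64_t scale = 1;  // "ns"
  if (unit == "s") {
    scale = 1000000000;
  } else if (unit == "ms") {
    scale = 1000000;
  } else if (unit == "us") {
    scale = 1000;
  }
  t.period_nanos = static_cast<uint64_t>(value * scale);
  return t;
}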

-  <Event name="CPUTimeSamplesLost" category="Java Virtual Machine, Profiling" label="CPU Time Method Profiling Lost Samples" description="Records that the CPU time sampler lost samples"
+  <Event name="CPUTimeSamplesLost" category="Java Virtual Machine, Profiling" label="CPU Time Samples Lost" description="Records that the CPU time sampler lost samples"
          thread="false" stackTrace="false" startTime="false" experimental="true">
     <Field type="int" name="lostSamples" label="Lost Samples" />
     <Field type="Thread" name="eventThread" label="Thread" />

@@ -45,7 +45,7 @@

 #include "signals_posix.hpp"

-static const int64_t AUTOADAPT_INTERVAL_MS = 100;
+static const int64_t RECOMPUTE_INTERVAL_MS = 100;

 static bool is_excluded(JavaThread* jt) {
   return jt->is_hidden_from_external_view() ||

@@ -163,20 +163,42 @@ void JfrCPUTimeTraceQueue::clear() {
   Atomic::release_store(&_head, (u4)0);
 }

-static int64_t compute_sampling_period(double rate) {
-  if (rate == 0) {
-    return 0;
+// A throttle is either a rate or a fixed period
+class JfrCPUSamplerThrottle {
+
+  union {
+    double _rate;
+    u8 _period_nanos;
+  };
+  bool _is_rate;
+
+ public:
+
+  JfrCPUSamplerThrottle(double rate) : _rate(rate), _is_rate(true) {
+    assert(rate >= 0, "invariant");
   }
-  return os::active_processor_count() * 1000000000.0 / rate;
-}
+
+  JfrCPUSamplerThrottle(u8 period_nanos) : _period_nanos(period_nanos), _is_rate(false) {}
+
+  bool enabled() const { return _is_rate ? _rate > 0 : _period_nanos > 0; }
+
+  int64_t compute_sampling_period() const {
+    if (_is_rate) {
+      if (_rate == 0) {
+        return 0;
+      }
+      return os::active_processor_count() * 1000000000.0 / _rate;
+    }
+    return _period_nanos;
+  }
+};
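
When the throttle is a rate, compute_sampling_period() above spreads the bound across CPUs: period_ns = active_processor_count() * 10^9 / rate, so the per-thread CPU-time timers collectively approximate the requested event rate. A standalone sketch of that arithmetic (the processor count is stubbed here; the VM queries os::active_processor_count()):

#include <cstdint>
#include <cstdio>

// Stub standing in for os::active_processor_count(); the value 8 is an
// assumption made only for this example.
static int active_processor_count() { return 8; }

static int64_t sampling_period_ns(double rate_per_s) {
  if (rate_per_s == 0) {
    return 0;  // a zero rate disables the sampler
  }
  return static_cast<int64_t>(active_processor_count() * 1000000000.0 / rate_per_s);
}

int main() {
  // 8 CPUs with a 100/s bound: each thread timer fires every 80000000 ns
  // (80 ms) of consumed CPU time.
  std::printf("%lld ns\n", static_cast<long long>(sampling_period_ns(100.0)));
  return 0;
}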

 class JfrCPUSamplerThread : public NonJavaThread {
   friend class JfrCPUTimeThreadSampling;
  private:
   Semaphore _sample;
   NonJavaThread* _sampler_thread;
-  double _rate;
-  bool _auto_adapt;
+  JfrCPUSamplerThrottle _throttle;
   volatile int64_t _current_sampling_period_ns;
   volatile bool _disenrolled;
   // top bit is used to indicate that no signal handler should proceed
@@ -187,7 +209,7 @@ class JfrCPUSamplerThread : public NonJavaThread {

   static const u4 STOP_SIGNAL_BIT = 0x80000000;

-  JfrCPUSamplerThread(double rate, bool auto_adapt);
+  JfrCPUSamplerThread(JfrCPUSamplerThrottle& throttle);

   void start_thread();

@@ -195,9 +217,9 @@ class JfrCPUSamplerThread : public NonJavaThread {
   void disenroll();
   void update_all_thread_timers();

-  void auto_adapt_period_if_needed();
+  void recompute_period_if_needed();

-  void set_rate(double rate, bool auto_adapt);
+  void set_throttle(JfrCPUSamplerThrottle& throttle);
   int64_t get_sampling_period() const { return Atomic::load(&_current_sampling_period_ns); };

   void sample_thread(JfrSampleRequest& request, void* ucontext, JavaThread* jt, JfrThreadLocal* tl, JfrTicks& now);
@@ -231,18 +253,16 @@ public:
   void trigger_async_processing_of_cpu_time_jfr_requests();
 };

-JfrCPUSamplerThread::JfrCPUSamplerThread(double rate, bool auto_adapt) :
+JfrCPUSamplerThread::JfrCPUSamplerThread(JfrCPUSamplerThrottle& throttle) :
   _sample(),
   _sampler_thread(nullptr),
-  _rate(rate),
-  _auto_adapt(auto_adapt),
-  _current_sampling_period_ns(compute_sampling_period(rate)),
+  _throttle(throttle),
+  _current_sampling_period_ns(throttle.compute_sampling_period()),
   _disenrolled(true),
   _active_signal_handlers(STOP_SIGNAL_BIT),
   _is_async_processing_of_cpu_time_jfr_requests_triggered(false),
   _warned_about_timer_creation_failure(false),
   _signal_handler_installed(false) {
-  assert(rate >= 0, "invariant");
 }

 void JfrCPUSamplerThread::trigger_async_processing_of_cpu_time_jfr_requests() {
@@ -321,7 +341,7 @@ void JfrCPUSamplerThread::disenroll() {
 void JfrCPUSamplerThread::run() {
   assert(_sampler_thread == nullptr, "invariant");
   _sampler_thread = this;
-  int64_t last_auto_adapt_check = os::javaTimeNanos();
+  int64_t last_recompute_check = os::javaTimeNanos();
   while (true) {
     if (!_sample.trywait()) {
       // disenrolled
@@ -329,9 +349,9 @@ void JfrCPUSamplerThread::run() {
     }
     _sample.signal();

-    if (os::javaTimeNanos() - last_auto_adapt_check > AUTOADAPT_INTERVAL_MS * 1000000) {
-      auto_adapt_period_if_needed();
-      last_auto_adapt_check = os::javaTimeNanos();
+    if (os::javaTimeNanos() - last_recompute_check > RECOMPUTE_INTERVAL_MS * 1000000) {
+      recompute_period_if_needed();
+      last_recompute_check = os::javaTimeNanos();
     }

     if (Atomic::cmpxchg(&_is_async_processing_of_cpu_time_jfr_requests_triggered, true, false)) {
@@ -442,42 +462,50 @@ JfrCPUTimeThreadSampling::~JfrCPUTimeThreadSampling() {
   }
 }

-void JfrCPUTimeThreadSampling::create_sampler(double rate, bool auto_adapt) {
+void JfrCPUTimeThreadSampling::create_sampler(JfrCPUSamplerThrottle& throttle) {
   assert(_sampler == nullptr, "invariant");
-  _sampler = new JfrCPUSamplerThread(rate, auto_adapt);
+  _sampler = new JfrCPUSamplerThread(throttle);
   _sampler->start_thread();
   _sampler->enroll();
 }

-void JfrCPUTimeThreadSampling::update_run_state(double rate, bool auto_adapt) {
-  if (rate != 0) {
+void JfrCPUTimeThreadSampling::update_run_state(JfrCPUSamplerThrottle& throttle) {
+  if (throttle.enabled()) {
     if (_sampler == nullptr) {
-      create_sampler(rate, auto_adapt);
+      create_sampler(throttle);
     } else {
-      _sampler->set_rate(rate, auto_adapt);
+      _sampler->set_throttle(throttle);
       _sampler->enroll();
     }
     return;
   }
   if (_sampler != nullptr) {
-    _sampler->set_rate(rate /* 0 */, auto_adapt);
+    _sampler->set_throttle(throttle);
     _sampler->disenroll();
   }
 }

-void JfrCPUTimeThreadSampling::set_rate(double rate, bool auto_adapt) {
-  assert(rate >= 0, "invariant");
+void JfrCPUTimeThreadSampling::set_rate(double rate) {
   if (_instance == nullptr) {
     return;
   }
-  instance().set_rate_value(rate, auto_adapt);
+  JfrCPUSamplerThrottle throttle(rate);
+  instance().set_throttle_value(throttle);
 }

-void JfrCPUTimeThreadSampling::set_rate_value(double rate, bool auto_adapt) {
-  if (_sampler != nullptr) {
-    _sampler->set_rate(rate, auto_adapt);
+void JfrCPUTimeThreadSampling::set_period(u8 nanos) {
+  if (_instance == nullptr) {
+    return;
   }
-  update_run_state(rate, auto_adapt);
+  JfrCPUSamplerThrottle throttle(nanos);
+  instance().set_throttle_value(throttle);
 }
+
+void JfrCPUTimeThreadSampling::set_throttle_value(JfrCPUSamplerThrottle& throttle) {
+  if (_sampler != nullptr) {
+    _sampler->set_throttle(throttle);
+  }
+  update_run_state(throttle);
+}
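
Worth noting in the new code above: set_rate() and set_period() both wrap their argument in a JfrCPUSamplerThrottle and converge on set_throttle_value(), so the enroll/disenroll decision is made in exactly one place. A simplified sketch of that funneling pattern, using plain stand-in types rather than VM code:

#include <cstdint>

// Stand-in types illustrating the pattern only; not HotSpot code.
class Throttle {
 public:
  static Throttle from_rate(double r) { return Throttle(true, r, 0); }
  static Throttle from_period(uint64_t p) { return Throttle(false, 0.0, p); }
  bool enabled() const { return _is_rate ? _rate > 0 : _period_nanos > 0; }
 private:
  Throttle(bool is_rate, double r, uint64_t p)
    : _is_rate(is_rate), _rate(r), _period_nanos(p) {}
  bool _is_rate;
  double _rate;
  uint64_t _period_nanos;
};

class Sampler {
 public:
  // Both public entry points normalize into one representation...
  void set_rate(double r) { apply(Throttle::from_rate(r)); }
  void set_period(uint64_t p) { apply(Throttle::from_period(p)); }
 private:
  // ...so the start/stop logic lives in a single choke point.
  void apply(const Throttle& t) {
    if (t.enabled()) {
      // enroll the sampler / recompute timers
    } else {
      // disenroll the sampler
    }
  }
};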

 void JfrCPUTimeThreadSampling::on_javathread_create(JavaThread *thread) {
@@ -704,24 +732,21 @@ void JfrCPUSamplerThread::stop_timer() {
   VMThread::execute(&op);
 }

-void JfrCPUSamplerThread::auto_adapt_period_if_needed() {
+void JfrCPUSamplerThread::recompute_period_if_needed() {
   int64_t current_period = get_sampling_period();
-  if (_auto_adapt || current_period == -1) {
-    int64_t period = compute_sampling_period(_rate);
-    if (period != current_period) {
-      Atomic::store(&_current_sampling_period_ns, period);
-      update_all_thread_timers();
-    }
+  int64_t period = _throttle.compute_sampling_period();
+  if (period != current_period) {
+    Atomic::store(&_current_sampling_period_ns, period);
+    update_all_thread_timers();
   }
 }

-void JfrCPUSamplerThread::set_rate(double rate, bool auto_adapt) {
-  _rate = rate;
-  _auto_adapt = auto_adapt;
-  if (_rate > 0 && Atomic::load_acquire(&_disenrolled) == false) {
-    auto_adapt_period_if_needed();
+void JfrCPUSamplerThread::set_throttle(JfrCPUSamplerThrottle& throttle) {
+  _throttle = throttle;
+  if (_throttle.enabled() && Atomic::load_acquire(&_disenrolled) == false) {
+    recompute_period_if_needed();
   } else {
-    Atomic::store(&_current_sampling_period_ns, compute_sampling_period(rate));
+    Atomic::store(&_current_sampling_period_ns, _throttle.compute_sampling_period());
   }
 }

@@ -765,12 +790,18 @@ void JfrCPUTimeThreadSampling::destroy() {
   _instance = nullptr;
 }

-void JfrCPUTimeThreadSampling::set_rate(double rate, bool auto_adapt) {
+void JfrCPUTimeThreadSampling::set_rate(double rate) {
   if (rate != 0) {
     warn();
   }
 }

+void JfrCPUTimeThreadSampling::set_period(u8 period_nanos) {
+  if (period_nanos != 0) {
+    warn();
+  }
+}
+
 void JfrCPUTimeThreadSampling::on_javathread_create(JavaThread* thread) {
 }


@@ -95,14 +95,16 @@ public:

 class JfrCPUSamplerThread;

+class JfrCPUSamplerThrottle;
+
 class JfrCPUTimeThreadSampling : public JfrCHeapObj {
   friend class JfrRecorder;
  private:

   JfrCPUSamplerThread* _sampler;

-  void create_sampler(double rate, bool auto_adapt);
-  void set_rate_value(double rate, bool auto_adapt);
+  void create_sampler(JfrCPUSamplerThrottle& throttle);
+  void set_throttle_value(JfrCPUSamplerThrottle& throttle);

   JfrCPUTimeThreadSampling();
   ~JfrCPUTimeThreadSampling();
@@ -111,10 +113,13 @@ class JfrCPUTimeThreadSampling : public JfrCHeapObj {
   static JfrCPUTimeThreadSampling* create();
   static void destroy();

-  void update_run_state(double rate, bool auto_adapt);
+  void update_run_state(JfrCPUSamplerThrottle& throttle);
+
+  static void set_rate(JfrCPUSamplerThrottle& throttle);

  public:
-  static void set_rate(double rate, bool auto_adapt);
+  static void set_rate(double rate);
+  static void set_period(u8 nanos);

   static void on_javathread_create(JavaThread* thread);
   static void on_javathread_terminate(JavaThread* thread);
@@ -140,7 +145,8 @@ private:
   static void destroy();

  public:
-  static void set_rate(double rate, bool auto_adapt);
+  static void set_rate(double rate);
+  static void set_period(u8 nanos);

   static void on_javathread_create(JavaThread* thread);
   static void on_javathread_terminate(JavaThread* thread);

@@ -1,417 +0,0 @@
-/*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
-#include "jfr/recorder/checkpoint/types/jfrThreadGroup.hpp"
-#include "jfr/utilities/jfrTypes.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/jniHandles.inline.hpp"
-#include "runtime/safepoint.hpp"
-#include "runtime/semaphore.hpp"
-#include "utilities/growableArray.hpp"
-
-static const int initial_array_size = 30;
-
-class ThreadGroupExclusiveAccess : public StackObj {
- private:
-  static Semaphore _mutex_semaphore;
- public:
-  ThreadGroupExclusiveAccess() { _mutex_semaphore.wait(); }
-  ~ThreadGroupExclusiveAccess() { _mutex_semaphore.signal(); }
-};
-
-Semaphore ThreadGroupExclusiveAccess::_mutex_semaphore(1);
-JfrThreadGroup* JfrThreadGroup::_instance = nullptr;
-
-class JfrThreadGroupPointers : public ResourceObj {
- private:
-  const Handle _thread_group_handle;
-  jweak _thread_group_weak_ref;
- public:
-  JfrThreadGroupPointers(Handle thread_group_handle, jweak thread_group_weak_ref);
-  Handle thread_group_handle() const;
-  jweak thread_group_weak_ref() const;
-  oopDesc* thread_group_oop() const;
-  jweak transfer_weak_global_handle_ownership();
-  void clear_weak_ref();
-};
-
-JfrThreadGroupPointers::JfrThreadGroupPointers(Handle thread_group_handle, jweak thread_group_weak_ref) :
-  _thread_group_handle(thread_group_handle),
-  _thread_group_weak_ref(thread_group_weak_ref) {}
-
-Handle JfrThreadGroupPointers::thread_group_handle() const {
-  return _thread_group_handle;
-}
-
-jweak JfrThreadGroupPointers::thread_group_weak_ref() const {
-  return _thread_group_weak_ref;
-}
-
-oopDesc* JfrThreadGroupPointers::thread_group_oop() const {
-  assert(_thread_group_weak_ref == nullptr ||
-         JNIHandles::resolve_non_null(_thread_group_weak_ref) == _thread_group_handle(), "invariant");
-  return _thread_group_handle();
-}
-
-jweak JfrThreadGroupPointers::transfer_weak_global_handle_ownership() {
-  jweak temp = _thread_group_weak_ref;
-  _thread_group_weak_ref = nullptr;
-  return temp;
-}
-
-void JfrThreadGroupPointers::clear_weak_ref() {
-  if (nullptr != _thread_group_weak_ref) {
-    JNIHandles::destroy_weak_global(_thread_group_weak_ref);
-  }
-}
-
-class JfrThreadGroupsHelper : public ResourceObj {
- private:
-  static const int invalid_iterator_pos = -1;
-  GrowableArray<JfrThreadGroupPointers*>* _thread_group_hierarchy;
-  int _current_iterator_pos;
-
-  int populate_thread_group_hierarchy(const JavaThread* jt, Thread* current);
-  JfrThreadGroupPointers& at(int index);
-
- public:
-  JfrThreadGroupsHelper(const JavaThread* jt, Thread* current);
-  ~JfrThreadGroupsHelper();
-  JfrThreadGroupPointers& next();
-  bool is_valid() const;
-  bool has_next() const;
-};
-
-JfrThreadGroupsHelper::JfrThreadGroupsHelper(const JavaThread* jt, Thread* current) {
-  _thread_group_hierarchy = new GrowableArray<JfrThreadGroupPointers*>(10);
-  _current_iterator_pos = populate_thread_group_hierarchy(jt, current) - 1;
-}
-
-JfrThreadGroupsHelper::~JfrThreadGroupsHelper() {
-  assert(_current_iterator_pos == invalid_iterator_pos, "invariant");
-  for (int i = 0; i < _thread_group_hierarchy->length(); ++i) {
-    _thread_group_hierarchy->at(i)->clear_weak_ref();
-  }
-}
-
-JfrThreadGroupPointers& JfrThreadGroupsHelper::at(int index) {
-  assert(_thread_group_hierarchy != nullptr, "invariant");
-  assert(index > invalid_iterator_pos && index < _thread_group_hierarchy->length(), "invariant");
-  return *(_thread_group_hierarchy->at(index));
-}
-
-bool JfrThreadGroupsHelper::has_next() const {
-  return _current_iterator_pos > invalid_iterator_pos;
-}
-
-bool JfrThreadGroupsHelper::is_valid() const {
-  return (_thread_group_hierarchy != nullptr && _thread_group_hierarchy->length() > 0);
-}
-
-JfrThreadGroupPointers& JfrThreadGroupsHelper::next() {
-  assert(is_valid(), "invariant");
-  return at(_current_iterator_pos--);
-}
-
-/*
- * If not at a safepoint, we create global weak references for
- * all reachable threadgroups for this thread.
- * If we are at a safepoint, the caller is the VMThread during
- * JFR checkpointing. It can use naked oops, because nothing
- * will move before the list of threadgroups is cleared and
- * mutator threads restarted. The threadgroup list is cleared
- * later by the VMThread as one of the final steps in JFR checkpointing
- * (not here).
- */
-int JfrThreadGroupsHelper::populate_thread_group_hierarchy(const JavaThread* jt, Thread* current) {
-  assert(jt != nullptr && jt->is_Java_thread(), "invariant");
-  assert(current != nullptr, "invariant");
-  assert(_thread_group_hierarchy != nullptr, "invariant");
-
-  oop thread_oop = jt->threadObj();
-  if (thread_oop == nullptr) {
-    return 0;
-  }
-  // immediate thread group
-  Handle thread_group_handle(current, java_lang_Thread::threadGroup(thread_oop));
-  if (thread_group_handle == nullptr) {
-    return 0;
-  }
-
-  const bool use_weak_handles = !SafepointSynchronize::is_at_safepoint();
-  jweak thread_group_weak_ref = use_weak_handles ? JNIHandles::make_weak_global(thread_group_handle) : nullptr;
-
-  JfrThreadGroupPointers* thread_group_pointers = new JfrThreadGroupPointers(thread_group_handle, thread_group_weak_ref);
-  _thread_group_hierarchy->append(thread_group_pointers);
-  // immediate parent thread group
-  oop parent_thread_group_obj = java_lang_ThreadGroup::parent(thread_group_handle());
-  Handle parent_thread_group_handle(current, parent_thread_group_obj);
-
-  // and check parents parents...
-  while (parent_thread_group_handle != nullptr) {
-    const jweak parent_group_weak_ref = use_weak_handles ? JNIHandles::make_weak_global(parent_thread_group_handle) : nullptr;
-    thread_group_pointers = new JfrThreadGroupPointers(parent_thread_group_handle, parent_group_weak_ref);
-    _thread_group_hierarchy->append(thread_group_pointers);
-    parent_thread_group_obj = java_lang_ThreadGroup::parent(parent_thread_group_handle());
-    parent_thread_group_handle = Handle(current, parent_thread_group_obj);
-  }
-  return _thread_group_hierarchy->length();
-}
-
-static traceid next_id() {
-  static traceid _current_threadgroup_id = 1; // 1 is reserved for thread group "VirtualThreads"
-  return ++_current_threadgroup_id;
-}
-
-class JfrThreadGroup::JfrThreadGroupEntry : public JfrCHeapObj {
-  friend class JfrThreadGroup;
- private:
-  traceid _thread_group_id;
-  traceid _parent_group_id;
-  char* _thread_group_name; // utf8 format
-  // If an entry is created during a safepoint, the
-  // _thread_group_oop contains a direct oop to
-  // the java.lang.ThreadGroup object.
-  // If an entry is created on javathread exit time (not at safepoint),
-  // _thread_group_weak_ref contains a JNI weak global handle
-  // indirection to the java.lang.ThreadGroup object.
-  // Note: we cannot use a union here since CHECK_UNHANDLED_OOPS makes oop have
-  // a ctor which isn't allowed in a union by the SunStudio compiler
-  oop _thread_group_oop;
-  jweak _thread_group_weak_ref;
-
-  JfrThreadGroupEntry(const char* tgstr, JfrThreadGroupPointers& ptrs);
-  ~JfrThreadGroupEntry();
-
-  traceid thread_group_id() const { return _thread_group_id; }
-  void set_thread_group_id(traceid tgid) { _thread_group_id = tgid; }
-
-  const char* thread_group_name() const { return _thread_group_name; }
-  void set_thread_group_name(const char* tgname);
-
-  traceid parent_group_id() const { return _parent_group_id; }
-  void set_parent_group_id(traceid pgid) { _parent_group_id = pgid; }
-
-  void set_thread_group(JfrThreadGroupPointers& ptrs);
-  bool is_equal(const JfrThreadGroupPointers& ptrs) const;
-  oop thread_group() const;
-};
-
-JfrThreadGroup::JfrThreadGroupEntry::JfrThreadGroupEntry(const char* tgname, JfrThreadGroupPointers& ptrs) :
-  _thread_group_id(0),
-  _parent_group_id(0),
-  _thread_group_name(nullptr),
-  _thread_group_oop(nullptr),
-  _thread_group_weak_ref(nullptr) {
-  set_thread_group_name(tgname);
-  set_thread_group(ptrs);
-}
-
-JfrThreadGroup::JfrThreadGroupEntry::~JfrThreadGroupEntry() {
-  if (_thread_group_name != nullptr) {
-    JfrCHeapObj::free(_thread_group_name, strlen(_thread_group_name) + 1);
-  }
-  if (_thread_group_weak_ref != nullptr) {
-    JNIHandles::destroy_weak_global(_thread_group_weak_ref);
-  }
-}
-
-void JfrThreadGroup::JfrThreadGroupEntry::set_thread_group_name(const char* tgname) {
-  assert(_thread_group_name == nullptr, "invariant");
-  if (tgname != nullptr) {
-    size_t len = strlen(tgname);
-    _thread_group_name = JfrCHeapObj::new_array<char>(len + 1);
-    strncpy(_thread_group_name, tgname, len + 1);
-  }
-}
-
-oop JfrThreadGroup::JfrThreadGroupEntry::thread_group() const {
-  return _thread_group_weak_ref != nullptr ? JNIHandles::resolve(_thread_group_weak_ref) : _thread_group_oop;
-}
-
-void JfrThreadGroup::JfrThreadGroupEntry::set_thread_group(JfrThreadGroupPointers& ptrs) {
-  _thread_group_weak_ref = ptrs.transfer_weak_global_handle_ownership();
-  if (_thread_group_weak_ref == nullptr) {
-    _thread_group_oop = ptrs.thread_group_oop();
-    assert(_thread_group_oop != nullptr, "invariant");
-  } else {
-    _thread_group_oop = nullptr;
-  }
-}
-
-JfrThreadGroup::JfrThreadGroup() :
-  _list(new (mtTracing) GrowableArray<JfrThreadGroupEntry*>(initial_array_size, mtTracing)) {}
-
-JfrThreadGroup::~JfrThreadGroup() {
-  if (_list != nullptr) {
-    for (int i = 0; i < _list->length(); i++) {
-      JfrThreadGroupEntry* e = _list->at(i);
-      delete e;
-    }
-    delete _list;
-  }
-}
-
-JfrThreadGroup* JfrThreadGroup::instance() {
-  return _instance;
-}
-
-void JfrThreadGroup::set_instance(JfrThreadGroup* new_instance) {
-  _instance = new_instance;
-}
-
-traceid JfrThreadGroup::thread_group_id(const JavaThread* jt, Thread* current) {
-  HandleMark hm(current);
-  JfrThreadGroupsHelper helper(jt, current);
-  return helper.is_valid() ? thread_group_id_internal(helper) : 0;
-}
-
-traceid JfrThreadGroup::thread_group_id(JavaThread* const jt) {
-  return thread_group_id(jt, jt);
-}
-
-traceid JfrThreadGroup::thread_group_id_internal(JfrThreadGroupsHelper& helper) {
-  ThreadGroupExclusiveAccess lock;
-  JfrThreadGroup* tg_instance = instance();
-  if (tg_instance == nullptr) {
-    tg_instance = new JfrThreadGroup();
-    if (tg_instance == nullptr) {
-      return 0;
-    }
-    set_instance(tg_instance);
-  }
-
-  JfrThreadGroupEntry* tge = nullptr;
-  traceid parent_thread_group_id = 0;
-  while (helper.has_next()) {
-    JfrThreadGroupPointers& ptrs = helper.next();
-    tge = tg_instance->find_entry(ptrs);
-    if (nullptr == tge) {
-      tge = tg_instance->new_entry(ptrs);
-      assert(tge != nullptr, "invariant");
-      tge->set_parent_group_id(parent_thread_group_id);
-    }
-    parent_thread_group_id = tge->thread_group_id();
-  }
-  // the last entry in the hierarchy is the immediate thread group
-  return tge->thread_group_id();
-}
-
-bool JfrThreadGroup::JfrThreadGroupEntry::is_equal(const JfrThreadGroupPointers& ptrs) const {
-  return ptrs.thread_group_oop() == thread_group();
-}
-
-JfrThreadGroup::JfrThreadGroupEntry*
-JfrThreadGroup::find_entry(const JfrThreadGroupPointers& ptrs) const {
-  for (int index = 0; index < _list->length(); ++index) {
-    JfrThreadGroupEntry* curtge = _list->at(index);
-    if (curtge->is_equal(ptrs)) {
-      return curtge;
-    }
-  }
-  return (JfrThreadGroupEntry*) nullptr;
-}
-
-// Assumes you already searched for the existence
-// of a corresponding entry in find_entry().
-JfrThreadGroup::JfrThreadGroupEntry*
-JfrThreadGroup::new_entry(JfrThreadGroupPointers& ptrs) {
-  JfrThreadGroupEntry* const tge = new JfrThreadGroupEntry(java_lang_ThreadGroup::name(ptrs.thread_group_oop()), ptrs);
-  add_entry(tge);
-  return tge;
-}
-
-int JfrThreadGroup::add_entry(JfrThreadGroupEntry* tge) {
-  assert(tge != nullptr, "attempting to add a null entry!");
-  assert(0 == tge->thread_group_id(), "id must be unassigned!");
-  tge->set_thread_group_id(next_id());
-  return _list->append(tge);
-}
-
-void JfrThreadGroup::write_thread_group_entries(JfrCheckpointWriter& writer) const {
-  assert(_list != nullptr && !_list->is_empty(), "should not need be here!");
-  const int number_of_tg_entries = _list->length();
-  writer.write_count(number_of_tg_entries + 1); // + VirtualThread group
-  writer.write_key(1); // 1 is reserved for VirtualThread group
-  writer.write<traceid>(0); // parent
-  const oop vgroup = java_lang_Thread_Constants::get_VTHREAD_GROUP();
-  assert(vgroup != (oop)nullptr, "invariant");
-  const char* const vgroup_name = java_lang_ThreadGroup::name(vgroup);
-  assert(vgroup_name != nullptr, "invariant");
-  writer.write(vgroup_name);
-  for (int index = 0; index < number_of_tg_entries; ++index) {
-    const JfrThreadGroupEntry* const curtge = _list->at(index);
-    writer.write_key(curtge->thread_group_id());
-    writer.write(curtge->parent_group_id());
-    writer.write(curtge->thread_group_name());
-  }
-}
-
-void JfrThreadGroup::write_selective_thread_group(JfrCheckpointWriter* writer, traceid thread_group_id) const {
-  assert(writer != nullptr, "invariant");
-  assert(_list != nullptr && !_list->is_empty(), "should not need be here!");
-  assert(thread_group_id != 1, "should not need be here!");
-  const int number_of_tg_entries = _list->length();
-
-  // save context
-  const JfrCheckpointContext ctx = writer->context();
-  writer->write_type(TYPE_THREADGROUP);
-  const jlong count_offset = writer->reserve(sizeof(u4)); // Don't know how many yet
-  int number_of_entries_written = 0;
-  for (int index = number_of_tg_entries - 1; index >= 0; --index) {
-    const JfrThreadGroupEntry* const curtge = _list->at(index);
-    if (thread_group_id == curtge->thread_group_id()) {
-      writer->write_key(curtge->thread_group_id());
-      writer->write(curtge->parent_group_id());
-      writer->write(curtge->thread_group_name());
-      ++number_of_entries_written;
-      thread_group_id = curtge->parent_group_id();
-    }
-  }
-  if (number_of_entries_written == 0) {
-    // nothing to write, restore context
-    writer->set_context(ctx);
-    return;
-  }
-  assert(number_of_entries_written > 0, "invariant");
-  writer->write_count(number_of_entries_written, count_offset);
-}
-
-// Write out JfrThreadGroup instance and then delete it
-void JfrThreadGroup::serialize(JfrCheckpointWriter& writer) {
-  ThreadGroupExclusiveAccess lock;
-  JfrThreadGroup* tg_instance = instance();
-  assert(tg_instance != nullptr, "invariant");
-  tg_instance->write_thread_group_entries(writer);
-}
-
-// for writing a particular thread group
-void JfrThreadGroup::serialize(JfrCheckpointWriter* writer, traceid thread_group_id) {
-  assert(writer != nullptr, "invariant");
-  ThreadGroupExclusiveAccess lock;
-  JfrThreadGroup* const tg_instance = instance();
-  assert(tg_instance != nullptr, "invariant");
-  tg_instance->write_selective_thread_group(writer, thread_group_id);
-}
@@ -0,0 +1,331 @@
+/*
+ * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "jfr/jni/jfrJavaSupport.hpp"
+#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
+#include "jfr/recorder/checkpoint/types/jfrThreadGroupManager.hpp"
+#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
+#include "jfr/utilities/jfrAllocation.hpp"
+#include "jfr/utilities/jfrLinkedList.inline.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/jniHandles.inline.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/semaphore.hpp"
+#include "runtime/thread.inline.hpp"
+#include "utilities/growableArray.hpp"
+
+class ThreadGroupExclusiveAccess : public StackObj {
+ private:
+  static Semaphore _mutex_semaphore;
+ public:
+  ThreadGroupExclusiveAccess() { _mutex_semaphore.wait(); }
+  ~ThreadGroupExclusiveAccess() { _mutex_semaphore.signal(); }
+};
+
+Semaphore ThreadGroupExclusiveAccess::_mutex_semaphore(1);
+
+static traceid next_id() {
+  static traceid _tgid = 1; // 1 is reserved for thread group "VirtualThreads"
+  return ++_tgid;
+}
+
+class JfrThreadGroup : public JfrCHeapObj {
+  template <typename, typename>
+  friend class JfrLinkedList;
+ private:
+  mutable const JfrThreadGroup* _next;
+  const JfrThreadGroup* _parent;
+  traceid _tgid;
+  char* _tg_name; // utf8 format
+  jweak _tg_handle;
+  mutable u2 _generation;
+
+ public:
+  JfrThreadGroup(Handle tg, const JfrThreadGroup* parent) :
+    _next(nullptr), _parent(parent), _tgid(next_id()), _tg_name(nullptr),
+    _tg_handle(JNIHandles::make_weak_global(tg)), _generation(0) {
+    const char* name = java_lang_ThreadGroup::name(tg());
+    if (name != nullptr) {
+      const size_t len = strlen(name);
+      _tg_name = JfrCHeapObj::new_array<char>(len + 1);
+      strncpy(_tg_name, name, len + 1);
+    }
+  }
+
+  ~JfrThreadGroup() {
+    JNIHandles::destroy_weak_global(_tg_handle);
+    if (_tg_name != nullptr) {
+      JfrCHeapObj::free(_tg_name, strlen(_tg_name) + 1);
+    }
+  }
+
+  const JfrThreadGroup* next() const { return _next; }
+
+  traceid id() const { return _tgid; }
+
+  const char* name() const {
+    return _tg_name;
+  }
+
+  const JfrThreadGroup* parent() const { return _parent; }
+
+  traceid parent_id() const {
+    return _parent != nullptr ? _parent->id() : 0;
+  }
+
+  bool is_dead() const {
+    return JNIHandles::resolve(_tg_handle) == nullptr;
+  }
+
+  bool operator==(oop tg) const {
+    assert(tg != nullptr, "invariant");
+    return tg == JNIHandles::resolve(_tg_handle);
+  }
+
+  bool should_write() const {
+    return !JfrTraceIdEpoch::is_current_epoch_generation(_generation);
+  }
+
+  void set_written() const {
+    assert(should_write(), "invariant");
+    _generation = JfrTraceIdEpoch::epoch_generation();
+  }
+};
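+
A design note on the class above: each JfrThreadGroup holds only a jweak handle to its java.lang.ThreadGroup, so the JFR list never keeps a group alive; once the GC clears the referent, is_dead() turns true and the serializer unlinks and deletes the node. A loose analogy with std::weak_ptr standing in for the jweak (illustrative only, not VM code):

#include <list>
#include <memory>

// The weak_ptr plays the role of the jweak handle: it observes the object
// without extending its lifetime.
struct GroupEntry {
  std::weak_ptr<int> handle;  // stand-in for the ThreadGroup oop reference
  bool is_dead() const { return handle.expired(); }
};

// Mirrors the pruning walk in the serializer below: dead entries are
// unlinked and destroyed while live ones are retained.
static void prune(std::list<GroupEntry>& groups) {
  for (auto it = groups.begin(); it != groups.end();) {
    if (it->is_dead()) {
      it = groups.erase(it);
    } else {
      ++it;
    }
  }
}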
+
+typedef JfrLinkedList<const JfrThreadGroup> JfrThreadGroupList;
+
+static JfrThreadGroupList* _list = nullptr;
+
+static JfrThreadGroupList& list() {
+  assert(_list != nullptr, "invariant");
+  return *_list;
+}
+
+bool JfrThreadGroupManager::create() {
+  assert(_list == nullptr, "invariant");
+  _list = new JfrThreadGroupList();
+  return _list != nullptr;
+}
+
+void JfrThreadGroupManager::destroy() {
+  delete _list;
+  _list = nullptr;
+}
+
+static int populate(GrowableArray<Handle>* hierarchy, const JavaThread* jt, Thread* current) {
+  assert(hierarchy != nullptr, "invariant");
+  assert(jt != nullptr, "invariant");
+  assert(current == Thread::current(), "invariant");
+
+  oop thread_oop = jt->threadObj();
+  if (thread_oop == nullptr) {
+    return 0;
+  }
+  // Immediate thread group.
+  const Handle tg_handle(current, java_lang_Thread::threadGroup(thread_oop));
+  if (tg_handle.is_null()) {
+    return 0;
+  }
+  hierarchy->append(tg_handle);
+
+  // Thread group parent and then its parents...
+  Handle parent_tg_handle(current, java_lang_ThreadGroup::parent(tg_handle()));
+
+  while (parent_tg_handle != nullptr) {
+    hierarchy->append(parent_tg_handle);
+    parent_tg_handle = Handle(current, java_lang_ThreadGroup::parent(parent_tg_handle()));
+  }
+
+  return hierarchy->length();
+}
+
+class JfrThreadGroupLookup : public ResourceObj {
+  static const int invalid_iterator = -1;
+ private:
+  GrowableArray<Handle>* _hierarchy;
+  mutable int _iterator;
+
+ public:
+  JfrThreadGroupLookup(const JavaThread* jt, Thread* current) :
+    _hierarchy(new GrowableArray<Handle>(16)),
+    _iterator(populate(_hierarchy, jt, current) - 1) {}
+
+  bool has_next() const {
+    return _iterator > invalid_iterator;
+  }
+
+  const Handle& next() const {
+    assert(has_next(), "invariant");
+    return _hierarchy->at(_iterator--);
+  }
+};
+
+static const JfrThreadGroup* find_or_add(const Handle& tg_oop, const JfrThreadGroup* parent) {
+  assert(parent == nullptr || list().in_list(parent), "invariant");
+  const JfrThreadGroup* tg = list().head();
+  const JfrThreadGroup* result = nullptr;
+  while (tg != nullptr) {
+    if (*tg == tg_oop()) {
+      assert(tg->parent() == parent, "invariant");
+      result = tg;
+      tg = nullptr;
+      continue;
+    }
+    tg = tg->next();
+  }
+  if (result == nullptr) {
+    result = new JfrThreadGroup(tg_oop, parent);
+    list().add(result);
+  }
+  return result;
+}
+
+static traceid find_tgid(const JfrThreadGroupLookup& lookup) {
+  const JfrThreadGroup* tg = nullptr;
+  const JfrThreadGroup* ptg = nullptr;
+  while (lookup.has_next()) {
+    tg = find_or_add(lookup.next(), ptg);
+    ptg = tg;
+  }
+  return tg != nullptr ? tg->id() : 0;
+}
+
+static traceid find(const JfrThreadGroupLookup& lookup) {
+  ThreadGroupExclusiveAccess lock;
+  return find_tgid(lookup);
+}
+
+traceid JfrThreadGroupManager::thread_group_id(JavaThread* jt) {
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt);)
+  ResourceMark rm(jt);
+  HandleMark hm(jt);
+  const JfrThreadGroupLookup lookup(jt, jt);
+  return find(lookup);
+}
+
+traceid JfrThreadGroupManager::thread_group_id(const JavaThread* jt, Thread* current) {
+  assert(jt != nullptr, "invariant");
+  assert(current != nullptr, "invariant");
+  assert(!current->is_Java_thread() || JavaThread::cast(current)->thread_state() == _thread_in_vm, "invariant");
+  ResourceMark rm(current);
+  HandleMark hm(current);
+  const JfrThreadGroupLookup lookup(jt, current);
+  return find(lookup);
+}
+
+static void write_virtual_thread_group(JfrCheckpointWriter& writer) {
+  writer.write_key(1); // 1 is reserved for VirtualThread group
+  writer.write<traceid>(0); // parent
+  const oop vgroup = java_lang_Thread_Constants::get_VTHREAD_GROUP();
+  assert(vgroup != (oop)nullptr, "invariant");
+  const char* const vgroup_name = java_lang_ThreadGroup::name(vgroup);
+  assert(vgroup_name != nullptr, "invariant");
+  writer.write(vgroup_name);
+}
+
+static int write_thread_group(JfrCheckpointWriter& writer, const JfrThreadGroup* tg, bool to_blob = false) {
+  assert(tg != nullptr, "invariant");
+  if (tg->should_write() || to_blob) {
+    writer.write_key(tg->id());
+    writer.write(tg->parent_id());
+    writer.write(tg->name());
+    if (!to_blob) {
+      tg->set_written();
+    }
+    return 1;
+  }
+  return 0;
+}
+
+// For writing all live thread groups while removing and deleting dead thread groups.
+void JfrThreadGroupManager::serialize(JfrCheckpointWriter& writer) {
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(JavaThread::current());)
+
+  const uint64_t count_offset = writer.reserve(sizeof(u4)); // Don't know how many yet
+
+  // First write the pre-defined ThreadGroup for virtual threads.
+  write_virtual_thread_group(writer);
+  int number_of_groups_written = 1;
+
+  const JfrThreadGroup* next = nullptr;
+  const JfrThreadGroup* prev = nullptr;
+
+  {
+    ThreadGroupExclusiveAccess lock;
+    const JfrThreadGroup* tg = list().head();
+    while (tg != nullptr) {
+      next = tg->next();
+      if (tg->is_dead()) {
+        prev = list().excise(prev, tg);
+        assert(!list().in_list(tg), "invariant");
+        delete tg;
+        tg = next;
+        continue;
+      }
+      number_of_groups_written += write_thread_group(writer, tg);
+      prev = tg;
+      tg = next;
+    }
+  }
+
+  assert(number_of_groups_written > 0, "invariant");
+  writer.write_count(number_of_groups_written, count_offset);
+}
+
+// For writing a specific thread group and its ancestry.
+void JfrThreadGroupManager::serialize(JfrCheckpointWriter& writer, traceid tgid, bool to_blob) {
+  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(JavaThread::current());)
+  // save context
+  const JfrCheckpointContext ctx = writer.context();
+
+  writer.write_type(TYPE_THREADGROUP);
+  const uint64_t count_offset = writer.reserve(sizeof(u4)); // Don't know how many yet
+
+  int number_of_groups_written = 0;
+
+  {
+    ThreadGroupExclusiveAccess lock;
+    const JfrThreadGroup* tg = list().head();
+    while (tg != nullptr) {
+      if (tgid == tg->id()) {
+        while (tg != nullptr) {
+          number_of_groups_written += write_thread_group(writer, tg, to_blob);
+          tg = tg->parent();
+        }
+        break;
+      }
+      tg = tg->next();
+    }
+  }
+
+  if (number_of_groups_written == 0) {
+    // nothing to write, restore context
+    writer.set_context(ctx);
+    return;
+  }
+
+  assert(number_of_groups_written > 0, "invariant");
+  writer.write_count(number_of_groups_written, count_offset);
+}
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,44 +22,27 @@
  *
  */

-#ifndef SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTHREADGROUP_HPP
-#define SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTHREADGROUP_HPP
+#ifndef SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTHREADGROUPMANAGER_HPP
+#define SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTHREADGROUPMANAGER_HPP

 #include "jfr/utilities/jfrAllocation.hpp"
 #include "jfr/utilities/jfrTypes.hpp"
-#include "jni.h"
+#include "memory/allStatic.hpp"

 class JfrCheckpointWriter;
-template <typename>
-class GrowableArray;
-class JfrThreadGroupsHelper;
-class JfrThreadGroupPointers;

-class JfrThreadGroup : public JfrCHeapObj {
-  friend class JfrCheckpointThreadClosure;
+class JfrThreadGroupManager : public AllStatic {
+  friend class JfrRecorder;

  private:
-  static JfrThreadGroup* _instance;
-  class JfrThreadGroupEntry;
-  GrowableArray<JfrThreadGroupEntry*>* _list;
-
-  JfrThreadGroup();
-  JfrThreadGroupEntry* find_entry(const JfrThreadGroupPointers& ptrs) const;
-  JfrThreadGroupEntry* new_entry(JfrThreadGroupPointers& ptrs);
-  int add_entry(JfrThreadGroupEntry* const tge);
-
-  void write_thread_group_entries(JfrCheckpointWriter& writer) const;
-  void write_selective_thread_group(JfrCheckpointWriter* writer, traceid thread_group_id) const;
-
-  static traceid thread_group_id_internal(JfrThreadGroupsHelper& helper);
-  static JfrThreadGroup* instance();
-  static void set_instance(JfrThreadGroup* new_instance);
   static bool create();
   static void destroy();

  public:
-  ~JfrThreadGroup();
   static void serialize(JfrCheckpointWriter& w);
-  static void serialize(JfrCheckpointWriter* w, traceid thread_group_id);
+  static void serialize(JfrCheckpointWriter& w, traceid tgid, bool is_blob);

   static traceid thread_group_id(JavaThread* thread);
   static traceid thread_group_id(const JavaThread* thread, Thread* current);
 };

-#endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTHREADGROUP_HPP
+#endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTHREADGROUPMANAGER_HPP
@@ -32,7 +32,7 @@
 #include "gc/shared/gcWhen.hpp"
 #include "jfr/leakprofiler/leakProfiler.hpp"
 #include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
-#include "jfr/recorder/checkpoint/types/jfrThreadGroup.hpp"
+#include "jfr/recorder/checkpoint/types/jfrThreadGroupManager.hpp"
 #include "jfr/recorder/checkpoint/types/jfrThreadState.hpp"
 #include "jfr/recorder/checkpoint/types/jfrType.hpp"
 #include "jfr/recorder/jfrRecorder.hpp"
@@ -106,7 +106,7 @@ void JfrCheckpointThreadClosure::do_thread(Thread* t) {
   } else {
     _writer.write(name);
     _writer.write(tid);
-    _writer.write(JfrThreadGroup::thread_group_id(JavaThread::cast(t), _curthread));
+    _writer.write(JfrThreadGroupManager::thread_group_id(JavaThread::cast(t), _curthread));
   }
   _writer.write<bool>(false); // isVirtual
 }
@@ -115,7 +115,10 @@ void JfrThreadConstantSet::serialize(JfrCheckpointWriter& writer) {
   JfrCheckpointThreadClosure tc(writer);
   JfrJavaThreadIterator javathreads;
   while (javathreads.has_next()) {
-    tc.do_thread(javathreads.next());
+    JavaThread* const jt = javathreads.next();
+    if (jt->jfr_thread_local()->should_write()) {
+      tc.do_thread(jt);
+    }
   }
   JfrNonJavaThreadIterator nonjavathreads;
   while (nonjavathreads.has_next()) {
@@ -124,7 +127,7 @@ void JfrThreadConstantSet::serialize(JfrCheckpointWriter& writer) {
 }

 void JfrThreadGroupConstant::serialize(JfrCheckpointWriter& writer) {
-  JfrThreadGroup::serialize(writer);
+  JfrThreadGroupManager::serialize(writer);
 }

 static const char* flag_value_origin_to_string(JVMFlagOrigin origin) {
@@ -303,11 +306,11 @@ void JfrThreadConstant::serialize(JfrCheckpointWriter& writer) {
   writer.write(JfrThreadId::jfr_id(_thread, _tid));
   // java thread group - VirtualThread threadgroup reserved id 1
   const traceid thread_group_id = is_vthread ? 1 :
-    JfrThreadGroup::thread_group_id(JavaThread::cast(_thread), Thread::current());
+    JfrThreadGroupManager::thread_group_id(JavaThread::cast(_thread), Thread::current());
   writer.write(thread_group_id);
   writer.write<bool>(is_vthread); // isVirtual
   if (!is_vthread) {
-    JfrThreadGroup::serialize(&writer, thread_group_id);
+    if (thread_group_id > 1) {
+      JfrThreadGroupManager::serialize(writer, thread_group_id, _to_blob);
+    }
+    // VirtualThread threadgroup already serialized invariant.
   }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -109,11 +109,12 @@ class JfrThreadConstant : public JfrSerializer {
   oop _vthread;
   const char* _name;
   int _length;
+  const bool _to_blob;
   void write_name(JfrCheckpointWriter& writer);
   void write_os_name(JfrCheckpointWriter& writer, bool is_vthread);
  public:
-  JfrThreadConstant(Thread* t, traceid tid, oop vthread = nullptr) :
-    _thread(t), _tid(tid), _vthread(vthread), _name(nullptr), _length(-1) {}
+  JfrThreadConstant(Thread* t, traceid tid, bool to_blob, oop vthread = nullptr) :
+    _thread(t), _tid(tid), _vthread(vthread), _name(nullptr), _length(-1), _to_blob(to_blob) {}
   void serialize(JfrCheckpointWriter& writer);
 };


@@ -109,7 +109,7 @@ JfrBlobHandle JfrTypeManager::create_thread_blob(JavaThread* jt, traceid tid /*
   // TYPE_THREAD and count is written unconditionally for blobs, also for vthreads.
   writer.write_type(TYPE_THREAD);
   writer.write_count(1);
-  JfrThreadConstant type_thread(jt, tid, vthread);
+  JfrThreadConstant type_thread(jt, tid, true, vthread);
   type_thread.serialize(writer);
   return writer.move();
 }
@@ -128,7 +128,7 @@ void JfrTypeManager::write_checkpoint(Thread* t, traceid tid /* 0 */, oop vthrea
     writer.write_type(TYPE_THREAD);
     writer.write_count(1);
   }
-  JfrThreadConstant type_thread(t, tid, vthread);
+  JfrThreadConstant type_thread(t, tid, false, vthread);
   type_thread.serialize(writer);
 }


@@ -533,8 +533,9 @@ static void clear_method_tracer_klasses() {
 static void do_unloading_klass(Klass* klass) {
   assert(klass != nullptr, "invariant");
   assert(_subsystem_callback != nullptr, "invariant");
-  if (klass->is_instance_klass() && InstanceKlass::cast(klass)->is_scratch_class()) {
-    return;
+  if (!used(klass) && klass->is_instance_klass() && InstanceKlass::cast(klass)->is_scratch_class()) {
+    SET_TRANSIENT(klass);
+    assert(used(klass), "invariant");
   }
   if (JfrKlassUnloading::on_unload(klass)) {
     if (JfrTraceId::has_sticky_bit(klass)) {

@@ -152,7 +152,7 @@ public:
     if (!klass->is_instance_klass()) {
       return false;
     }
-    return _current_epoch ? METHOD_USED_THIS_EPOCH(klass) : METHOD_USED_PREVIOUS_EPOCH(klass);
+    return _current_epoch ? USED_THIS_EPOCH(klass) : USED_PREVIOUS_EPOCH(klass);
   }
 };

@@ -30,6 +30,7 @@
 #include "memory/allStatic.hpp"

 class ClassLoaderData;
+class InstanceKlass;
 class Klass;
 class Method;
 class ModuleEntry;
@@ -86,7 +87,6 @@ class JfrTraceId : public AllStatic {

   // through load barrier
   static traceid load(const Klass* klass);
-  static traceid load_previous_epoch(const Klass* klass);
   static traceid load(jclass jc, bool raw = false);
   static traceid load(const Method* method);
   static traceid load(const Klass* klass, const Method* method);
@@ -146,10 +146,8 @@ class JfrTraceId : public AllStatic {
   static void set_sticky_bit(const Method* method);
   static void clear_sticky_bit(const Klass* k);
   static void clear_sticky_bit(const Method* method);
-  static bool has_timing_bit(const Klass* k);
-  static void set_timing_bit(const Klass* k);
-  static void clear_timing_bit(const Klass* k);

+  static bool has_timing_bit(const InstanceKlass* scratch_klass);
+  static void set_timing_bit(const InstanceKlass* scratch_klass);
 };

 #endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEID_HPP

@@ -32,6 +32,7 @@
 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp"
 #include "jfr/support/jfrKlassExtension.hpp"
+#include "oops/instanceKlass.hpp"
 #include "oops/klass.hpp"
 #include "runtime/javaThread.inline.hpp"
 #include "runtime/mutexLocker.hpp"
@@ -81,10 +82,6 @@ inline traceid JfrTraceId::load_leakp_previous_epoch(const Klass* klass, const M
   return JfrTraceIdLoadBarrier::load_leakp_previous_epoch(klass, method);
 }

-inline traceid JfrTraceId::load_previous_epoch(const Klass* klass) {
-  return JfrTraceIdLoadBarrier::load_previous_epoch(klass);
-}
-
 template <typename T>
 inline traceid raw_load(const T* t) {
   assert(t != nullptr, "invariant");
@@ -198,6 +195,7 @@ inline void JfrTraceId::set_sticky_bit(const Method* method) {
   assert(method != nullptr, "invariant");
   assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
   assert(!has_sticky_bit(method), "invariant");
+  assert(!method->is_old(), "invariant");
   SET_METHOD_STICKY_BIT(method);
   assert(has_sticky_bit(method), "invariant");
 }
@@ -205,30 +203,22 @@ inline void JfrTraceId::set_sticky_bit(const Method* method) {
 inline void JfrTraceId::clear_sticky_bit(const Method* method) {
   assert(method != nullptr, "invarriant");
   assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
+  assert(!method->is_old(), "invariant");
   assert(JfrTraceId::has_sticky_bit(method), "invariant");
   CLEAR_STICKY_BIT_METHOD(method);
   assert(!JfrTraceId::has_sticky_bit(method), "invariant");
 }

-inline bool JfrTraceId::has_timing_bit(const Klass* k) {
-  assert(k != nullptr, "invariant");
-  return HAS_TIMING_BIT(k);
+inline bool JfrTraceId::has_timing_bit(const InstanceKlass* scratch_klass) {
+  assert(scratch_klass != nullptr, "invariant");
+  return HAS_TIMING_BIT(scratch_klass);
 }

-inline void JfrTraceId::set_timing_bit(const Klass* k) {
-  assert(k != nullptr, "invariant");
-  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
-  assert(!has_timing_bit(k), "invariant");
-  SET_TIMING_BIT(k);
-  assert(has_timing_bit(k), "invariant");
-}
-
-inline void JfrTraceId::clear_timing_bit(const Klass* k) {
-  assert(k != nullptr, "invarriant");
-  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
-  assert(JfrTraceId::has_timing_bit(k), "invariant");
-  CLEAR_TIMING_BIT(k);
-  assert(!JfrTraceId::has_timing_bit(k), "invariant");
+inline void JfrTraceId::set_timing_bit(const InstanceKlass* scratch_klass) {
+  assert(scratch_klass != nullptr, "invariant");
+  assert(!has_timing_bit(scratch_klass), "invariant");
+  SET_TIMING_BIT(scratch_klass);
+  assert(has_timing_bit(scratch_klass), "invariant");
 }

 #endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEID_INLINE_HPP

@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -78,7 +78,7 @@ inline uint8_t* traceid_meta_byte(const T* ptr) {
|
||||
template <>
|
||||
inline uint8_t* traceid_meta_byte<Method>(const Method* ptr) {
|
||||
assert(ptr != nullptr, "invariant");
|
||||
return ptr->trace_meta_addr();
|
||||
return ptr->trace_flags_meta_addr();
|
||||
}
|
||||
|
||||
inline uint8_t traceid_and(uint8_t bits, uint8_t current) {
|
||||
|
||||
@@ -86,6 +86,27 @@ inline traceid JfrTraceIdLoadBarrier::load(const Klass* klass) {
|
||||
return TRACE_ID(klass);
|
||||
}
|
||||
|
||||
inline const Method* latest_version(const Klass* klass, const Method* method) {
|
||||
assert(klass != nullptr, "invariant");
|
||||
assert(method != nullptr, "invariant");
|
||||
assert(klass == method->method_holder(), "invariant");
|
||||
assert(method->is_old(), "invariant");
|
||||
const InstanceKlass* const ik = InstanceKlass::cast(klass);
|
||||
assert(ik->has_been_redefined(), "invariant");
|
||||
const Method* const latest_version = ik->method_with_orig_idnum(method->orig_method_idnum());
|
||||
if (latest_version == nullptr) {
|
||||
assert(AllowRedefinitionToAddDeleteMethods, "invariant");
|
||||
// method has been removed. Return old version.
|
||||
return method;
|
||||
}
|
||||
assert(latest_version != nullptr, "invariant");
|
||||
assert(latest_version != method, "invariant");
|
||||
assert(!latest_version->is_old(), "invariant");
|
||||
assert(latest_version->orig_method_idnum() == method->orig_method_idnum(), "invariant");
|
||||
assert(latest_version->name() == method->name() && latest_version->signature() == method->signature(), "invariant");
|
||||
return latest_version;
|
||||
}
|
||||
|
||||
inline traceid JfrTraceIdLoadBarrier::load(const Method* method) {
|
||||
return load(method->method_holder(), method);
|
||||
}
@@ -93,6 +114,9 @@ inline traceid JfrTraceIdLoadBarrier::load(const Method* method) {
inline traceid JfrTraceIdLoadBarrier::load(const Klass* klass, const Method* method) {
  assert(klass != nullptr, "invariant");
  assert(method != nullptr, "invariant");
  if (method->is_old()) {
    method = latest_version(klass, method);
  }
  if (should_tag(method)) {
    SET_METHOD_AND_CLASS_USED_THIS_EPOCH(klass);
    SET_METHOD_FLAG_USED_THIS_EPOCH(method);
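Both load() overloads now normalize an old method to its latest version before tagging, so the epoch bits always land on the installed artifact. The tagging itself is a tag-on-load barrier: the first load in an epoch marks the klass and method as used so the type-set writer knows what to serialize. A compact standalone model of that idea, with a global epoch flag and per-artifact epoch bits (all names invented):

#include <cstdint>

// Global epoch: flips between 0 and 1 at each chunk rotation (illustrative).
static int g_epoch = 0;

struct Artifact {
  uint64_t id;
  uint8_t  used_in_epoch[2] = {0, 0};
};

// Tag the artifact as used in the current epoch and return its id.
// Mirrors the shape of JfrTraceIdLoadBarrier::load(): tag first, then read.
uint64_t load(Artifact& a) {
  if (!a.used_in_epoch[g_epoch]) {   // should_tag() analogue
    a.used_in_epoch[g_epoch] = 1;    // SET_..._USED_THIS_EPOCH analogue
    // the real barrier also enqueues the klass for type-set serialization
  }
  return a.id;
}

int main() {
  Artifact m{42};
  (void)load(m);  // first load in this epoch tags the artifact
  g_epoch ^= 1;   // epoch shift: the previous-epoch bit stays readable
  (void)load(m);
  return 0;
}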
@@ -111,6 +135,9 @@ inline traceid JfrTraceIdLoadBarrier::load_no_enqueue(const Method* method) {
inline traceid JfrTraceIdLoadBarrier::load_no_enqueue(const Klass* klass, const Method* method) {
  assert(klass != nullptr, "invariant");
  assert(method != nullptr, "invariant");
  if (method->is_old()) {
    method = latest_version(klass, method);
  }
  SET_METHOD_AND_CLASS_USED_THIS_EPOCH(klass);
  SET_METHOD_FLAG_USED_THIS_EPOCH(method);
  assert(METHOD_AND_CLASS_USED_THIS_EPOCH(klass), "invariant");
@@ -123,11 +150,12 @@ inline traceid JfrTraceIdLoadBarrier::load(const ClassLoaderData* cld) {
  if (cld->has_class_mirror_holder()) {
    return 0;
  }
  const traceid id = set_used_and_get(cld);
  const Klass* const class_loader_klass = cld->class_loader_klass();
  if (class_loader_klass != nullptr) {
    load(class_loader_klass);
  }
  return set_used_and_get(cld);
  return id;
}

inline traceid JfrTraceIdLoadBarrier::load(const ModuleEntry* module) {
@@ -158,6 +186,7 @@ inline traceid JfrTraceIdLoadBarrier::load_leakp(const Klass* klass) {
inline traceid JfrTraceIdLoadBarrier::load_leakp(const Klass* klass, const Method* method) {
  assert(klass != nullptr, "invariant");
  assert(method != nullptr, "invariant");
  assert(!method->is_old(), "invariant");
  assert(klass == method->method_holder(), "invariant");
  assert(METHOD_AND_CLASS_USED_THIS_EPOCH(klass), "invariant");
  if (should_tag(method)) {
@@ -175,6 +204,7 @@ inline traceid JfrTraceIdLoadBarrier::load_leakp(const Klass* klass, const Metho
inline traceid JfrTraceIdLoadBarrier::load_leakp_previous_epoch(const Klass* klass, const Method* method) {
  assert(klass != nullptr, "invariant");
  assert(method != nullptr, "invariant");
  assert(!method->is_old(), "invariant");
  assert(klass == method->method_holder(), "invariant");
  assert(METHOD_AND_CLASS_USED_PREVIOUS_EPOCH(klass), "invariant");
  if (METHOD_FLAG_NOT_USED_PREVIOUS_EPOCH(method)) {

@@ -33,6 +33,7 @@
#include "jfr/periodic/sampling/jfrThreadSampler.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
#include "jfr/recorder/checkpoint/types/jfrThreadGroupManager.hpp"
#include "jfr/recorder/repository/jfrRepository.hpp"
#include "jfr/recorder/service/jfrEventThrottler.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
@@ -311,6 +312,9 @@ bool JfrRecorder::create_components() {
  if (!create_event_throttler()) {
    return false;
  }
  if (!create_thread_group_manager()) {
    return false;
  }
  return true;
}

@@ -405,6 +409,10 @@ bool JfrRecorder::create_event_throttler() {
  return JfrEventThrottler::create();
}

bool JfrRecorder::create_thread_group_manager() {
  return JfrThreadGroupManager::create();
}

void JfrRecorder::destroy_components() {
  JfrJvmtiAgent::destroy();
  if (_post_box != nullptr) {
@@ -444,6 +452,7 @@ void JfrRecorder::destroy_components() {
    _cpu_time_thread_sampling = nullptr;
  }
  JfrEventThrottler::destroy();
  JfrThreadGroupManager::destroy();
}

bool JfrRecorder::create_recorder_thread() {

@@ -53,6 +53,7 @@ class JfrRecorder : public JfrCHeapObj {
  static bool create_stacktrace_repository();
  static bool create_storage();
  static bool create_stringpool();
  static bool create_thread_group_manager();
  static bool create_thread_sampler();
  static bool create_cpu_time_thread_sampling();
  static bool create_event_throttler();

@@ -645,7 +645,7 @@ static void write_thread_local_buffer(JfrChunkWriter& chunkwriter, Thread* t) {

size_t JfrRecorderService::flush() {
  size_t total_elements = flush_metadata(_chunkwriter);
  total_elements = flush_storage(_storage, _chunkwriter);
  total_elements += flush_storage(_storage, _chunkwriter);
  if (_string_pool.is_modified()) {
    total_elements += flush_stringpool(_string_pool, _chunkwriter);
  }

@@ -36,14 +36,6 @@
#include "utilities/preserveException.hpp"
#include "utilities/macros.hpp"

class JfrRecorderThread : public JavaThread {
 public:
  JfrRecorderThread(ThreadFunction entry_point) : JavaThread(entry_point) {}
  virtual ~JfrRecorderThread() {}

  virtual bool is_JfrRecorder_thread() const { return true; }
};

static Thread* start_thread(instanceHandle thread_oop, ThreadFunction proc, TRAPS) {
  assert(thread_oop.not_null(), "invariant");
  assert(proc != nullptr, "invariant");

@@ -26,9 +26,9 @@
#define SHARE_JFR_RECORDER_SERVICE_JFRRECORDERTHREAD_HPP

#include "memory/allStatic.hpp"
#include "runtime/javaThread.hpp"
#include "utilities/debug.hpp"

class JavaThread;
class JfrCheckpointManager;
class JfrPostBox;
class Thread;
@@ -42,4 +42,12 @@ class JfrRecorderThreadEntry : AllStatic {
  static bool start(JfrCheckpointManager* cp_manager, JfrPostBox* post_box, TRAPS);
};

class JfrRecorderThread : public JavaThread {
 public:
  JfrRecorderThread(ThreadFunction entry_point) : JavaThread(entry_point) {}
  virtual ~JfrRecorderThread() {}

  virtual bool is_JfrRecorder_thread() const { return true; }
};

#endif // SHARE_JFR_RECORDER_SERVICE_JFRRECORDERTHREAD_HPP
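Moving the JfrRecorderThread subclass from the .cpp file into this header makes the is_JfrRecorder_thread() override visible to other translation units, which can then identify the recorder thread through HotSpot's usual virtual type-test idiom. A self-contained sketch of that idiom (class names invented):

#include <cassert>

// Minimal model of the is_X_thread() virtual type-test idiom;
// ThreadBase and RecorderThread are invented names.
class ThreadBase {
 public:
  virtual ~ThreadBase() = default;
  virtual bool is_recorder_thread() const { return false; }
};

class RecorderThread : public ThreadBase {
 public:
  bool is_recorder_thread() const override { return true; }
};

int main() {
  RecorderThread rt;
  ThreadBase* t = &rt;
  assert(t->is_recorder_thread());  // dispatch works from any call site
  return 0;
}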

@@ -29,6 +29,7 @@
#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"

/*
 * There are two separate repository instances.
@@ -186,6 +187,7 @@ void JfrStackTraceRepository::record_for_leak_profiler(JavaThread* current_threa
}

traceid JfrStackTraceRepository::add_trace(const JfrStackTrace& stacktrace) {
  assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
  MutexLocker lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
  assert(stacktrace.number_of_frames() > 0, "invariant");
  const size_t index = stacktrace._hash % TABLE_SIZE;
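add_trace() now states its concurrency contract up front: it must never run at a safepoint, and the bucket insert happens under JfrStacktrace_lock taken without a safepoint check. A simplified standalone sketch of the guarded hash-bucket insert, using std::mutex in place of the VM mutex (types and table size invented):

#include <cassert>
#include <cstdint>
#include <mutex>

constexpr size_t kTableSize = 2053;  // illustrative, not the HotSpot value

struct Trace { uint64_t hash; Trace* next = nullptr; };

class TraceTable {
  std::mutex _lock;                  // stands in for JfrStacktrace_lock
  Trace* _table[kTableSize] = {};
 public:
  void add(Trace* t) {
    assert(t != nullptr);
    std::lock_guard<std::mutex> guard(_lock);  // MutexLocker analogue
    const size_t index = t->hash % kTableSize; // bucket from precomputed hash
    t->next = _table[index];
    _table[index] = t;
  }
};

int main() {
  TraceTable repo;
  Trace t{0x9e3779b97f4a7c15ULL};
  repo.add(&t);
  return 0;
}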

@@ -390,15 +390,16 @@ static inline void write_stacktraces(JfrChunkWriter& cw) {
  _resolved_list.iterate(scw);
}

// First, we consolidate all stack trace blobs into a single TYPE_STACKTRACE checkpoint
// and serialize it to the chunk. Then, all events are serialized, and unique type set blobs
// written into the JfrCheckpoint system to be serialized to the chunk upon return.
// First, all events are serialized, and unique type set blobs are written into the
// JfrCheckpoint system to be serialized to the chunk upon return.
// Then, we consolidate all stack trace blobs into a single TYPE_STACKTRACE checkpoint
// and serialize it directly to the chunk.
void JfrDeprecationManager::write_edges(JfrChunkWriter& cw, Thread* thread, bool on_error /* false */) {
  if (_resolved_list.is_nonempty() && JfrEventSetting::is_enabled(JfrDeprecatedInvocationEvent)) {
    write_events(cw, thread, on_error);
    if (has_stacktrace()) {
      write_stacktraces(cw);
    }
    write_events(cw, thread, on_error);
  }
}
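The reordering matches the rewritten comment: events are written first, which also resolves which stack trace blobs they reference, and only then is the single consolidated TYPE_STACKTRACE checkpoint emitted. A schematic standalone sketch of that two-phase write (all types invented):

#include <cstdio>
#include <vector>

struct Event { int stacktrace_id; };

// Phase 1: serialize the events, collecting referenced stack traces.
// Phase 2: emit one consolidated checkpoint for everything referenced.
static void write_edges_sketch(const std::vector<Event>& events) {
  std::vector<int> referenced;
  for (const Event& e : events) {
    std::printf("event -> trace %d\n", e.stacktrace_id);
    referenced.push_back(e.stacktrace_id);
  }
  std::printf("checkpoint with %zu stack traces\n", referenced.size());
}

int main() {
  write_edges_sketch({{1}, {2}});
  return 0;
}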

@@ -40,8 +40,6 @@
#define EVENT_STICKY_BIT 8192
#define IS_EVENT_KLASS(ptr) (((ptr)->trace_id() & (JDK_JFR_EVENT_KLASS | JDK_JFR_EVENT_SUBKLASS)) != 0)
#define IS_EVENT_OR_HOST_KLASS(ptr) (((ptr)->trace_id() & (JDK_JFR_EVENT_KLASS | JDK_JFR_EVENT_SUBKLASS | EVENT_HOST_KLASS)) != 0)
#define KLASS_HAS_STICKY_BIT(ptr) (((ptr)->trace_id() & STICKY_BIT) != 0)
#define ON_KLASS_REDEFINITION(k, t) if (KLASS_HAS_STICKY_BIT(k)) Jfr::on_klass_redefinition(k, t)
#define ON_KLASS_CREATION(k, p, t) Jfr::on_klass_creation(k, p, t)

#endif // SHARE_JFR_SUPPORT_JFRKLASSEXTENSION_HPP

@@ -29,6 +29,7 @@
#include "jfr/periodic/sampling/jfrCPUTimeThreadSampler.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrOopTraceId.inline.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
@@ -45,6 +46,7 @@
#include "runtime/os.hpp"
#include "runtime/threadIdentifier.hpp"
#include "utilities/sizes.hpp"
#include "utilities/spinYield.hpp"

JfrThreadLocal::JfrThreadLocal() :
  _sample_request(),
@@ -74,12 +76,14 @@ JfrThreadLocal::JfrThreadLocal() :
  _wallclock_time(os::javaTimeNanos()),
  _non_reentrant_nesting(0),
  _vthread_epoch(0),
  _generation(0),
  _vthread_excluded(false),
  _jvm_thread_excluded(false),
  _enqueued_requests(false),
  _vthread(false),
  _notified(false),
  _dead(false)
  _dead(false),
  _sampling_critical_section(false)
#ifdef LINUX
  ,_cpu_timer(nullptr),
  _cpu_time_jfr_locked(UNLOCKED),
@@ -134,17 +138,33 @@ static void send_java_thread_start_event(JavaThread* jt) {
}

void JfrThreadLocal::on_start(Thread* t) {
  assign_thread_id(t, t->jfr_thread_local());
  JfrThreadLocal* const tl = t->jfr_thread_local();
  assert(tl != nullptr, "invariant");
  assign_thread_id(t, tl);
  if (JfrRecorder::is_recording()) {
    JfrCheckpointManager::write_checkpoint(t);
    if (t->is_Java_thread()) {
      JavaThread* const jt = JavaThread::cast(t);
    if (!t->is_Java_thread()) {
      JfrCheckpointManager::write_checkpoint(t);
      return;
    }
    JavaThread* const jt = JavaThread::cast(t);
    if (jt->thread_state() == _thread_new) {
      JfrCPUTimeThreadSampling::on_javathread_create(jt);
    } else {
      assert(jt->thread_state() == _thread_in_vm, "invariant");
      if (tl->should_write()) {
        JfrCheckpointManager::write_checkpoint(t);
      }
      send_java_thread_start_event(jt);
      if (tl->has_cached_stack_trace()) {
        tl->clear_cached_stack_trace();
      }
      return;
    }
  }
  if (t->jfr_thread_local()->has_cached_stack_trace()) {
    t->jfr_thread_local()->clear_cached_stack_trace();
  if (t->is_Java_thread() && JavaThread::cast(t)->thread_state() == _thread_in_vm) {
    if (tl->has_cached_stack_trace()) {
      tl->clear_cached_stack_trace();
    }
  }
}

@@ -227,13 +247,18 @@ void JfrThreadLocal::on_exit(Thread* t) {
  JfrThreadLocal* const tl = t->jfr_thread_local();
  assert(!tl->is_dead(), "invariant");
  if (JfrRecorder::is_recording()) {
    JfrCheckpointManager::write_checkpoint(t);
  }
  if (t->is_Java_thread()) {
    JavaThread* const jt = JavaThread::cast(t);
    send_java_thread_end_event(jt, JfrThreadLocal::jvm_thread_id(jt));
    JfrCPUTimeThreadSampling::on_javathread_terminate(jt);
    JfrThreadCPULoadEvent::send_event_for_thread(jt);
    if (!t->is_Java_thread()) {
      JfrCheckpointManager::write_checkpoint(t);
    } else {
      JavaThread* const jt = JavaThread::cast(t);
      assert(jt->thread_state() == _thread_in_vm, "invariant");
      if (tl->should_write()) {
        JfrCheckpointManager::write_checkpoint(t);
      }
      send_java_thread_end_event(jt, JfrThreadLocal::jvm_thread_id(jt));
      JfrCPUTimeThreadSampling::on_javathread_terminate(jt);
      JfrThreadCPULoadEvent::send_event_for_thread(jt);
    }
  }
  release(tl, Thread::current()); // because it could be that Thread::current() != t
}
@@ -423,6 +448,15 @@ u2 JfrThreadLocal::vthread_epoch(const JavaThread* jt) {
  return Atomic::load(&jt->jfr_thread_local()->_vthread_epoch);
}

bool JfrThreadLocal::should_write() const {
  const u2 current_generation = JfrTraceIdEpoch::epoch_generation();
  if (Atomic::load(&_generation) != current_generation) {
    Atomic::store(&_generation, current_generation);
    return true;
  }
  return false;
}

traceid JfrThreadLocal::thread_id(const Thread* t) {
  assert(t != nullptr, "invariant");
  if (is_impersonating(t)) {
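should_write() turns the per-thread checkpoint write into a once-per-epoch-generation operation: the first call after a generation shift stamps the thread and returns true, and every later call in the same generation returns false. A standalone model using std::atomic in place of HotSpot's Atomic (names invented):

#include <atomic>
#include <cassert>
#include <cstdint>

// Global generation, bumped at each epoch shift (illustrative).
static std::atomic<uint16_t> g_epoch_generation{0};

struct ThreadLocalState {
  mutable std::atomic<uint16_t> generation{0};

  // True exactly once per observed generation change, like should_write().
  bool should_write() const {
    const uint16_t current = g_epoch_generation.load();
    if (generation.load() != current) {
      generation.store(current);
      return true;
    }
    return false;
  }
};

int main() {
  ThreadLocalState tl;
  g_epoch_generation.store(1);
  assert(tl.should_write());   // first call after the shift
  assert(!tl.should_write());  // already written for this generation
  return 0;
}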
@@ -599,7 +633,10 @@ bool JfrThreadLocal::try_acquire_cpu_time_jfr_dequeue_lock() {
}

void JfrThreadLocal::acquire_cpu_time_jfr_dequeue_lock() {
  while (Atomic::cmpxchg(&_cpu_time_jfr_locked, UNLOCKED, DEQUEUE) != UNLOCKED);
  SpinYield s;
  while (Atomic::cmpxchg(&_cpu_time_jfr_locked, UNLOCKED, DEQUEUE) != UNLOCKED) {
    s.wait();
  }
}

void JfrThreadLocal::release_cpu_time_jfr_queue_lock() {
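The acquire path keeps the same compare-and-swap loop but now backs off between failed attempts instead of spinning hot on the cache line. A portable sketch of the same pattern, with std::this_thread::yield() standing in for SpinYield::wait() (the State enum and g_lock are invented stand-ins):

#include <atomic>
#include <thread>

enum State { UNLOCKED = 0, DEQUEUE = 1 };

static std::atomic<int> g_lock{UNLOCKED};

// CAS loop with backoff: failed attempts yield to other threads
// instead of hammering the contended location.
static void acquire_dequeue_lock() {
  int expected = UNLOCKED;
  while (!g_lock.compare_exchange_weak(expected, DEQUEUE)) {
    expected = UNLOCKED;
    std::this_thread::yield();  // SpinYield::wait() analogue
  }
}

static void release_lock() { g_lock.store(UNLOCKED); }

int main() {
  acquire_dequeue_lock();
  release_lock();
  return 0;
}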

@@ -75,6 +75,7 @@ class JfrThreadLocal {
  jlong _wallclock_time;
  int32_t _non_reentrant_nesting;
  u2 _vthread_epoch;
  mutable u2 _generation;
  bool _vthread_excluded;
  bool _jvm_thread_excluded;
  volatile bool _enqueued_requests;
@@ -348,6 +349,9 @@ class JfrThreadLocal {
    return _sampling_critical_section;
  }

  // Serialization state.
  bool should_write() const;

  static int32_t make_non_reentrant(Thread* thread);
  static void make_reentrant(Thread* thread, int32_t previous_nesting);

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -95,11 +95,13 @@ class JfrTraceFlag {
  uint8_t* trace_flags_addr() const { \
    return _trace_flags.flags_addr(); \
  } \
  uint8_t* trace_meta_addr() const { \
  uint8_t* trace_flags_meta_addr() const { \
    return _trace_flags.meta_addr(); \
  } \
  void copy_trace_flags(uint16_t rhs_flags) const { \
    _trace_flags.set_flags(_trace_flags.flags() | rhs_flags); \
  void copy_trace_flags(const Method* rhm) const { \
    assert(rhm != nullptr, "invariant"); \
    set_trace_flags(rhm->trace_flags()); \
    assert(trace_flags() == rhm->trace_flags(), ""); \
  }

#endif // SHARE_JFR_SUPPORT_JFRTRACEIDEXTENSION_HPP
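The macro-generated copy_trace_flags used to OR a raw flag word into the existing flags; the new shape copies the complete flag state from a peer Method and verifies the copy. A standalone model of the new contract (TraceFlagHolder is an invented stand-in for the macro expansion):

#include <cassert>
#include <cstdint>

class TraceFlagHolder {
  mutable uint16_t _flags = 0;
 public:
  uint16_t trace_flags() const { return _flags; }
  void set_trace_flags(uint16_t f) const { _flags = f; }

  // New shape: copy whole flag state from a peer, then verify.
  void copy_trace_flags(const TraceFlagHolder* rhs) const {
    assert(rhs != nullptr);
    set_trace_flags(rhs->trace_flags());
    assert(trace_flags() == rhs->trace_flags());
  }
};

int main() {
  TraceFlagHolder a, b;
  b.set_trace_flags(0x0042);
  a.copy_trace_flags(&b);
  return 0;
}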

@@ -36,16 +36,16 @@ class InstanceKlass;
class JfrInstrumentedClass {
 private:
  traceid _trace_id;
  const InstanceKlass* _instance_klass;
  const InstanceKlass* _ik;
  bool _unloaded;

 public:
  JfrInstrumentedClass(traceid trace_id = 0, const InstanceKlass* instance_klass = nullptr, bool unloaded = false) :
    _trace_id(trace_id), _instance_klass(instance_klass), _unloaded(unloaded) {
  JfrInstrumentedClass(traceid trace_id = 0, const InstanceKlass* ik = nullptr, bool unloaded = false) :
    _trace_id(trace_id), _ik(ik), _unloaded(unloaded) {
  }

  const InstanceKlass* instance_klass() const {
    return _instance_klass;
    return _ik;
  }

  traceid trace_id() const {

@@ -67,6 +67,8 @@ void JfrMethodProcessor::update_methods(const InstanceKlass* ik) {
    const uint32_t idx = _methods->at(i).methods_array_index();
    Method* const method = ik_methods->at(idx);
    assert(method != nullptr, "invariant");
    assert(method->name() == _methods->at(i).name(), "invariant");
    assert(method->signature() == _methods->at(i).signature(), "invariant");
    _methods->at(i).set_method(method);
    // This is to keep the method from being unloaded during redefine / retransform.
    // Equivalent functionality to that provided by the methodHandle. Unfortunately,

@@ -128,11 +128,11 @@ void JfrMethodTracer::retransform(JNIEnv* env, const JfrFilterClassClosure& clas
  }
}

static void handle_no_bytecode_result(const Klass* klass) {
  assert(klass != nullptr, "invariant");
  if (JfrTraceId::has_sticky_bit(klass)) {
static void handle_no_bytecode_result(const InstanceKlass* ik) {
  assert(ik != nullptr, "invariant");
  if (JfrTraceId::has_sticky_bit(ik)) {
    MutexLocker lock(ClassLoaderDataGraph_lock);
    JfrTraceTagging::clear_sticky_bit(InstanceKlass::cast(klass));
    JfrTraceTagging::clear_sticky(ik);
  }
}

@@ -143,11 +143,11 @@ void JfrMethodTracer::on_klass_creation(InstanceKlass*& ik, ClassFileParser& par
  ResourceMark rm(THREAD);

  // 1. Is the ik the initial load, i.e. the first InstanceKlass, or a scratch klass, denoting a redefine / retransform?
  const Klass* const existing_klass = JfrClassTransformer::find_existing_klass(ik, THREAD);
  const bool is_retransform = existing_klass != nullptr;
  const InstanceKlass* const existing_ik = JfrClassTransformer::find_existing_klass(ik, THREAD);
  const bool is_retransform = existing_ik != nullptr;

  // 2. Test the ik and its methods against the currently installed filter object.
  JfrMethodProcessor mp(is_retransform ? InstanceKlass::cast(existing_klass) : ik, THREAD);
  JfrMethodProcessor mp(is_retransform ? existing_ik : ik, THREAD);
  if (!mp.has_methods()) {
    return;
  }
@@ -159,7 +159,7 @@ void JfrMethodTracer::on_klass_creation(InstanceKlass*& ik, ClassFileParser& par
  // If no bytecode is returned, either an error occurred during transformation, or, more likely,
  // the matched instructions were negative, i.e. instructions to remove existing instrumentation,
  // and so Java added no new instrumentation. By not returning a bytecode result, the klass is restored to its original, non-instrumented version.
  handle_no_bytecode_result(is_retransform ? InstanceKlass::cast(existing_klass) : ik);
  handle_no_bytecode_result(is_retransform ? existing_ik : ik);
  return;
}
// 4. Now create a new InstanceKlass representation from the modified bytecode.
@@ -173,13 +173,12 @@ void JfrMethodTracer::on_klass_creation(InstanceKlass*& ik, ClassFileParser& par
  // Keep the original cached class file data from the existing class.
  JfrClassTransformer::transfer_cached_class_file_data(ik, new_ik, parser, THREAD);
  JfrClassTransformer::rewrite_klass_pointer(ik, new_ik, parser, THREAD); // The ik is modified to point to new_ik here.
  const InstanceKlass* const existing_ik = InstanceKlass::cast(existing_klass);
  mp.update_methods(existing_ik);
  existing_ik->module()->add_read(jdk_jfr_module());
  // By setting the sticky bit on the existing klass, we receive a callback into on_klass_redefinition (see below)
  // when our new methods are installed into the existing klass as part of retransformation / redefinition.
  // Only when we know our new methods have been installed can we add the klass to the instrumented list (done as part of the callback).
  JfrTraceTagging::install_sticky_bit_for_retransform_klass(existing_ik, mp.methods(), mp.has_timing());
  JfrTraceTagging::tag_sticky_for_retransform_klass(existing_ik, ik, mp.methods(), mp.has_timing());
  return;
}
// Initial class load.
@@ -203,28 +202,22 @@ static inline void log_add(const InstanceKlass* ik) {
  }
}

void JfrMethodTracer::add_timing_entry(const InstanceKlass* ik, traceid klass_id) {
  assert(ik != nullptr, "invariant");
void JfrMethodTracer::add_timing_entry(traceid klass_id) {
  assert(_timing_entries != nullptr, "invariant");
  if (JfrTraceId::has_timing_bit(ik)) {
    JfrTraceId::clear_timing_bit(ik);
    _timing_entries->append(klass_id);
  }
  _timing_entries->append(klass_id);
}

// At this point we have installed our new retransformed methods into the original klass, which is ik.
// jvmtiRedefineClasses::redefine_single_class() has finished so we are still at a safepoint.
// If the original klass is not already in the list, add it and also dynamically tag all
// artifacts that have the sticky bit set. If the klass has an associated TimedClass,
// also add the klass to the list of _timing_entries for publication.
void JfrMethodTracer::on_klass_redefinition(const InstanceKlass* ik, Thread* thread) {
// jvmtiRedefineClasses::redefine_single_class() is about to finish so we are still at a safepoint.
// If the original klass is not already in the list, add it. If the klass has an associated TimedClass,
// also add the klass_id to the list of _timing_entries for publication.
void JfrMethodTracer::on_klass_redefinition(const InstanceKlass* ik, bool has_timing) {
  assert(ik != nullptr, "invariant");
  assert(!ik->is_scratch_class(), "invariant");
  assert(ik->has_been_redefined(), "invariant");
  assert(JfrTraceId::has_sticky_bit(ik), "invariant");
  assert(in_use(), "invariant");
  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
  assert_locked_or_safepoint(ClassLoaderDataGraph_lock);

  const traceid klass_id = JfrTraceId::load_raw(ik);
  const JfrInstrumentedClass jic(klass_id, ik, false);
@@ -235,8 +228,9 @@ void JfrMethodTracer::on_klass_redefinition(const InstanceKlass* ik, Thread* thr
    assert(!JfrTraceIdEpoch::has_method_tracer_changed_tag_state(), "invariant");
    JfrTraceIdEpoch::set_method_tracer_tag_state();
  }
  add_timing_entry(ik, klass_id);
  JfrTraceTagging::set_dynamic_tag_for_sticky_bit(ik);
  if (has_timing) {
    add_timing_entry(klass_id);
  }
  log_add(ik);
}
}
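The redefinition path is a two-step handshake: tag_sticky_for_retransform_klass() marks the existing klass when the retransform is requested, and on_klass_redefinition() fires at the safepoint once the new methods are actually installed, publishing the klass to the instrumented list and queuing a timing entry only when has_timing is set. A condensed standalone model of that handshake; every name here is invented:

#include <cassert>
#include <cstdint>
#include <vector>

struct Klass {
  uint64_t id;
  bool sticky = false;
  bool has_timing = false;
};

static std::vector<uint64_t> g_instrumented;
static std::vector<uint64_t> g_timing_entries;

// Retransform request: mark the klass so redefinition calls us back.
static void tag_sticky_for_retransform(Klass& k, bool timing) {
  k.sticky = true;
  k.has_timing = timing;
}

// Callback once the new methods are installed (a safepoint in the VM).
static void on_klass_redefinition(Klass& k) {
  assert(k.sticky);
  g_instrumented.push_back(k.id);      // now safe to publish
  if (k.has_timing) {
    g_timing_entries.push_back(k.id);  // queue TimedClass publication
  }
}

int main() {
  Klass k{17};
  tag_sticky_for_retransform(k, /*timing=*/true);
  on_klass_redefinition(k);
  assert(g_timing_entries.size() == 1);
  return 0;
}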
@@ -258,8 +252,7 @@ void JfrMethodTracer::add_instrumented_class(InstanceKlass* ik, GrowableArray<Jf
  ik->module()->add_read(jdk_jfr_module());
  MutexLocker lock(ClassLoaderDataGraph_lock);
  assert(!in_instrumented_list(ik, instrumented_classes()), "invariant");
  JfrTraceTagging::set_dynamic_tag(ik, methods);
  JfrTraceTagging::set_sticky_bit(ik, methods);
  JfrTraceTagging::tag_sticky(ik, methods);
  const JfrInstrumentedClass jik(JfrTraceId::load_raw(ik), ik, false);
  const int idx = instrumented_classes()->append(jik);
  if (idx == 0) {

@@ -51,7 +51,7 @@ class JfrMethodTracer: AllStatic {
  static GrowableArray<jlong>* _timing_entries; // Guarded by ClassLoaderDataGraph_lock

  static ModuleEntry* jdk_jfr_module();
  static void add_timing_entry(const InstanceKlass* ik, traceid klass_id);
  static void add_timing_entry(traceid klass_id);
  static void retransform(JNIEnv* env, const JfrFilterClassClosure& classes, TRAPS);
  static void add_instrumented_class(InstanceKlass* ik, GrowableArray<JfrTracedMethod>* methods);

@@ -61,7 +61,7 @@ class JfrMethodTracer: AllStatic {
  static void add_to_unloaded_set(const Klass* k);
  static void trim_instrumented_classes(bool trim);
  static GrowableArray<JfrInstrumentedClass>* instrumented_classes();
  static void on_klass_redefinition(const InstanceKlass* ik, Thread* thread);
  static void on_klass_redefinition(const InstanceKlass* ik, bool has_timing);
  static void on_klass_creation(InstanceKlass*& ik, ClassFileParser& parser, TRAPS);
  static jlongArray set_filters(JNIEnv* env,
                                jobjectArray classes,

Some files were not shown because too many files have changed in this diff.