Compare commits

...

156 Commits

Author SHA1 Message Date
Rob McKenna
78770bfaef 8367031: [backout] Change java.time month/day field types to 'byte'
Backport-of: 00be643fa3eff6fd66d39f5f5ea70ff347296318
2025-09-25 16:16:56 +00:00
Nibedita Jena
5100536d49 8368308: ISO 4217 Amendment 180 Update
Backport-of: 3f9c665586705c833674ae998f49cabbc7e15615
2025-09-25 14:42:40 +00:00
Nibedita Jena
415f2adffb 8366223: ZGC: ZPageAllocator::cleanup_failed_commit_multi_partition is broken
Backport-of: 009612805f79e37d9ce4e3f5c90627b635b095cf
2025-09-02 11:16:22 +00:00
Nibedita Jena
f92ad752ab Merge 2025-08-19 10:27:06 +00:00
Matias Saavedra Silva
4f265785a9 8352637: Enhance bytecode verification
Reviewed-by: dlong
Backport-of: d9bf0c2ca2d52d783a8122504cac9566d42b22df
2025-08-13 16:03:34 +00:00
Prasanta Sadhukhan
6c48f4ed70 8348760: RadioButton is not shown if JRadioButtonMenuItem is rendered with ImageIcon in WindowsLookAndFeel
Reviewed-by: prr, aivanov
Backport-of: e29346dbd6
2025-08-12 17:15:36 +00:00
Nibedita Jena
c5d85e09e1 8360647: [XWayland] [OL10] NumPad keys are not triggered
Backport-of: 4d5fb6eb8bb66556f06dada72df531d537cf32c2
2025-08-12 11:47:25 +00:00
Ravi Reddy
408ae8637c 8356294: Enhance Path Factories
Backport-of: 5835cefe4946524af3be4933b20cd1b0005b0ad0
2025-08-11 12:57:00 +00:00
Nibedita Jena
1962c746dc Merge
Reviewed-by: rreddy
2025-08-06 15:52:35 +00:00
Nibedita Jena
c02fce22ed 8361212: Remove AffirmTrust root CAs
Backport-of: e58859e8acc19bfd8aaa80e98534651e83850a97
2025-08-06 08:43:16 +00:00
Jesper Wilhelmsson
73c28c2e3d 8364038: Remove EA from the JDK 25 version string with first RC promotion
Reviewed-by: mikael, erikj, iris, cstein
2025-08-05 23:17:32 +00:00
Markus Grönlund
1e2bf070f0 8364258: ThreadGroup constant pool serialization is not normalized
Reviewed-by: egahlin
Backport-of: 3bc449797e
2025-08-04 14:52:19 +00:00
Manuel Hässig
24936b9295 8364409: [BACKOUT] Consolidate Identity of self-inverse operations
Reviewed-by: chagedorn, bmaillard
Backport-of: ddb64836e5
2025-08-04 08:39:05 +00:00
Justin Lu
b5bec8db3f 8364370: java.text.DecimalFormat specification indentation correction
Reviewed-by: liach, naoto
Backport-of: 8e921aee5a
2025-08-01 21:25:52 +00:00
Justin Lu
9bdf9ebadd 8360416: Incorrect l10n test case in sun/security/tools/keytool/i18n.java
Reviewed-by: hchao
Backport-of: 5540a7859b
2025-08-01 16:09:52 +00:00
Nibedita Jena
99f80700d7 8356587: Missing object ID X in pool jdk.types.Method
Backport-of: 9fe2aa59ff
2025-07-31 11:24:44 +00:00
Nibedita Jena
d30e89c381 8360679: Shenandoah: AOT saved adapter calls into broken GC barrier stub
Backport-of: 033a121c96
2025-07-31 11:05:30 +00:00
Nibedita Jena
1d92cd3517 8362882: Update SubmissionPublisher() specification to reflect use of ForkJoinPool.asyncCommonPool()
Backport-of: 3db8262445
2025-07-31 11:05:12 +00:00
Nibedita Jena
6fcaf66539 8315131: Clarify VarHandle set/get access on 32-bit platforms
Backport-of: 1867effcc0
2025-07-31 11:00:02 +00:00
Jiangli Zhou
7b69679175 8362564: hotspot/jtreg/compiler/c2/TestLWLockingCodeGen.java fails on static JDK on x86_64 with AVX instruction extensions
Reviewed-by: kvn
Backport-of: c239c0ab00
2025-07-30 23:19:40 +00:00
Alisen Chung
bf31e50754 8364089: JDK 25 RDP2 L10n resource files update
Reviewed-by: jlu, naoto
Backport-of: c671089d6e
2025-07-30 22:52:03 +00:00
Markus Grönlund
9fe2aa59ff 8356587: Missing object ID X in pool jdk.types.Method
Reviewed-by: egahlin
Backport-of: a34994476e
2025-07-29 11:40:55 +00:00
Alan Bateman
3db8262445 8362882: Update SubmissionPublisher() specification to reflect use of ForkJoinPool.asyncCommonPool()
Reviewed-by: jpai
Backport-of: f79bd54bbb
2025-07-25 11:21:52 +00:00
Aleksey Shipilev
033a121c96 8360679: Shenandoah: AOT saved adapter calls into broken GC barrier stub
Reviewed-by: kvn
Backport-of: 8477630970
2025-07-25 08:09:02 +00:00
Chen Liang
1867effcc0 8315131: Clarify VarHandle set/get access on 32-bit platforms
Reviewed-by: rriggs, iris
Backport-of: ea6674fec8
2025-07-24 17:49:18 +00:00
pavel_kharskii
3eee56e456 8362109: Change milestone to fcs for all releases
Reviewed-by: coffeys, mvs
2025-07-22 12:14:38 +00:00
Nibedita Jena
5dab0808b1 Merge 2025-07-22 05:13:25 +00:00
Rob McKenna
987af5af16 Merge 2025-07-21 17:45:35 +00:00
Thomas Schatzl
e8f2cd8f3d 8347052: Update java man page documentation to reflect current state of the UseNUMA flag
Reviewed-by: ayang
Backport-of: ea774b74e8
2025-07-18 11:36:08 +00:00
SendaoYan
e599ee4a88 8361827: [TESTBUG] serviceability/HeapDump/UnmountedVThreadNativeMethodAtTop.java throws OutOfMemoryError
Reviewed-by: rrich, lmesnik
Backport-of: cbb3d23e19
2025-07-18 06:08:46 +00:00
David Holmes
3a8e9dfe85 8362565: ProblemList jdk/jfr/event/io/TestIOTopFrame.java
Reviewed-by: egahlin
Backport-of: 04c0b130f0
2025-07-18 02:39:10 +00:00
William Kemper
347084bfbd 8360288: Shenandoah crash at size_given_klass in op_degenerated
Reviewed-by: shade
Backport-of: 3b44d7bfa4
2025-07-17 16:50:07 +00:00
SendaoYan
5cc7a31b3f 8361869: Tests which call ThreadController should mark as /native
Reviewed-by: jpai
Backport-of: 3bacf7ea85
2025-07-17 12:50:53 +00:00
SendaoYan
f1f6452e01 8358004: Delete applications/scimark/Scimark.java test
Reviewed-by: coleenp
Backport-of: a5c9bc7032
2025-07-17 12:41:34 +00:00
Erik Gahlin
331adac38e 8361639: JFR: Incorrect top frame for I/O events
Reviewed-by: mgronlun
Backport-of: 1a6cbe421f
2025-07-17 12:20:22 +00:00
Brian Burkhalter
e989c1d138 8362429: AssertionError in File.listFiles(FileFilter | FilenameFilter)
Reviewed-by: alanb
Backport-of: be0161a8e6
2025-07-17 06:57:58 +00:00
Boris Ulasevich
5129887dfe 8362250: ARM32: forward_exception_entry missing return address
Reviewed-by: shade
Backport-of: 6ed81641b1
2025-07-17 01:29:57 +00:00
Brian Burkhalter
69ea85ee12 8361587: AssertionError in File.listFiles() when path is empty and -esa is enabled
Reviewed-by: alanb
Backport-of: eefbfdce31
2025-07-16 15:35:50 +00:00
Erik Gahlin
93260d639e 8361640: JFR: RandomAccessFile::readLine emits events for each character
Reviewed-by: mgronlun, alanb
Backport-of: 9bef2d1610
2025-07-16 15:35:30 +00:00
Tobias Hartmann
b67fb82a03 8362171: C2 fails with unexpected node in SuperWord truncation: ModI
Reviewed-by: chagedorn
Backport-of: 70c1ff7e15
2025-07-16 14:51:08 +00:00
Johannes Bechberger
a626c1d92c 8358619: Fix interval recomputation in CPU Time Profiler
Reviewed-by: jbachorik
Backport-of: c70258ca1c
2025-07-16 10:13:57 +00:00
Johannes Bechberger
533211af73 8358621: Reduce busy waiting in worse case at the synchronization point returning from native in CPU Time Profiler
Reviewed-by: shade
Backport-of: d2082c58ff
2025-07-16 07:31:23 +00:00
Erik Gahlin
07bb0e3e2f 8362097: JFR: Active Settings view broken
Reviewed-by: mgronlun
Backport-of: 25e509b0db
2025-07-16 06:56:09 +00:00
Tobias Hartmann
60196a6b6f 8361952: Installation of MethodData::extra_data_lock() misses synchronization on reader side
Reviewed-by: chagedorn
Backport-of: 272e66d017
2025-07-16 06:33:47 +00:00
Brent Christian
0e6bf00550 Merge
Reviewed-by: jpai
2025-07-16 03:57:34 +00:00
Calvin Cheung
e1926a6d0e 8361328: cds/appcds/dynamicArchive/TestAutoCreateSharedArchive.java archive timestamps comparison failed
Reviewed-by: matsaave, iklam
Backport-of: 4a351e3e57
2025-07-15 21:46:00 +00:00
David Holmes
03a67a969b 8356942: invokeinterface Throws AbstractMethodError Instead of IncompatibleClassChangeError
Reviewed-by: iklam, coleenp
Backport-of: f36147b326
2025-07-15 20:56:47 +00:00
Chris Plummer
cf92877aa5 8361905: Problem list serviceability/sa/ClhsdbThreadContext.java on Windows due to JDK-8356704
Reviewed-by: sspitsyn
Backport-of: f7e8d255cc
2025-07-15 18:29:32 +00:00
Nibedita Jena
05bf5e3a50 Merge 2025-07-15 13:44:52 +00:00
Nibedita Jena
cc2cf97834 8360937: Enhance certificate handling
Reviewed-by: mullan
Backport-of: f2fba5a55176ca82985ca42996cef36be7b7500a
2025-07-15 13:39:47 +00:00
Phil Race
121f5a72e4 8360147: Better Glyph drawing redux
Reviewed-by: rhalade, ahgross, psadhukhan, jdv
2025-07-15 19:00:48 +05:30
Phil Race
52e1e739af 8355884: [macos] java/awt/Frame/I18NTitle.java fails on MacOS
Reviewed-by: kcr, dmarkov, aivanov, honkar, kizune
2025-07-15 19:00:48 +05:30
Darragh Clarke
5ae719c8fc 8350991: Improve HTTP client header handling
Reviewed-by: rhalade, dfuchs, michaelm
2025-07-15 19:00:47 +05:30
Kevin Driver
3ec6eb6482 8349594: Enhance TLS protocol support
Reviewed-by: rhalade, ahgross, wetmore, jnimeh
2025-07-15 19:00:47 +05:30
Christian Hagedorn
fae2345971 8349584: Improve compiler processing
Reviewed-by: rhalade, ahgross, epeter, thartmann
2025-07-15 19:00:47 +05:30
Prasanta Sadhukhan
6e490a465a 8349111: Enhance Swing supports
Reviewed-by: rhalade, jdv, prr
2025-07-15 19:00:47 +05:30
Phil Race
2555b5a632 8348989: Better Glyph drawing
Reviewed-by: mschoene, psadhukhan, jdv, rhalade
2025-07-15 19:00:47 +05:30
Volkan Yazici
caac8172ad 8349551: Failures in tests after JDK-8345625
Reviewed-by: jpai, dfuchs
2025-07-15 19:00:47 +05:30
Volkan Yazici
d1ea951d39 8345625: Better HTTP connections
Reviewed-by: skoivu, rhalade, ahgross, dfuchs, jpai, aefimov
2025-07-15 19:00:47 +05:30
Tobias Hartmann
7aa3f31724 8359678: C2: assert(static_cast<T1>(result) == thing) caused by ReverseBytesNode::Value()
Reviewed-by: chagedorn
Backport-of: e5ab210713
2025-07-15 11:35:53 +00:00
Richard Reingruber
ce85123f3a 8361602: [TESTBUG] serviceability/HeapDump/UnmountedVThreadNativeMethodAtTop.java deadlocks on exception
Reviewed-by: clanger, cjplummer
Backport-of: 917d0182cb
2025-07-15 08:02:44 +00:00
William Kemper
20fc8f74d5 8361529: GenShen: Fix bad assert in swap card tables
Reviewed-by: shade
Backport-of: 1de2acea77
2025-07-14 16:50:47 +00:00
Taizo Kurashige
db6230991b 8358819: The first year is not displayed correctly in Japanese Calendar
Backport-of: 99829950f6
2025-07-14 15:26:25 +00:00
Tobias Hartmann
dd82a0922b 8350177: C2 SuperWord: Integer.numberOfLeadingZeros, numberOfTrailingZeros, reverse and bitCount have input types wrongly truncated for byte and short
Reviewed-by: chagedorn
Backport-of: 77bd417c99
2025-07-14 07:31:27 +00:00
Srinivas Vamsi Parasa
9f21845262 8360775: Fix Shenandoah GC test failures when APX is enabled
Reviewed-by: shade, sviswanathan, jbhateja
Backport-of: 1c560727b8
2025-07-14 02:55:02 +00:00
Srinivas Vamsi Parasa
c5d0f1bc5e 8360776: Disable Intel APX by default and enable it with -XX:+UnlockExperimentalVMOptions -XX:+UseAPX in all builds
Reviewed-by: kvn, sviswanathan
Backport-of: 26b002805a
2025-07-12 21:34:48 +00:00
Chen Liang
c374ac6df4 8361615: CodeBuilder::parameterSlot throws undocumented IOOBE
Reviewed-by: asotona
Backport-of: c9bea77342
2025-07-11 22:52:41 +00:00
Dingli Zhang
98bc22a969 8361829: [TESTBUG] RISC-V: compiler/vectorization/runner/BasicIntOpTest.java fails with RVV but not Zvbb
Backport-of: 2e7e272d7b
2025-07-11 20:59:26 +00:00
Dingli Zhang
05dab283f2 8361532: RISC-V: Several vector tests fail after JDK-8354383
Backport-of: e0245682c8
2025-07-11 20:59:07 +00:00
Rob McKenna
8229274b2d Merge 2025-07-11 19:16:38 +00:00
Boris Ulasevich
44f5dfef97 8358183: [JVMCI] crash accessing nmethod::jvmci_name in CodeCache::aggregate
Reviewed-by: thartmann
Backport-of: 74822ce12a
2025-07-11 11:59:32 +00:00
David Holmes
9adc480ec3 8361447: [REDO] Checked version of JNI Release<type>ArrayElements needs to filter out known wrapped arrays
8361754: New test runtime/jni/checked/TestCharArrayReleasing.java can cause disk full errors

Reviewed-by: coleenp
Backport-of: f67e435431
2025-07-11 00:21:36 +00:00
William Kemper
4d5211ccb0 8357976: GenShen crash in swap_card_tables: Should be clean
Reviewed-by: shade
Backport-of: 382f870cd5
2025-07-10 22:26:14 +00:00
Vladimir Kozlov
e92f387ab5 8360942: [ubsan] aotCache tests trigger runtime error: applying non-zero offset 16 to null pointer in CodeBlob::relocation_end()
Reviewed-by: shade, thartmann
Backport-of: dedcce0450
2025-07-10 17:04:29 +00:00
Chris Plummer
96380509b3 8360312: Serviceability Agent tests fail with JFR enabled due to unknown thread type JfrRecorderThread
Reviewed-by: kevinw, sspitsyn
Backport-of: 712d866b72
2025-07-10 15:43:11 +00:00
Brian Burkhalter
9b99ed8b39 8361299: (bf) CharBuffer.getChars(int,int,char[],int) violates pre-existing specification
Reviewed-by: liach, alanb
Backport-of: 6249259c80
2025-07-10 15:14:31 +00:00
Richard Reingruber
532b1c732e 8360599: [TESTBUG] DumpThreadsWithEliminatedLock.java fails because of unstable inlining
Reviewed-by: mdoerr, kevinw
Backport-of: fea73c1d40
2025-07-10 07:34:40 +00:00
Erik Gahlin
1de8943731 8361175: JFR: Document differences between method sample events
Reviewed-by: mgronlun
Backport-of: 63e08d4af7
2025-07-09 15:32:57 +00:00
Jan Lahoda
50751da562 8361570: Incorrect 'sealed is not allowed here' compile-time error
Reviewed-by: liach, vromero
Backport-of: 853319439e
2025-07-09 13:41:05 +00:00
Nibedita Jena
83d69cab8b Merge
Reviewed-by: rreddy
2025-07-09 05:29:11 +00:00
Jan Lahoda
21cb2acda0 8361445: javac crashes on unresolvable constant in @SuppressWarnings
Reviewed-by: liach, asotona
Backport-of: 0bd2f9cba2
2025-07-09 05:07:20 +00:00
Vicente Romero
0e4422b284 8361214: An anonymous class is erroneously being classify as an abstract class
Reviewed-by: liach
Backport-of: 05c9eec8d0
2025-07-08 21:13:43 +00:00
Ioi Lam
1e985168d6 8358680: AOT cache creation fails: no strings should have been added
Reviewed-by: shade, kvn
Backport-of: 3daa03c30f
2025-07-08 19:02:36 +00:00
Ioi Lam
b8965318c1 8360164: AOT cache creation crashes in ~ThreadTotalCPUTimeClosure()
Reviewed-by: shade, kvn
Backport-of: 7d7e60c8ae
2025-07-08 17:36:10 +00:00
Ioi Lam
afe6bd6910 8336147: Clarify CDS documentation about static vs dynamic archive
Reviewed-by: shade
Backport-of: 854de8c9c6
2025-07-08 17:34:39 +00:00
Matthias Baesken
5500a2d134 8357826: Avoid running some jtreg tests when asan is configured
Backport-of: d7aa349820
2025-07-08 15:12:25 +00:00
Erik Gahlin
b3b5595362 8361338: JFR: Min and max time in MethodTime event is confusing
Reviewed-by: shade
Backport-of: f3e0588d0b
2025-07-08 14:03:56 +00:00
Jan Lahoda
5e716fd7d1 8359596: Behavior change when both -Xlint:options and -Xlint:-options flags are given
Reviewed-by: liach
Backport-of: 3525a40f39
2025-07-08 07:16:25 +00:00
Roger Riggs
3e93b98baf 8354872: Clarify java.lang.Process resource cleanup
Reviewed-by: iris
Backport-of: afb4a1be9e
2025-07-07 22:18:03 +00:00
Kieran Farrell
1ce41821b5 8359454: Enhance String handling
Backport-of: 2f2665738a67aeed224b54870608a346eb627d2a
2025-07-07 18:46:07 +00:00
Ian Myers
829742bcb4 8358577: Test serviceability/jvmti/thread/GetCurrentContendedMonitor/contmon01/contmon01.java failed: unexpexcted monitor object
Backport-of: 8f487d26c0
2025-07-07 16:24:14 +00:00
Manukumar V S
9a73987f9b 8359889: java/awt/MenuItem/SetLabelTest.java inadvertently triggers clicks on items pinned to the taskbar
Reviewed-by: abhiscxk, aivanov
Backport-of: b7fcd0b235
2025-07-07 13:14:30 +00:00
Matthias Baesken
622c743470 8360533: ContainerRuntimeVersionTestUtils fromVersionString fails with some docker versions
Backport-of: 97ec9d3e0a
2025-07-07 07:16:40 +00:00
Erik Gahlin
8707167ef3 8358750: JFR: EventInstrumentation MASK_THROTTLE* constants should be computed in longs
Reviewed-by: mgronlun
Backport-of: 77e69e02eb
2025-07-04 15:07:32 +00:00
Erik Gahlin
e3bd9c6e1c 8360287: JFR: PlatformTracer class should be loaded lazily
Reviewed-by: mgronlun
Backport-of: 8ea544c33f
2025-07-03 18:34:38 +00:00
Martin Doerr
993215f3dd 8361259: JDK25: Backout JDK-8258229
Reviewed-by: mhaessig, thartmann, dlong
2025-07-03 08:52:23 +00:00
Martin Doerr
8a98738f44 8361183: JDK-8360887 needs fixes to avoid cycles and better tests (aix)
Reviewed-by: mbaesken
Backport-of: c460f842bf
2025-07-03 08:46:22 +00:00
Ashutosh Mehra
ab01396209 8361101: AOTCodeAddressTable::_stubs_addr not initialized/freed properly
Reviewed-by: shade
Backport-of: 3066a67e62
2025-07-02 17:49:52 +00:00
Kevin Walls
92268e17be 8359870: JVM crashes in AccessInternal::PostRuntimeDispatch
Reviewed-by: alanb, sspitsyn
Backport-of: 13a3927855
2025-07-02 16:59:29 +00:00
Renjith Kannath Pariyangad
94b6b99ba4 8358452: JNI exception pending in Java_sun_awt_screencast_ScreencastHelper_remoteDesktopKeyImpl of screencast_pipewire.c:1214 (ID: 51119)
Backport-of: 2103dc15cb
2025-07-02 16:44:01 +00:00
Martin Doerr
a98a5e54fc 8360887: (fs) Files.getFileAttributeView returns unusable FileAttributeView if UserDefinedFileAttributeView unavailable (aix)
Reviewed-by: mbaesken
Backport-of: 0572b6ece7
2025-07-02 15:34:12 +00:00
Aleksey Shipilev
b245c517e3 8359436: AOTCompileEagerly should not be diagnostic
Reviewed-by: kvn
Backport-of: e138297323
2025-07-02 11:52:28 +00:00
Tobias Hartmann
0a151c68d6 8358179: Performance regression in Math.cbrt
Reviewed-by: epeter
Backport-of: 38f59f84c9
2025-07-02 08:23:19 +00:00
Jaikiran Pai
554e38dd5a 8359337: XML/JAXP tests that make network connections should ensure that no proxy is selected
Reviewed-by: dfuchs, iris, joehw
Backport-of: 7583a7b857
2025-07-02 01:36:10 +00:00
Fei Yang
26d99e045a 8359270: C2: alignment check should consider base offset when emitting arraycopy runtime call
Backport-of: 6b4393917a
2025-07-01 00:49:45 +00:00
Archie Cobbs
16addb192b 8359596: Behavior change when both -Xlint:options and -Xlint:-options flags are given
Backport-of: 3525a40f39
2025-06-30 16:54:20 +00:00
Aleksey Shipilev
b5b0b3a33a 8360201: JFR: Initialize JfrThreadLocal::_sampling_critical_section
Reviewed-by: zgu
Backport-of: 5c1f77fab1
2025-06-30 13:28:03 +00:00
David Holmes
0dc9e8447b 8358645: Access violation in ThreadsSMRSupport::print_info_on during thread dump
Reviewed-by: shade, dcubed
Backport-of: 334683e634
2025-06-30 01:06:46 +00:00
Alisen Chung
12ffb0c131 8359761: JDK 25 RDP1 L10n resource files update
Reviewed-by: jlu, aivanov
Backport-of: da7080fffb
2025-06-27 19:28:15 +00:00
Roland Westrelin
eaaaae5be9 8356708: C2: loop strip mining expansion doesn't take sunk stores into account
Reviewed-by: thartmann, epeter
Backport-of: c11f36e620
2025-06-27 16:27:33 +00:00
Jaikiran Pai
926c900efa 8359830: Incorrect os.version reported on macOS Tahoe 26 (Beta)
Reviewed-by: rriggs
Backport-of: 8df6b2c4a3
2025-06-27 02:18:57 +00:00
Roman Kennke
658f80e392 8355319: Update Manpage for Compact Object Headers (Production)
Reviewed-by: coleenp
Backport-of: 75ce44aa84
2025-06-26 12:32:36 +00:00
Martin Doerr
274a2dd729 8360405: [PPC64] some environments don't support mfdscr instruction
Reviewed-by: haosun, rrich
Backport-of: f71d64fbeb
2025-06-26 09:14:18 +00:00
Michael McMahon
a84946dde4 8359268: 3 JNI exception pending defect groups in 2 files
Reviewed-by: dfuchs, djelinski
Backport-of: 1fa090524a
2025-06-25 16:17:18 +00:00
Igor Veresov
fdb3e37c71 8359788: Internal Error: assert(get_instanceKlass()->is_loaded()) failed: must be at least loaded
Reviewed-by: shade
Backport-of: 5c4f92ba9a
2025-06-25 16:12:45 +00:00
Fei Yang
e23c817521 8360179: RISC-V: Only enable BigInteger intrinsics when AvoidUnalignedAccess == false
Backport-of: 34412da52b
2025-06-25 11:16:01 +00:00
Ravi Reddy
0ad5402463 8359059: Bump version numbers for 25.0.1
Reviewed-by: erikj
Backport-of: bff98e7d4d
2025-06-25 09:59:47 +00:00
Hannes Wallnöfer
80cb773b7e 8328848: Inaccuracy in the documentation of the -group option
Reviewed-by: liach
Backport-of: f8de5bc582
2025-06-25 05:40:18 +00:00
Hannes Wallnöfer
a576952039 8359024: Accessibility bugs in API documentation
Reviewed-by: liach
Backport-of: 9a726df373
2025-06-25 05:36:31 +00:00
Anthony Scarpino
b89f364842 8358099: PEM spec updates
Reviewed-by: weijun
Backport-of: 78158f30ae
2025-06-24 19:32:07 +00:00
Coleen Phillimore
0694cc1d52 8352075: Perf regression accessing fields
Reviewed-by: shade, iklam
Backport-of: e18277b470
2025-06-24 17:10:28 +00:00
Markus Grönlund
a3abaadc15 8360403: Disable constant pool ID assert during troubleshooting
Reviewed-by: egahlin
Backport-of: cbcf401170
2025-06-24 16:49:43 +00:00
Aleksey Shipilev
7cc1f82b84 8360042: GHA: Bump MSVC to 14.44
Reviewed-by: serb
Backport-of: 72679c94ee
2025-06-24 05:48:20 +00:00
William Kemper
636b56374e 8357550: GenShen crashes during freeze: assert(!chunk->requires_barriers()) failed
Reviewed-by: shade
Backport-of: 17cf49746d
2025-06-23 21:03:04 +00:00
Phil Race
fe9efb75b0 8358526: Clarify behavior of java.awt.HeadlessException constructed with no-args
Reviewed-by: honkar, tr, azvegint
Backport-of: 81985d422d
2025-06-23 17:05:48 +00:00
Erik Gahlin
ca6b165003 8359895: JFR: method-timing view doesn't work
Reviewed-by: mgronlun
Backport-of: 984d7f9cdf
2025-06-23 13:09:03 +00:00
Erik Gahlin
d5aa225451 8359242: JFR: Missing help text for method trace and timing
Reviewed-by: mgronlun
Backport-of: e57a214e2a
2025-06-23 12:22:30 +00:00
Matthias Bläsing
79a85df074 8353950: Clipboard interaction on Windows is unstable
8332271: Reading data from the clipboard from multiple threads crashes the JVM

Reviewed-by: prr
Backport-of: 92be7821f5
2025-06-20 21:49:26 +00:00
Jaikiran Pai
41928aed7d 8359709: java.net.HttpURLConnection sends unexpected "Host" request header in some cases after JDK-8344190
Reviewed-by: dfuchs
Backport-of: 57266064a7
2025-06-20 09:47:26 +00:00
Tobias Hartmann
3f6b0c69c3 8359386: Fix incorrect value for max_size of C2CodeStub when APX is used
Reviewed-by: mhaessig, epeter
Backport-of: b52af182c4
2025-06-20 08:29:10 +00:00
SendaoYan
36b185a930 8359402: Test CloseDescriptors.java should throw SkippedException when there is no lsof/sctp
Reviewed-by: jpai
Backport-of: a16d23557b
2025-06-20 06:26:52 +00:00
Erik Gahlin
c832f001e4 8359593: JFR: Instrumentation of java.lang.String corrupts recording
Reviewed-by: mgronlun
Backport-of: 2f2acb2e3f
2025-06-19 14:19:16 +00:00
Vladimir Kozlov
e5ac75a35b 8359646: C1 crash in AOTCodeAddressTable::add_C_string
Reviewed-by: shade, thartmann
Backport-of: 96070212ad
2025-06-19 13:41:06 +00:00
Erik Gahlin
b79ca5f03b 8359248: JFR: Help text for-XX:StartFlightRecording:report-on-exit should explain option can be repeated
Reviewed-by: mgronlun
Backport-of: fedd0a0ee3
2025-06-19 12:56:19 +00:00
Fei Yang
ee45ba9138 8359218: RISC-V: Only enable CRC32 intrinsic when AvoidUnalignedAccess == false
Backport-of: 65e63b6ab4
2025-06-18 11:50:22 +00:00
Stuart Marks
5bcea92eaa 8338140: (str) Add notes to String.trim and String.isEmpty pointing to newer APIs
Reviewed-by: naoto, bpb, liach
Backport-of: 06d804a0f0
2025-06-17 20:45:27 +00:00
Damon Fenacci
cc4e9716ac 8358129: compiler/startup/StartupOutput.java runs into out of memory on Windows after JDK-8347406
Reviewed-by: shade
Backport-of: 534a8605e5
2025-06-17 13:10:06 +00:00
Roland Westrelin
46cfc1e194 8358334: C2/Shenandoah: incorrect execution with Unsafe
Reviewed-by: thartmann
Backport-of: 1fcede053c
2025-06-17 08:06:58 +00:00
Rajan Halade
ae71782e77 8359170: Add 2 TLS and 2 CS Sectigo roots
Reviewed-by: mullan
Backport-of: 9586817cea
2025-06-17 06:10:35 +00:00
Ioi Lam
753700182d 8355556: JVM crash because archived method handle intrinsics are not restored
Reviewed-by: shade
Backport-of: 366650a438
2025-06-17 04:36:41 +00:00
SendaoYan
eb727dcb51 8359272: Several vmTestbase/compact tests timed out on large memory machine
Reviewed-by: ayang
Backport-of: a0fb35c837
2025-06-17 00:43:52 +00:00
Johannes Bechberger
b6cacfcbc8 8359135: New test TestCPUTimeSampleThrottling fails intermittently
Reviewed-by: mdoerr
Backport-of: 3f0fef2c9c
2025-06-16 16:20:54 +00:00
Hamlin Li
d870a48880 8358892: RISC-V: jvm crash when running dacapo sunflow after JDK-8352504
8359045: RISC-V: construct test to verify invocation of C2_MacroAssembler::enc_cmove_cmp_fp => BoolTest::ge/gt

Reviewed-by: fyang
Backport-of: 9d060574e5
2025-06-16 11:18:32 +00:00
Fernando Guallini
2ea2f74f92 8358171: Additional code coverage for PEM API
Reviewed-by: rhalade, ascarpino
Backport-of: b2e7cda6a0
2025-06-16 09:54:18 +00:00
Alan Bateman
077ce2edc7 8358764: (sc) SocketChannel.close when thread blocked in read causes connection to be reset (win)
Reviewed-by: iris, jpai
Backport-of: e5196fc24d
2025-06-16 09:19:56 +00:00
Tobias Hartmann
2a3294571a 8359327: Incorrect AVX3Threshold results into code buffer overflows on APX targets
Reviewed-by: chagedorn
Backport-of: e7f63ba310
2025-06-16 08:48:49 +00:00
SendaoYan
3877746eb9 8359181: Error messages generated by configure --help after 8301197
Reviewed-by: ihse
Backport-of: 7b7136b4ec
2025-06-15 12:25:17 +00:00
Tobias Hartmann
3bd80fe3ba 8357782: JVM JIT Causes Static Initialization Order Issue
Reviewed-by: shade
Backport-of: e8ef93ae9d
2025-06-15 09:05:56 +00:00
Tobias Hartmann
03232d4a5d 8359200: Memory corruption in MStack::push
Reviewed-by: epeter, shade
Backport-of: ed39e17e34
2025-06-15 09:04:55 +00:00
Daniel Fuchs
4111730845 8359364: java/net/URL/EarlyOrDelayedParsing test fails intermittently
Reviewed-by: alanb
Backport-of: 57cabc6d74
2025-06-13 16:54:40 +00:00
Kevin Walls
74ea38e406 8358701: Remove misleading javax.management.remote API doc wording about JMX spec, and historic link to JMXMP
Reviewed-by: alanb
Backport-of: 66535fe26d
2025-06-13 14:28:14 +00:00
Tobias Hartmann
839a91e14b 8357982: Fix several failing BMI tests with -XX:+UseAPX
Reviewed-by: chagedorn
Backport-of: c98dffa186
2025-06-12 11:11:41 +00:00
Daniel Fuchs
aa4f79eaec 8358617: java/net/HttpURLConnection/HttpURLConnectionExpectContinueTest.java fails with 403 due to system proxies
Reviewed-by: jpai
Backport-of: a377773fa7
2025-06-11 16:22:34 +00:00
Rob McKenna
bff98e7d4d 8359059: Bump version numbers for 25.0.1
Reviewed-by: iris
2025-06-09 23:01:41 +00:00
Stuart Marks
c7df72ff0f 8358809: Improve link to stdin.encoding from java.lang.IO
Reviewed-by: naoto
Backport-of: d024f58e61
2025-06-07 00:56:45 +00:00
Rajan Halade
80e066e733 8345414: Google CAInterop test failures
Reviewed-by: weijun
Backport-of: 8e9ba788ae
2025-06-06 21:31:33 +00:00
460 changed files with 11430 additions and 3764 deletions

View File

@@ -310,7 +310,7 @@ jobs:
uses: ./.github/workflows/build-windows.yml
with:
platform: windows-x64
msvc-toolset-version: '14.43'
msvc-toolset-version: '14.44'
msvc-toolset-architecture: 'x86.x64'
configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }}
@@ -322,7 +322,7 @@ jobs:
uses: ./.github/workflows/build-windows.yml
with:
platform: windows-aarch64
msvc-toolset-version: '14.43'
msvc-toolset-version: '14.44'
msvc-toolset-architecture: 'arm64'
make-target: 'hotspot'
extra-conf-options: '--openjdk-target=aarch64-unknown-cygwin'

View File

@@ -1,7 +1,7 @@
[general]
project=jdk
project=jdk-updates
jbs=JDK
version=25
version=25.0.1
[checks]
error=author,committer,reviewers,merge,issues,executable,symlink,message,hg-tag,whitespace,problemlists,copyright

View File

@@ -1,6 +1,6 @@
#!/bin/bash
#
# Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -366,7 +366,7 @@ EOT
# Print additional help, e.g. a list of toolchains and JVM features.
# This must be done by the autoconf script.
( CONFIGURE_PRINT_ADDITIONAL_HELP=true . $generated_script PRINTF=printf )
( CONFIGURE_PRINT_ADDITIONAL_HELP=true . $generated_script PRINTF=printf ECHO=echo )
cat <<EOT

View File

@@ -28,15 +28,15 @@
DEFAULT_VERSION_FEATURE=25
DEFAULT_VERSION_INTERIM=0
DEFAULT_VERSION_UPDATE=0
DEFAULT_VERSION_UPDATE=1
DEFAULT_VERSION_PATCH=0
DEFAULT_VERSION_EXTRA1=0
DEFAULT_VERSION_EXTRA2=0
DEFAULT_VERSION_EXTRA3=0
DEFAULT_VERSION_DATE=2025-09-16
DEFAULT_VERSION_DATE=2025-10-21
DEFAULT_VERSION_CLASSFILE_MAJOR=69 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
DEFAULT_VERSION_CLASSFILE_MINOR=0
DEFAULT_VERSION_DOCS_API_SINCE=11
DEFAULT_ACCEPTABLE_BOOT_VERSIONS="24 25"
DEFAULT_JDK_SOURCE_TARGET_VERSION=25
DEFAULT_PROMOTED_VERSION_PRE=ea
DEFAULT_PROMOTED_VERSION_PRE=

View File

@@ -542,10 +542,10 @@ class Bundle {
if (pattern != null) {
// Perform date-time format pattern conversion which is
// applicable to both SimpleDateFormat and j.t.f.DateTimeFormatter.
String transPattern = translateDateFormatLetters(calendarType, pattern, this::convertDateTimePatternLetter);
String transPattern = translateDateFormatLetters(calendarType, key, pattern, this::convertDateTimePatternLetter);
dateTimePatterns.add(i, transPattern);
// Additionally, perform SDF specific date-time format pattern conversion
sdfPatterns.add(i, translateDateFormatLetters(calendarType, transPattern, this::convertSDFLetter));
sdfPatterns.add(i, translateDateFormatLetters(calendarType, key, transPattern, this::convertSDFLetter));
} else {
dateTimePatterns.add(i, null);
sdfPatterns.add(i, null);
@@ -568,7 +568,7 @@ class Bundle {
}
}
private String translateDateFormatLetters(CalendarType calendarType, String cldrFormat, ConvertDateTimeLetters converter) {
private String translateDateFormatLetters(CalendarType calendarType, String patternKey, String cldrFormat, ConvertDateTimeLetters converter) {
String pattern = cldrFormat;
int length = pattern.length();
boolean inQuote = false;
@@ -587,7 +587,7 @@ class Bundle {
if (nextc == '\'') {
i++;
if (count != 0) {
converter.convert(calendarType, lastLetter, count, jrePattern);
converter.convert(calendarType, patternKey, lastLetter, count, jrePattern);
lastLetter = 0;
count = 0;
}
@@ -597,7 +597,7 @@ class Bundle {
}
if (!inQuote) {
if (count != 0) {
converter.convert(calendarType, lastLetter, count, jrePattern);
converter.convert(calendarType, patternKey, lastLetter, count, jrePattern);
lastLetter = 0;
count = 0;
}
@@ -614,7 +614,7 @@ class Bundle {
}
if (!(c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z')) {
if (count != 0) {
converter.convert(calendarType, lastLetter, count, jrePattern);
converter.convert(calendarType, patternKey, lastLetter, count, jrePattern);
lastLetter = 0;
count = 0;
}
@@ -627,7 +627,7 @@ class Bundle {
count++;
continue;
}
converter.convert(calendarType, lastLetter, count, jrePattern);
converter.convert(calendarType, patternKey, lastLetter, count, jrePattern);
lastLetter = c;
count = 1;
}
@@ -637,7 +637,7 @@ class Bundle {
}
if (count != 0) {
converter.convert(calendarType, lastLetter, count, jrePattern);
converter.convert(calendarType, patternKey, lastLetter, count, jrePattern);
}
if (cldrFormat.contentEquals(jrePattern)) {
return cldrFormat;
@@ -661,7 +661,7 @@ class Bundle {
* on the support given by the SimpleDateFormat and the j.t.f.DateTimeFormatter
* for date-time formatting.
*/
private void convertDateTimePatternLetter(CalendarType calendarType, char cldrLetter, int count, StringBuilder sb) {
private void convertDateTimePatternLetter(CalendarType calendarType, String patternKey, char cldrLetter, int count, StringBuilder sb) {
switch (cldrLetter) {
case 'u':
case 'U':
@@ -683,7 +683,7 @@ class Bundle {
* Perform a conversion of CLDR date-time format pattern letter which is
* specific to the SimpleDateFormat.
*/
private void convertSDFLetter(CalendarType calendarType, char cldrLetter, int count, StringBuilder sb) {
private void convertSDFLetter(CalendarType calendarType, String patternKey, char cldrLetter, int count, StringBuilder sb) {
switch (cldrLetter) {
case 'G':
if (calendarType != CalendarType.GREGORIAN) {
@@ -722,6 +722,17 @@ class Bundle {
appendN('z', count, sb);
break;
case 'y':
// If the style is FULL/LONG for a Japanese Calendar, make the
// count == 4 for Gan-nen
if (calendarType == CalendarType.JAPANESE &&
(patternKey.contains("full-") ||
patternKey.contains("long-"))) {
count = 4;
}
appendN(cldrLetter, count, sb);
break;
case 'Z':
if (count == 4 || count == 5) {
sb.append("XXX");
@@ -767,6 +778,7 @@ class Bundle {
.collect(Collectors.toMap(
e -> calendarPrefix + e.getKey(),
e -> translateDateFormatLetters(calendarType,
e.getKey(),
(String)e.getValue(),
this::convertDateTimePatternLetter)
))
@@ -775,7 +787,7 @@ class Bundle {
@FunctionalInterface
private interface ConvertDateTimeLetters {
void convert(CalendarType calendarType, char cldrLetter, int count, StringBuilder sb);
void convert(CalendarType calendarType, String patternKey, char cldrLetter, int count, StringBuilder sb);
}
/**

View File

@@ -456,13 +456,13 @@ SliderDemo.horizontal=Horizontal
SliderDemo.vertical=Vertikal
SliderDemo.plain=Einfach
SliderDemo.a_plain_slider=Ein einfacher Schieberegler
SliderDemo.majorticks=Grobteilungen
SliderDemo.majorticksdescription=Ein Schieberegler mit Grobteilungsmarkierungen
SliderDemo.ticks=Feinteilungen, Teilungen zum Einrasten und Labels
SliderDemo.minorticks=Feinteilungen
SliderDemo.minorticksdescription=Ein Schieberegler mit Grob- und Feinteilungen, mit Teilungen, in die der Schieberegler einrastet, wobei einige Teilungen mit einem sichtbaren Label versehen sind
SliderDemo.majorticks=Hauptteilstriche
SliderDemo.majorticksdescription=Ein Schieberegler mit Hauptteilstrichen
SliderDemo.ticks=Hilfsteilstriche, zum Einrasten und Beschriften
SliderDemo.minorticks=Hilfsteilstriche
SliderDemo.minorticksdescription=Ein Schieberegler mit Haupt- und Hilfsteilstrichen, in die der Schieberegler einrastet, wobei einige Teilstriche mit einer sichtbaren Beschriftung versehen sind
SliderDemo.disabled=Deaktiviert
SliderDemo.disableddescription=Ein Schieberegler mit Grob- und Feinteilungen, der nicht aktiviert ist (kann nicht bearbeitet werden)
SliderDemo.disableddescription=Ein Schieberegler mit Haupt- und Hilfsteilstrichen, der nicht aktiviert ist (kann nicht bearbeitet werden)
### SplitPane Demo ###

View File

@@ -292,7 +292,8 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
} else {
assert(is_phantom, "only remaining strength");
assert(!is_narrow, "phantom access cannot be narrow");
__ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom));
// AOT saved adapters need relocation for this call.
__ lea(lr, RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom)));
}
__ blr(lr);
__ mov(rscratch1, r0);

View File

@@ -8888,13 +8888,8 @@ instruct TailCalljmpInd(IPRegP jump_target, inline_cache_regP method_ptr) %{
match(TailCall jump_target method_ptr);
ins_cost(CALL_COST);
format %{ "MOV Rexception_pc, LR\n\t"
"jump $jump_target \t! $method_ptr holds method" %}
format %{ "jump $jump_target \t! $method_ptr holds method" %}
ins_encode %{
__ mov(Rexception_pc, LR); // this is used only to call
// StubRoutines::forward_exception_entry()
// which expects PC of exception in
// R5. FIXME?
__ jump($jump_target$$Register);
%}
ins_pipe(tail_call);
@@ -8939,8 +8934,10 @@ instruct ForwardExceptionjmp()
match(ForwardException);
ins_cost(CALL_COST);
format %{ "b forward_exception_stub" %}
format %{ "MOV Rexception_pc, LR\n\t"
"b forward_exception_entry" %}
ins_encode %{
__ mov(Rexception_pc, LR);
// OK to trash Rtemp, because Rtemp is used by stub
__ jump(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type, Rtemp);
%}

View File

@@ -3928,8 +3928,10 @@ void MacroAssembler::kernel_crc32_vpmsum_aligned(Register crc, Register buf, Reg
Label L_outer_loop, L_inner_loop, L_last;
// Set DSCR pre-fetch to deepest.
load_const_optimized(t0, VM_Version::_dscr_val | 7);
mtdscr(t0);
if (VM_Version::has_mfdscr()) {
load_const_optimized(t0, VM_Version::_dscr_val | 7);
mtdscr(t0);
}
mtvrwz(VCRC, crc); // crc lives in VCRC, now
@@ -4073,8 +4075,10 @@ void MacroAssembler::kernel_crc32_vpmsum_aligned(Register crc, Register buf, Reg
// ********** Main loop end **********
// Restore DSCR pre-fetch value.
load_const_optimized(t0, VM_Version::_dscr_val);
mtdscr(t0);
if (VM_Version::has_mfdscr()) {
load_const_optimized(t0, VM_Version::_dscr_val);
mtdscr(t0);
}
// ********** Simple loop for remaining 16 byte blocks **********
{

View File

@@ -952,8 +952,10 @@ class StubGenerator: public StubCodeGenerator {
address start_pc = __ pc();
Register tmp1 = R6_ARG4;
// probably copy stub would have changed value reset it.
__ load_const_optimized(tmp1, VM_Version::_dscr_val);
__ mtdscr(tmp1);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp1, VM_Version::_dscr_val);
__ mtdscr(tmp1);
}
__ li(R3_RET, 0); // return 0
__ blr();
return start_pc;
@@ -1070,9 +1072,10 @@ class StubGenerator: public StubCodeGenerator {
__ dcbt(R3_ARG1, 0);
// If supported set DSCR pre-fetch to deepest.
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
__ li(tmp1, 16);
// Backbranch target aligned to 32-byte. Not 16-byte align as
@@ -1092,8 +1095,10 @@ class StubGenerator: public StubCodeGenerator {
__ bdnz(l_10); // Dec CTR and loop if not zero.
// Restore DSCR pre-fetch value.
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}
} // FasterArrayCopy
@@ -1344,8 +1349,10 @@ class StubGenerator: public StubCodeGenerator {
__ dcbt(R3_ARG1, 0);
// If supported set DSCR pre-fetch to deepest.
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
__ li(tmp1, 16);
// Backbranch target aligned to 32-byte. It's not aligned 16-byte
@@ -1365,8 +1372,11 @@ class StubGenerator: public StubCodeGenerator {
__ bdnz(l_9); // Dec CTR and loop if not zero.
// Restore DSCR pre-fetch value.
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}
} // FasterArrayCopy
__ bind(l_6);
@@ -1527,9 +1537,10 @@ class StubGenerator: public StubCodeGenerator {
__ dcbt(R3_ARG1, 0);
// Set DSCR pre-fetch to deepest.
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
__ li(tmp1, 16);
// Backbranch target aligned to 32-byte. Not 16-byte align as
@@ -1549,9 +1560,10 @@ class StubGenerator: public StubCodeGenerator {
__ bdnz(l_7); // Dec CTR and loop if not zero.
// Restore DSCR pre-fetch value.
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}
} // FasterArrayCopy
@@ -1672,9 +1684,10 @@ class StubGenerator: public StubCodeGenerator {
__ dcbt(R3_ARG1, 0);
// Set DSCR pre-fetch to deepest.
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
__ li(tmp1, 16);
// Backbranch target aligned to 32-byte. Not 16-byte align as
@@ -1694,8 +1707,10 @@ class StubGenerator: public StubCodeGenerator {
__ bdnz(l_4);
// Restore DSCR pre-fetch value.
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}
__ cmpwi(CR0, R5_ARG3, 0);
__ beq(CR0, l_6);
@@ -1788,9 +1803,10 @@ class StubGenerator: public StubCodeGenerator {
__ dcbt(R3_ARG1, 0);
// Set DSCR pre-fetch to deepest.
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
__ li(tmp1, 16);
// Backbranch target aligned to 32-byte. Not 16-byte align as
@@ -1810,8 +1826,10 @@ class StubGenerator: public StubCodeGenerator {
__ bdnz(l_5); // Dec CTR and loop if not zero.
// Restore DSCR pre-fetch value.
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}
} // FasterArrayCopy
@@ -1910,9 +1928,10 @@ class StubGenerator: public StubCodeGenerator {
__ dcbt(R3_ARG1, 0);
// Set DSCR pre-fetch to deepest.
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
__ li(tmp1, 16);
// Backbranch target aligned to 32-byte. Not 16-byte align as
@@ -1932,8 +1951,10 @@ class StubGenerator: public StubCodeGenerator {
__ bdnz(l_4);
// Restore DSCR pre-fetch value.
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}
__ cmpwi(CR0, R5_ARG3, 0);
__ beq(CR0, l_1);

View File

@@ -80,7 +80,9 @@ void VM_Version::initialize() {
"%zu on this machine", PowerArchitecturePPC64);
// Power 8: Configure Data Stream Control Register.
config_dscr();
if (VM_Version::has_mfdscr()) {
config_dscr();
}
if (!UseSIGTRAP) {
MSG(TrapBasedICMissChecks);
@@ -170,7 +172,8 @@ void VM_Version::initialize() {
// Create and print feature-string.
char buf[(num_features+1) * 16]; // Max 16 chars per feature.
jio_snprintf(buf, sizeof(buf),
"ppc64 sha aes%s%s",
"ppc64 sha aes%s%s%s",
(has_mfdscr() ? " mfdscr" : ""),
(has_darn() ? " darn" : ""),
(has_brw() ? " brw" : "")
// Make sure number of %s matches num_features!
@@ -488,6 +491,7 @@ void VM_Version::determine_features() {
uint32_t *code = (uint32_t *)a->pc();
// Keep R3_ARG1 unmodified, it contains &field (see below).
// Keep R4_ARG2 unmodified, it contains offset = 0 (see below).
a->mfdscr(R0);
a->darn(R7);
a->brw(R5, R6);
a->blr();
@@ -524,6 +528,7 @@ void VM_Version::determine_features() {
// determine which instructions are legal.
int feature_cntr = 0;
if (code[feature_cntr++]) features |= mfdscr_m;
if (code[feature_cntr++]) features |= darn_m;
if (code[feature_cntr++]) features |= brw_m;

View File

@@ -32,12 +32,14 @@
class VM_Version: public Abstract_VM_Version {
protected:
enum Feature_Flag {
mfdscr,
darn,
brw,
num_features // last entry to count features
};
enum Feature_Flag_Set {
unknown_m = 0,
mfdscr_m = (1 << mfdscr ),
darn_m = (1 << darn ),
brw_m = (1 << brw ),
all_features_m = (unsigned long)-1
@@ -67,8 +69,9 @@ public:
static bool is_determine_features_test_running() { return _is_determine_features_test_running; }
// CPU instruction support
static bool has_darn() { return (_features & darn_m) != 0; }
static bool has_brw() { return (_features & brw_m) != 0; }
static bool has_mfdscr() { return (_features & mfdscr_m) != 0; } // Power8, but may be unavailable (QEMU)
static bool has_darn() { return (_features & darn_m) != 0; }
static bool has_brw() { return (_features & brw_m) != 0; }
// Assembler testing
static void allow_all();

View File

@@ -2170,15 +2170,13 @@ void C2_MacroAssembler::enc_cmove_cmp_fp(int cmpFlag, FloatRegister op1, FloatRe
cmov_cmp_fp_le(op1, op2, dst, src, is_single);
break;
case BoolTest::ge:
assert(false, "Should go to BoolTest::le case");
ShouldNotReachHere();
cmov_cmp_fp_ge(op1, op2, dst, src, is_single);
break;
case BoolTest::lt:
cmov_cmp_fp_lt(op1, op2, dst, src, is_single);
break;
case BoolTest::gt:
assert(false, "Should go to BoolTest::lt case");
ShouldNotReachHere();
cmov_cmp_fp_gt(op1, op2, dst, src, is_single);
break;
default:
assert(false, "unsupported compare condition");

View File

@@ -1268,12 +1268,19 @@ void MacroAssembler::cmov_gtu(Register cmp1, Register cmp2, Register dst, Regist
}
// ----------- cmove, compare float -----------
//
// For CmpF/D + CMoveI/L, ordered ones are quite straight and simple,
// so, just list behaviour of unordered ones as follow.
//
// Set dst (CMoveI (Binary cop (CmpF/D op1 op2)) (Binary dst src))
// (If one or both inputs to the compare are NaN, then)
// 1. (op1 lt op2) => true => CMove: dst = src
// 2. (op1 le op2) => true => CMove: dst = src
// 3. (op1 gt op2) => false => CMove: dst = dst
// 4. (op1 ge op2) => false => CMove: dst = dst
// 5. (op1 eq op2) => false => CMove: dst = dst
// 6. (op1 ne op2) => true => CMove: dst = src
// Move src to dst only if cmp1 == cmp2,
// otherwise leave dst unchanged, including the case where one of them is NaN.
// Clarification:
// java code : cmp1 != cmp2 ? dst : src
// transformed to : CMove dst, (cmp1 eq cmp2), dst, src
void MacroAssembler::cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
if (UseZicond) {
if (is_single) {
@@ -1289,7 +1296,7 @@ void MacroAssembler::cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Regi
Label no_set;
if (is_single) {
// jump if cmp1 != cmp2, including the case of NaN
// not jump (i.e. move src to dst) if cmp1 == cmp2
// fallthrough (i.e. move src to dst) if cmp1 == cmp2
float_bne(cmp1, cmp2, no_set);
} else {
double_bne(cmp1, cmp2, no_set);
@@ -1298,11 +1305,6 @@ void MacroAssembler::cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Regi
bind(no_set);
}
// Keep dst unchanged only if cmp1 == cmp2,
// otherwise move src to dst, including the case where one of them is NaN.
// Clarification:
// java code : cmp1 == cmp2 ? dst : src
// transformed to : CMove dst, (cmp1 ne cmp2), dst, src
void MacroAssembler::cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
if (UseZicond) {
if (is_single) {
@@ -1318,7 +1320,7 @@ void MacroAssembler::cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Regi
Label no_set;
if (is_single) {
// jump if cmp1 == cmp2
// not jump (i.e. move src to dst) if cmp1 != cmp2, including the case of NaN
// fallthrough (i.e. move src to dst) if cmp1 != cmp2, including the case of NaN
float_beq(cmp1, cmp2, no_set);
} else {
double_beq(cmp1, cmp2, no_set);
@@ -1327,14 +1329,6 @@ void MacroAssembler::cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Regi
bind(no_set);
}
// When cmp1 <= cmp2 or any of them is NaN then dst = src, otherwise, dst = dst
// Clarification
// scenario 1:
// java code : cmp2 < cmp1 ? dst : src
// transformed to : CMove dst, (cmp1 le cmp2), dst, src
// scenario 2:
// java code : cmp1 > cmp2 ? dst : src
// transformed to : CMove dst, (cmp1 le cmp2), dst, src
void MacroAssembler::cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
if (UseZicond) {
if (is_single) {
@@ -1350,7 +1344,7 @@ void MacroAssembler::cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Regi
Label no_set;
if (is_single) {
// jump if cmp1 > cmp2
// not jump (i.e. move src to dst) if cmp1 <= cmp2 or either is NaN
// fallthrough (i.e. move src to dst) if cmp1 <= cmp2 or either is NaN
float_bgt(cmp1, cmp2, no_set);
} else {
double_bgt(cmp1, cmp2, no_set);
@@ -1359,14 +1353,30 @@ void MacroAssembler::cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Regi
bind(no_set);
}
// When cmp1 < cmp2 or any of them is NaN then dst = src, otherwise, dst = dst
// Clarification
// scenario 1:
// java code : cmp2 <= cmp1 ? dst : src
// transformed to : CMove dst, (cmp1 lt cmp2), dst, src
// scenario 2:
// java code : cmp1 >= cmp2 ? dst : src
// transformed to : CMove dst, (cmp1 lt cmp2), dst, src
void MacroAssembler::cmov_cmp_fp_ge(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
if (UseZicond) {
if (is_single) {
fle_s(t0, cmp2, cmp1);
} else {
fle_d(t0, cmp2, cmp1);
}
czero_nez(dst, dst, t0);
czero_eqz(t0 , src, t0);
orr(dst, dst, t0);
return;
}
Label no_set;
if (is_single) {
// jump if cmp1 < cmp2 or either is NaN
// fallthrough (i.e. move src to dst) if cmp1 >= cmp2
float_blt(cmp1, cmp2, no_set, false, true);
} else {
double_blt(cmp1, cmp2, no_set, false, true);
}
mv(dst, src);
bind(no_set);
}
void MacroAssembler::cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
if (UseZicond) {
if (is_single) {
@@ -1382,7 +1392,7 @@ void MacroAssembler::cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Regi
Label no_set;
if (is_single) {
// jump if cmp1 >= cmp2
// not jump (i.e. move src to dst) if cmp1 < cmp2 or either is NaN
// fallthrough (i.e. move src to dst) if cmp1 < cmp2 or either is NaN
float_bge(cmp1, cmp2, no_set);
} else {
double_bge(cmp1, cmp2, no_set);
@@ -1391,6 +1401,30 @@ void MacroAssembler::cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Regi
bind(no_set);
}
void MacroAssembler::cmov_cmp_fp_gt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
if (UseZicond) {
if (is_single) {
flt_s(t0, cmp2, cmp1);
} else {
flt_d(t0, cmp2, cmp1);
}
czero_nez(dst, dst, t0);
czero_eqz(t0 , src, t0);
orr(dst, dst, t0);
return;
}
Label no_set;
if (is_single) {
// jump if cmp1 <= cmp2 or either is NaN
// fallthrough (i.e. move src to dst) if cmp1 > cmp2
float_ble(cmp1, cmp2, no_set, false, true);
} else {
double_ble(cmp1, cmp2, no_set, false, true);
}
mv(dst, src);
bind(no_set);
}
// Float compare branch instructions
#define INSN(NAME, FLOATCMP, BRANCH) \
@@ -5310,42 +5344,6 @@ void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, R
add(final_dest_hi, dest_hi, carry);
}
/**
* Multiply 32 bit by 32 bit first loop.
*/
void MacroAssembler::multiply_32_x_32_loop(Register x, Register xstart, Register x_xstart,
Register y, Register y_idx, Register z,
Register carry, Register product,
Register idx, Register kdx) {
// jlong carry, x[], y[], z[];
// for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
// long product = y[idx] * x[xstart] + carry;
// z[kdx] = (int)product;
// carry = product >>> 32;
// }
// z[xstart] = (int)carry;
Label L_first_loop, L_first_loop_exit;
blez(idx, L_first_loop_exit);
shadd(t0, xstart, x, t0, LogBytesPerInt);
lwu(x_xstart, Address(t0, 0));
bind(L_first_loop);
subiw(idx, idx, 1);
shadd(t0, idx, y, t0, LogBytesPerInt);
lwu(y_idx, Address(t0, 0));
mul(product, x_xstart, y_idx);
add(product, product, carry);
srli(carry, product, 32);
subiw(kdx, kdx, 1);
shadd(t0, kdx, z, t0, LogBytesPerInt);
sw(product, Address(t0, 0));
bgtz(idx, L_first_loop);
bind(L_first_loop_exit);
}
/**
* Multiply 64 bit by 64 bit first loop.
*/
@@ -5562,77 +5560,16 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Regi
const Register carry = tmp5;
const Register product = xlen;
const Register x_xstart = tmp0;
const Register jdx = tmp1;
mv(idx, ylen); // idx = ylen;
addw(kdx, xlen, ylen); // kdx = xlen+ylen;
mv(carry, zr); // carry = 0;
Label L_multiply_64_x_64_loop, L_done;
Label L_done;
subiw(xstart, xlen, 1);
bltz(xstart, L_done);
const Register jdx = tmp1;
if (AvoidUnalignedAccesses) {
int base_offset = arrayOopDesc::base_offset_in_bytes(T_INT);
assert((base_offset % (UseCompactObjectHeaders ? 4 :
(UseCompressedClassPointers ? 8 : 4))) == 0, "Must be");
if ((base_offset % 8) == 0) {
// multiply_64_x_64_loop emits 8-byte load/store to access two elements
// at a time from int arrays x and y. When base_offset is 8 bytes, these
// accesses are naturally aligned if both xlen and ylen are even numbers.
orr(t0, xlen, ylen);
test_bit(t0, t0, 0);
beqz(t0, L_multiply_64_x_64_loop);
}
Label L_second_loop_unaligned, L_third_loop, L_third_loop_exit;
multiply_32_x_32_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
shadd(t0, xstart, z, t0, LogBytesPerInt);
sw(carry, Address(t0, 0));
bind(L_second_loop_unaligned);
mv(carry, zr);
mv(jdx, ylen);
subiw(xstart, xstart, 1);
bltz(xstart, L_done);
subi(sp, sp, 2 * wordSize);
sd(z, Address(sp, 0));
sd(zr, Address(sp, wordSize));
shadd(t0, xstart, z, t0, LogBytesPerInt);
addi(z, t0, 4);
shadd(t0, xstart, x, t0, LogBytesPerInt);
lwu(product, Address(t0, 0));
blez(jdx, L_third_loop_exit);
bind(L_third_loop);
subiw(jdx, jdx, 1);
shadd(t0, jdx, y, t0, LogBytesPerInt);
lwu(t0, Address(t0, 0));
mul(t1, t0, product);
add(t0, t1, carry);
shadd(tmp6, jdx, z, t1, LogBytesPerInt);
lwu(t1, Address(tmp6, 0));
add(t0, t0, t1);
sw(t0, Address(tmp6, 0));
srli(carry, t0, 32);
bgtz(jdx, L_third_loop);
bind(L_third_loop_exit);
ld(z, Address(sp, 0));
addi(sp, sp, 2 * wordSize);
shadd(t0, xstart, z, t0, LogBytesPerInt);
sw(carry, Address(t0, 0));
j(L_second_loop_unaligned);
}
bind(L_multiply_64_x_64_loop);
multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
Label L_second_loop_aligned;

View File

@@ -660,7 +660,9 @@ class MacroAssembler: public Assembler {
void cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
void cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
void cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
void cmov_cmp_fp_ge(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
void cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
void cmov_cmp_fp_gt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
public:
// We try to follow risc-v asm menomics.
@@ -1382,10 +1384,6 @@ public:
void adc(Register dst, Register src1, Register src2, Register carry);
void add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
Register src1, Register src2, Register carry);
void multiply_32_x_32_loop(Register x, Register xstart, Register x_xstart,
Register y, Register y_idx, Register z,
Register carry, Register product,
Register idx, Register kdx);
void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
Register y, Register y_idx, Register z,
Register carry, Register product,

View File

@@ -8431,6 +8431,17 @@ instruct castVV(vReg dst)
ins_pipe(pipe_class_empty);
%}
instruct castVVMask(vRegMask dst)
%{
match(Set dst (CastVV dst));
size(0);
format %{ "# castVV of $dst" %}
ins_encode(/* empty encoding */);
ins_cost(0);
ins_pipe(pipe_class_empty);
%}
// ============================================================================
// Convert Instructions

View File

@@ -203,15 +203,15 @@ void VM_Version::common_initialize() {
}
}
// Misc Intrinsics could depend on RVV
// Misc Intrinsics that could depend on RVV.
if (UseZba || UseRVV) {
if (!AvoidUnalignedAccesses && (UseZba || UseRVV)) {
if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
}
} else {
if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
warning("CRC32 intrinsic requires Zba or RVV instructions (not available on this CPU)");
warning("CRC32 intrinsic are not available on this CPU.");
}
FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
}
@@ -325,20 +325,40 @@ void VM_Version::c2_initialize() {
FLAG_SET_DEFAULT(UseMulAddIntrinsic, true);
}
if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, true);
if (!AvoidUnalignedAccesses) {
if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, true);
}
} else if (UseMultiplyToLenIntrinsic) {
warning("Intrinsics for BigInteger.multiplyToLen() not available on this CPU.");
FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, false);
}
if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, true);
if (!AvoidUnalignedAccesses) {
if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, true);
}
} else if (UseSquareToLenIntrinsic) {
warning("Intrinsics for BigInteger.squareToLen() not available on this CPU.");
FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, false);
}
if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, true);
if (!AvoidUnalignedAccesses) {
if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, true);
}
} else if (UseMontgomeryMultiplyIntrinsic) {
warning("Intrinsics for BigInteger.montgomeryMultiply() not available on this CPU.");
FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, false);
}
if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, true);
if (!AvoidUnalignedAccesses) {
if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, true);
}
} else if (UseMontgomerySquareIntrinsic) {
warning("Intrinsics for BigInteger.montgomerySquare() not available on this CPU.");
FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, false);
}
// Adler32

View File

@@ -15681,6 +15681,8 @@ void Assembler::pusha_uncached() { // 64bit
// Push pair of original stack pointer along with remaining registers
// at 16B aligned boundary.
push2p(rax, r31);
// Restore the original contents of RAX register.
movq(rax, Address(rax));
push2p(r30, r29);
push2p(r28, r27);
push2p(r26, r25);

View File

@@ -4655,6 +4655,7 @@ static void convertF2I_slowpath(C2_MacroAssembler& masm, C2GeneralStub<Register,
__ subptr(rsp, 8);
__ movdbl(Address(rsp), src);
__ call(RuntimeAddress(target));
// APX REX2 encoding for pop(dst) increases the stub size by 1 byte.
__ pop(dst);
__ jmp(stub.continuation());
#undef __
@@ -4687,7 +4688,9 @@ void C2_MacroAssembler::convertF2I(BasicType dst_bt, BasicType src_bt, Register
}
}
auto stub = C2CodeStub::make<Register, XMMRegister, address>(dst, src, slowpath_target, 23, convertF2I_slowpath);
// Using the APX extended general purpose registers increases the instruction encoding size by 1 byte.
int max_size = 23 + (UseAPX ? 1 : 0);
auto stub = C2CodeStub::make<Register, XMMRegister, address>(dst, src, slowpath_target, max_size, convertF2I_slowpath);
jcc(Assembler::equal, stub->entry());
bind(stub->continuation());
}

View File

@@ -353,7 +353,7 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
// The rest is saved with the optimized path
uint num_saved_regs = 4 + (dst != rax ? 1 : 0) + 4;
uint num_saved_regs = 4 + (dst != rax ? 1 : 0) + 4 + (UseAPX ? 16 : 0);
__ subptr(rsp, num_saved_regs * wordSize);
uint slot = num_saved_regs;
if (dst != rax) {
@@ -367,6 +367,25 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
__ movptr(Address(rsp, (--slot) * wordSize), r9);
__ movptr(Address(rsp, (--slot) * wordSize), r10);
__ movptr(Address(rsp, (--slot) * wordSize), r11);
// Save APX extended registers r16r31 if enabled
if (UseAPX) {
__ movptr(Address(rsp, (--slot) * wordSize), r16);
__ movptr(Address(rsp, (--slot) * wordSize), r17);
__ movptr(Address(rsp, (--slot) * wordSize), r18);
__ movptr(Address(rsp, (--slot) * wordSize), r19);
__ movptr(Address(rsp, (--slot) * wordSize), r20);
__ movptr(Address(rsp, (--slot) * wordSize), r21);
__ movptr(Address(rsp, (--slot) * wordSize), r22);
__ movptr(Address(rsp, (--slot) * wordSize), r23);
__ movptr(Address(rsp, (--slot) * wordSize), r24);
__ movptr(Address(rsp, (--slot) * wordSize), r25);
__ movptr(Address(rsp, (--slot) * wordSize), r26);
__ movptr(Address(rsp, (--slot) * wordSize), r27);
__ movptr(Address(rsp, (--slot) * wordSize), r28);
__ movptr(Address(rsp, (--slot) * wordSize), r29);
__ movptr(Address(rsp, (--slot) * wordSize), r30);
__ movptr(Address(rsp, (--slot) * wordSize), r31);
}
// r12-r15 are callee saved in all calling conventions
assert(slot == 0, "must use all slots");
@@ -398,6 +417,25 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), arg0, arg1);
}
// Restore APX extended registers r31r16 if previously saved
if (UseAPX) {
__ movptr(r31, Address(rsp, (slot++) * wordSize));
__ movptr(r30, Address(rsp, (slot++) * wordSize));
__ movptr(r29, Address(rsp, (slot++) * wordSize));
__ movptr(r28, Address(rsp, (slot++) * wordSize));
__ movptr(r27, Address(rsp, (slot++) * wordSize));
__ movptr(r26, Address(rsp, (slot++) * wordSize));
__ movptr(r25, Address(rsp, (slot++) * wordSize));
__ movptr(r24, Address(rsp, (slot++) * wordSize));
__ movptr(r23, Address(rsp, (slot++) * wordSize));
__ movptr(r22, Address(rsp, (slot++) * wordSize));
__ movptr(r21, Address(rsp, (slot++) * wordSize));
__ movptr(r20, Address(rsp, (slot++) * wordSize));
__ movptr(r19, Address(rsp, (slot++) * wordSize));
__ movptr(r18, Address(rsp, (slot++) * wordSize));
__ movptr(r17, Address(rsp, (slot++) * wordSize));
__ movptr(r16, Address(rsp, (slot++) * wordSize));
}
__ movptr(r11, Address(rsp, (slot++) * wordSize));
__ movptr(r10, Address(rsp, (slot++) * wordSize));
__ movptr(r9, Address(rsp, (slot++) * wordSize));

View File

@@ -30,7 +30,7 @@
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_blob(initial, 20000 WINDOWS_ONLY(+1000)) \
do_arch_blob(initial, PRODUCT_ONLY(20000) NOT_PRODUCT(21000) WINDOWS_ONLY(+1000)) \
do_stub(initial, verify_mxcsr) \
do_arch_entry(x86, initial, verify_mxcsr, verify_mxcsr_entry, \
verify_mxcsr_entry) \
@@ -239,7 +239,7 @@
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_blob(final, 31000 \
do_arch_blob(final, 33000 \
WINDOWS_ONLY(+22000) ZGC_ONLY(+20000)) \
#endif // CPU_X86_STUBDECLARATIONS_HPP

View File

@@ -46,6 +46,12 @@
//
/******************************************************************************/
/* Represents 0x7FFFFFFFFFFFFFFF double precision in lower 64 bits*/
ATTRIBUTE_ALIGNED(16) static const juint _ABS_MASK[] =
{
4294967295, 2147483647, 0, 0
};
ATTRIBUTE_ALIGNED(4) static const juint _SIG_MASK[] =
{
0, 1032192
@@ -188,10 +194,10 @@ address StubGenerator::generate_libmCbrt() {
StubCodeMark mark(this, stub_id);
address start = __ pc();
Label L_2TAG_PACKET_0_0_1, L_2TAG_PACKET_1_0_1, L_2TAG_PACKET_2_0_1, L_2TAG_PACKET_3_0_1;
Label L_2TAG_PACKET_4_0_1, L_2TAG_PACKET_5_0_1, L_2TAG_PACKET_6_0_1;
Label L_2TAG_PACKET_0_0_1, L_2TAG_PACKET_1_0_1, L_2TAG_PACKET_2_0_1;
Label B1_1, B1_2, B1_4;
address ABS_MASK = (address)_ABS_MASK;
address SIG_MASK = (address)_SIG_MASK;
address EXP_MASK = (address)_EXP_MASK;
address EXP_MSK2 = (address)_EXP_MSK2;
@@ -208,8 +214,12 @@ address StubGenerator::generate_libmCbrt() {
__ enter(); // required for proper stackwalking of RuntimeStub frame
__ bind(B1_1);
__ subq(rsp, 24);
__ movsd(Address(rsp), xmm0);
__ ucomisd(xmm0, ExternalAddress(ZERON), r11 /*rscratch*/);
__ jcc(Assembler::equal, L_2TAG_PACKET_1_0_1); // Branch only if x is +/- zero or NaN
__ movq(xmm1, xmm0);
__ andpd(xmm1, ExternalAddress(ABS_MASK), r11 /*rscratch*/);
__ ucomisd(xmm1, ExternalAddress(INF), r11 /*rscratch*/);
__ jcc(Assembler::equal, B1_4); // Branch only if x is +/- INF
__ bind(B1_2);
__ movq(xmm7, xmm0);
@@ -228,8 +238,6 @@ address StubGenerator::generate_libmCbrt() {
__ andl(rdx, rax);
__ cmpl(rdx, 0);
__ jcc(Assembler::equal, L_2TAG_PACKET_0_0_1); // Branch only if |x| is denormalized
__ cmpl(rdx, 524032);
__ jcc(Assembler::equal, L_2TAG_PACKET_1_0_1); // Branch only if |x| is INF or NaN
__ shrl(rdx, 8);
__ shrq(r9, 8);
__ andpd(xmm2, xmm0);
@@ -297,8 +305,6 @@ address StubGenerator::generate_libmCbrt() {
__ andl(rdx, rax);
__ shrl(rdx, 8);
__ shrq(r9, 8);
__ cmpl(rdx, 0);
__ jcc(Assembler::equal, L_2TAG_PACKET_3_0_1); // Branch only if |x| is zero
__ andpd(xmm2, xmm0);
__ andpd(xmm0, xmm5);
__ orpd(xmm3, xmm2);
@@ -322,41 +328,10 @@ address StubGenerator::generate_libmCbrt() {
__ psllq(xmm7, 52);
__ jmp(L_2TAG_PACKET_2_0_1);
__ bind(L_2TAG_PACKET_3_0_1);
__ cmpq(r9, 0);
__ jcc(Assembler::notEqual, L_2TAG_PACKET_4_0_1); // Branch only if x is negative zero
__ xorpd(xmm0, xmm0);
__ jmp(B1_4);
__ bind(L_2TAG_PACKET_4_0_1);
__ movsd(xmm0, ExternalAddress(ZERON), r11 /*rscratch*/);
__ jmp(B1_4);
__ bind(L_2TAG_PACKET_1_0_1);
__ movl(rax, Address(rsp, 4));
__ movl(rdx, Address(rsp));
__ movl(rcx, rax);
__ andl(rcx, 2147483647);
__ cmpl(rcx, 2146435072);
__ jcc(Assembler::above, L_2TAG_PACKET_5_0_1); // Branch only if |x| is NaN
__ cmpl(rdx, 0);
__ jcc(Assembler::notEqual, L_2TAG_PACKET_5_0_1); // Branch only if |x| is NaN
__ cmpl(rax, 2146435072);
__ jcc(Assembler::notEqual, L_2TAG_PACKET_6_0_1); // Branch only if x is negative INF
__ movsd(xmm0, ExternalAddress(INF), r11 /*rscratch*/);
__ jmp(B1_4);
__ bind(L_2TAG_PACKET_6_0_1);
__ movsd(xmm0, ExternalAddress(NEG_INF), r11 /*rscratch*/);
__ jmp(B1_4);
__ bind(L_2TAG_PACKET_5_0_1);
__ movsd(xmm0, Address(rsp));
__ addsd(xmm0, xmm0);
__ movq(Address(rsp, 8), xmm0);
__ bind(B1_4);
__ addq(rsp, 24);
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);

View File

@@ -440,7 +440,6 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits apx_f
__ jcc(Assembler::equal, vector_save_restore);
#ifndef PRODUCT
bool save_apx = UseAPX;
VM_Version::set_apx_cpuFeatures();
UseAPX = true;
@@ -457,7 +456,6 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ movq(Address(rsi, 8), r31);
UseAPX = save_apx;
#endif
__ bind(vector_save_restore);
//
// Check if OS has enabled XGETBV instruction to access XCR0
@@ -1022,8 +1020,6 @@ void VM_Version::get_processor_features() {
if (UseAPX && !apx_supported) {
warning("UseAPX is not supported on this CPU, setting it to false");
FLAG_SET_DEFAULT(UseAPX, false);
} else if (FLAG_IS_DEFAULT(UseAPX)) {
FLAG_SET_DEFAULT(UseAPX, apx_supported ? true : false);
}
if (!UseAPX) {
@@ -2111,7 +2107,7 @@ bool VM_Version::is_intel_cascade_lake() {
// has improved implementation of 64-byte load/stores and so the default
// threshold is set to 0 for these platforms.
int VM_Version::avx3_threshold() {
return (is_intel_family_core() &&
return (is_intel_server_family() &&
supports_serialize() &&
FLAG_IS_DEFAULT(AVX3Threshold)) ? 0 : AVX3Threshold;
}
@@ -3151,17 +3147,11 @@ bool VM_Version::os_supports_apx_egprs() {
if (!supports_apx_f()) {
return false;
}
// Enable APX support for product builds after
// completion of planned features listed in JDK-8329030.
#if !defined(PRODUCT)
if (_cpuid_info.apx_save[0] != egpr_test_value() ||
_cpuid_info.apx_save[1] != egpr_test_value()) {
return false;
}
return true;
#else
return false;
#endif
}
uint VM_Version::cores_per_cpu() {

View File

@@ -10527,7 +10527,8 @@ instruct xorI_rReg_im1_ndd(rRegI dst, rRegI src, immI_M1 imm)
// Xor Register with Immediate
instruct xorI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
%{
predicate(!UseAPX);
// Strict predicate check to make selection of xorI_rReg_im1 cost agnostic if immI src is -1.
predicate(!UseAPX && n->in(2)->bottom_type()->is_int()->get_con() != -1);
match(Set dst (XorI dst src));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
@@ -10541,7 +10542,8 @@ instruct xorI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
instruct xorI_rReg_rReg_imm_ndd(rRegI dst, rRegI src1, immI src2, rFlagsReg cr)
%{
predicate(UseAPX);
// Strict predicate check to make selection of xorI_rReg_im1_ndd cost agnostic if immI src2 is -1.
predicate(UseAPX && n->in(2)->bottom_type()->is_int()->get_con() != -1);
match(Set dst (XorI src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
@@ -10559,6 +10561,7 @@ instruct xorI_rReg_mem_imm_ndd(rRegI dst, memory src1, immI src2, rFlagsReg cr)
predicate(UseAPX);
match(Set dst (XorI (LoadI src1) src2));
effect(KILL cr);
ins_cost(150);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
format %{ "exorl $dst, $src1, $src2\t# int ndd" %}
@@ -11201,7 +11204,8 @@ instruct xorL_rReg_im1_ndd(rRegL dst,rRegL src, immL_M1 imm)
// Xor Register with Immediate
instruct xorL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
%{
predicate(!UseAPX);
// Strict predicate check to make selection of xorL_rReg_im1 cost agnostic if immL32 src is -1.
predicate(!UseAPX && n->in(2)->bottom_type()->is_long()->get_con() != -1L);
match(Set dst (XorL dst src));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
@@ -11215,7 +11219,8 @@ instruct xorL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
instruct xorL_rReg_rReg_imm(rRegL dst, rRegL src1, immL32 src2, rFlagsReg cr)
%{
predicate(UseAPX);
// Strict predicate check to make selection of xorL_rReg_im1_ndd cost agnostic if immL32 src2 is -1.
predicate(UseAPX && n->in(2)->bottom_type()->is_long()->get_con() != -1L);
match(Set dst (XorL src1 src2));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
@@ -11234,6 +11239,7 @@ instruct xorL_rReg_mem_imm(rRegL dst, memory src1, immL32 src2, rFlagsReg cr)
match(Set dst (XorL (LoadL src1) src2));
effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
ins_cost(150);
format %{ "exorq $dst, $src1, $src2\t# long ndd" %}
ins_encode %{

View File

@@ -2623,7 +2623,6 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
}
#if !defined(PRODUCT)
if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
VM_Version::is_cpuinfo_segv_addr_apx(pc)) {
// Verify that OS save/restore APX registers.
@@ -2631,7 +2630,6 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr_apx());
}
#endif
#endif
#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
if (VMError::was_assert_poison_crash(exception_record)) {

View File

@@ -429,13 +429,11 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
stub = VM_Version::cpuinfo_cont_addr();
}
#if !defined(PRODUCT) && defined(_LP64)
if ((sig == SIGSEGV || sig == SIGBUS) && VM_Version::is_cpuinfo_segv_addr_apx(pc)) {
// Verify that OS save/restore APX registers.
stub = VM_Version::cpuinfo_cont_addr_apx();
VM_Version::clear_apx_test_state();
}
#endif
// We test if stub is already set (by the stack overflow code
// above) so it is not overwritten by the code that follows. This

View File

@@ -255,13 +255,11 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
stub = VM_Version::cpuinfo_cont_addr();
}
#if !defined(PRODUCT) && defined(_LP64)
if ((sig == SIGSEGV) && VM_Version::is_cpuinfo_segv_addr_apx(pc)) {
// Verify that OS save/restore APX registers.
stub = VM_Version::cpuinfo_cont_addr_apx();
VM_Version::clear_apx_test_state();
}
#endif
if (thread->thread_state() == _thread_in_Java) {
// Java thread running in Java code => find exception handler if any

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -187,7 +187,13 @@ class ValueNumberingVisitor: public InstructionVisitor {
void do_Convert (Convert* x) { /* nothing to do */ }
void do_NullCheck (NullCheck* x) { /* nothing to do */ }
void do_TypeCast (TypeCast* x) { /* nothing to do */ }
void do_NewInstance (NewInstance* x) { /* nothing to do */ }
void do_NewInstance (NewInstance* x) {
ciInstanceKlass* c = x->klass();
if (c != nullptr && !c->is_initialized() &&
(!c->is_loaded() || c->has_class_initializer())) {
kill_memory();
}
}
void do_NewTypeArray (NewTypeArray* x) { /* nothing to do */ }
void do_NewObjectArray (NewObjectArray* x) { /* nothing to do */ }
void do_NewMultiArray (NewMultiArray* x) { /* nothing to do */ }

View File

@@ -147,7 +147,7 @@
product(bool, AOTVerifyTrainingData, trueInDebug, DIAGNOSTIC, \
"Verify archived training data") \
\
product(bool, AOTCompileEagerly, false, DIAGNOSTIC, \
product(bool, AOTCompileEagerly, false, EXPERIMENTAL, \
"Compile methods as soon as possible") \
\
/* AOT Code flags */ \

View File

@@ -837,11 +837,10 @@ void MetaspaceShared::preload_and_dump(TRAPS) {
struct stat st;
if (os::stat(AOTCache, &st) != 0) {
tty->print_cr("AOTCache creation failed: %s", AOTCache);
vm_exit(0);
} else {
tty->print_cr("AOTCache creation is complete: %s " INT64_FORMAT " bytes", AOTCache, (int64_t)(st.st_size));
vm_exit(0);
}
vm_direct_exit(0);
}
}
}

View File

@@ -549,6 +549,11 @@ bool ciInstanceKlass::compute_has_trusted_loader() {
return java_lang_ClassLoader::is_trusted_loader(loader_oop);
}
bool ciInstanceKlass::has_class_initializer() {
VM_ENTRY_MARK;
return get_instanceKlass()->class_initializer() != nullptr;
}
// ------------------------------------------------------------------
// ciInstanceKlass::find_method
//

View File

@@ -231,6 +231,8 @@ public:
ciInstanceKlass* unique_concrete_subklass();
bool has_finalizable_subclass();
bool has_class_initializer();
bool contains_field_offset(int offset);
// Get the instance of java.lang.Class corresponding to

View File

@@ -3738,6 +3738,7 @@ void ClassFileParser::apply_parsed_class_metadata(
_cp->set_pool_holder(this_klass);
this_klass->set_constants(_cp);
this_klass->set_fieldinfo_stream(_fieldinfo_stream);
this_klass->set_fieldinfo_search_table(_fieldinfo_search_table);
this_klass->set_fields_status(_fields_status);
this_klass->set_methods(_methods);
this_klass->set_inner_classes(_inner_classes);
@@ -3747,6 +3748,8 @@ void ClassFileParser::apply_parsed_class_metadata(
this_klass->set_permitted_subclasses(_permitted_subclasses);
this_klass->set_record_components(_record_components);
DEBUG_ONLY(FieldInfoStream::validate_search_table(_cp, _fieldinfo_stream, _fieldinfo_search_table));
// Delay the setting of _local_interfaces and _transitive_interfaces until after
// initialize_supers() in fill_instance_klass(). It is because the _local_interfaces could
// be shared with _transitive_interfaces and _transitive_interfaces may be shared with
@@ -5054,6 +5057,7 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik,
// note that is not safe to use the fields in the parser from this point on
assert(nullptr == _cp, "invariant");
assert(nullptr == _fieldinfo_stream, "invariant");
assert(nullptr == _fieldinfo_search_table, "invariant");
assert(nullptr == _fields_status, "invariant");
assert(nullptr == _methods, "invariant");
assert(nullptr == _inner_classes, "invariant");
@@ -5274,6 +5278,7 @@ ClassFileParser::ClassFileParser(ClassFileStream* stream,
_super_klass(),
_cp(nullptr),
_fieldinfo_stream(nullptr),
_fieldinfo_search_table(nullptr),
_fields_status(nullptr),
_methods(nullptr),
_inner_classes(nullptr),
@@ -5350,6 +5355,7 @@ void ClassFileParser::clear_class_metadata() {
// deallocated if classfile parsing returns an error.
_cp = nullptr;
_fieldinfo_stream = nullptr;
_fieldinfo_search_table = nullptr;
_fields_status = nullptr;
_methods = nullptr;
_inner_classes = nullptr;
@@ -5372,6 +5378,7 @@ ClassFileParser::~ClassFileParser() {
if (_fieldinfo_stream != nullptr) {
MetadataFactory::free_array<u1>(_loader_data, _fieldinfo_stream);
}
MetadataFactory::free_array<u1>(_loader_data, _fieldinfo_search_table);
if (_fields_status != nullptr) {
MetadataFactory::free_array<FieldStatus>(_loader_data, _fields_status);
@@ -5772,6 +5779,7 @@ void ClassFileParser::post_process_parsed_stream(const ClassFileStream* const st
_fieldinfo_stream =
FieldInfoStream::create_FieldInfoStream(_temp_field_info, _java_fields_count,
injected_fields_count, loader_data(), CHECK);
_fieldinfo_search_table = FieldInfoStream::create_search_table(_cp, _fieldinfo_stream, _loader_data, CHECK);
_fields_status =
MetadataFactory::new_array<FieldStatus>(_loader_data, _temp_field_info->length(),
FieldStatus(0), CHECK);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -123,6 +123,7 @@ class ClassFileParser {
const InstanceKlass* _super_klass;
ConstantPool* _cp;
Array<u1>* _fieldinfo_stream;
Array<u1>* _fieldinfo_search_table;
Array<FieldStatus>* _fields_status;
Array<Method*>* _methods;
Array<u2>* _inner_classes;

View File

@@ -301,7 +301,7 @@ void FieldLayout::reconstruct_layout(const InstanceKlass* ik, bool& has_instance
BasicType last_type;
int last_offset = -1;
while (ik != nullptr) {
for (AllFieldStream fs(ik->fieldinfo_stream(), ik->constants()); !fs.done(); fs.next()) {
for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
BasicType type = Signature::basic_type(fs.signature());
// distinction between static and non-static fields is missing
if (fs.access_flags().is_static()) continue;
@@ -461,7 +461,7 @@ void FieldLayout::print(outputStream* output, bool is_static, const InstanceKlas
bool found = false;
const InstanceKlass* ik = super;
while (!found && ik != nullptr) {
for (AllFieldStream fs(ik->fieldinfo_stream(), ik->constants()); !fs.done(); fs.next()) {
for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
if (fs.offset() == b->offset()) {
output->print_cr(" @%d \"%s\" %s %d/%d %s",
b->offset(),

View File

@@ -967,6 +967,13 @@ void java_lang_Class::fixup_mirror(Klass* k, TRAPS) {
Array<u1>* new_fis = FieldInfoStream::create_FieldInfoStream(fields, java_fields, injected_fields, k->class_loader_data(), CHECK);
ik->set_fieldinfo_stream(new_fis);
MetadataFactory::free_array<u1>(k->class_loader_data(), old_stream);
Array<u1>* old_table = ik->fieldinfo_search_table();
Array<u1>* search_table = FieldInfoStream::create_search_table(ik->constants(), new_fis, k->class_loader_data(), CHECK);
ik->set_fieldinfo_search_table(search_table);
MetadataFactory::free_array<u1>(k->class_loader_data(), old_table);
DEBUG_ONLY(FieldInfoStream::validate_search_table(ik->constants(), new_fis, search_table));
}
}

View File

@@ -132,8 +132,16 @@ bool StackMapTable::match_stackmap(
}
void StackMapTable::check_jump_target(
StackMapFrame* frame, int32_t target, TRAPS) const {
StackMapFrame* frame, int bci, int offset, TRAPS) const {
ErrorContext ctx;
// Jump targets must be within the method and the method size is limited. See JVMS 4.11
int min_offset = -1 * max_method_code_size;
if (offset < min_offset || offset > max_method_code_size) {
frame->verifier()->verify_error(ErrorContext::bad_stackmap(bci, frame),
"Illegal target of jump or branch (bci %d + offset %d)", bci, offset);
return;
}
int target = bci + offset;
bool match = match_stackmap(
frame, target, true, false, &ctx, CHECK_VERIFY(frame->verifier()));
if (!match || (target < 0 || target >= _code_length)) {

View File

@@ -67,7 +67,7 @@ class StackMapTable : public StackObj {
// Check jump instructions. Make sure there are no uninitialized
// instances on backward branch.
void check_jump_target(StackMapFrame* frame, int32_t target, TRAPS) const;
void check_jump_target(StackMapFrame* frame, int bci, int offset, TRAPS) const;
// The following methods are only used inside this class.

View File

@@ -32,6 +32,7 @@
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/vmClasses.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.hpp"
@@ -115,6 +116,7 @@ OopStorage* StringTable::_oop_storage;
static size_t _current_size = 0;
static volatile size_t _items_count = 0;
DEBUG_ONLY(static bool _disable_interning_during_cds_dump = false);
volatile bool _alt_hash = false;
@@ -346,6 +348,10 @@ bool StringTable::has_work() {
return Atomic::load_acquire(&_has_work);
}
size_t StringTable::items_count_acquire() {
return Atomic::load_acquire(&_items_count);
}
void StringTable::trigger_concurrent_work() {
// Avoid churn on ServiceThread
if (!has_work()) {
@@ -504,6 +510,9 @@ oop StringTable::intern(const char* utf8_string, TRAPS) {
}
oop StringTable::intern(const StringWrapper& name, TRAPS) {
assert(!Atomic::load_acquire(&_disable_interning_during_cds_dump),
"All threads that may intern strings should have been stopped before CDS starts copying the interned string table");
// shared table always uses java_lang_String::hash_code
unsigned int hash = hash_wrapped_string(name);
oop found_string = lookup_shared(name, hash);
@@ -793,7 +802,7 @@ void StringTable::verify() {
}
// Verification and comp
class VerifyCompStrings : StackObj {
class StringTable::VerifyCompStrings : StackObj {
static unsigned string_hash(oop const& str) {
return java_lang_String::hash_code_noupdate(str);
}
@@ -805,7 +814,7 @@ class VerifyCompStrings : StackObj {
string_hash, string_equals> _table;
public:
size_t _errors;
VerifyCompStrings() : _table(unsigned(_items_count / 8) + 1, 0 /* do not resize */), _errors(0) {}
VerifyCompStrings() : _table(unsigned(items_count_acquire() / 8) + 1, 0 /* do not resize */), _errors(0) {}
bool operator()(WeakHandle* val) {
oop s = val->resolve();
if (s == nullptr) {
@@ -939,20 +948,31 @@ oop StringTable::lookup_shared(const jchar* name, int len) {
return _shared_table.lookup(wrapped_name, java_lang_String::hash_code(name, len), 0);
}
// This is called BEFORE we enter the CDS safepoint. We can allocate heap objects.
// This should be called when we know no more strings will be added (which will be easy
// to guarantee because CDS runs with a single Java thread. See JDK-8253495.)
// This is called BEFORE we enter the CDS safepoint. We can still allocate Java object arrays to
// be used by the shared strings table.
void StringTable::allocate_shared_strings_array(TRAPS) {
if (!CDSConfig::is_dumping_heap()) {
return;
}
assert(CDSConfig::allow_only_single_java_thread(), "No more interned strings can be added");
if (_items_count > (size_t)max_jint) {
fatal("Too many strings to be archived: %zu", _items_count);
CompileBroker::wait_for_no_active_tasks();
precond(CDSConfig::allow_only_single_java_thread());
// At this point, no more strings will be added:
// - There's only a single Java thread (this thread). It no longer executes Java bytecodes
// so JIT compilation will eventually stop.
// - CompileBroker has no more active tasks, so all JIT requests have been processed.
// This flag will be cleared after intern table dumping has completed, so we can run the
// compiler again (for future AOT method compilation, etc).
DEBUG_ONLY(Atomic::release_store(&_disable_interning_during_cds_dump, true));
if (items_count_acquire() > (size_t)max_jint) {
fatal("Too many strings to be archived: %zu", items_count_acquire());
}
int total = (int)_items_count;
int total = (int)items_count_acquire();
size_t single_array_size = objArrayOopDesc::object_size(total);
log_info(aot)("allocated string table for %d strings", total);
@@ -972,7 +992,7 @@ void StringTable::allocate_shared_strings_array(TRAPS) {
// This can only happen if you have an extremely large number of classes that
// refer to more than 16384 * 16384 = 26M interned strings! Not a practical concern
// but bail out for safety.
log_error(aot)("Too many strings to be archived: %zu", _items_count);
log_error(aot)("Too many strings to be archived: %zu", items_count_acquire());
MetaspaceShared::unrecoverable_writing_error();
}
@@ -1070,7 +1090,7 @@ oop StringTable::init_shared_strings_array() {
void StringTable::write_shared_table() {
_shared_table.reset();
CompactHashtableWriter writer((int)_items_count, ArchiveBuilder::string_stats());
CompactHashtableWriter writer((int)items_count_acquire(), ArchiveBuilder::string_stats());
int index = 0;
auto copy_into_shared_table = [&] (WeakHandle* val) {
@@ -1084,6 +1104,8 @@ void StringTable::write_shared_table() {
};
_local_table->do_safepoint_scan(copy_into_shared_table);
writer.dump(&_shared_table, "string");
DEBUG_ONLY(Atomic::release_store(&_disable_interning_during_cds_dump, false));
}
void StringTable::set_shared_strings_array_index(int root_index) {

View File

@@ -40,7 +40,7 @@ class StringTableConfig;
class StringTable : AllStatic {
friend class StringTableConfig;
class VerifyCompStrings;
static volatile bool _has_work;
// Set if one bucket is out of balance due to hash algorithm deficiency
@@ -74,6 +74,7 @@ private:
static void item_added();
static void item_removed();
static size_t items_count_acquire();
static oop intern(const StringWrapper& name, TRAPS);
static oop do_intern(const StringWrapper& name, uintx hash, TRAPS);

View File

@@ -781,7 +781,6 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
// Merge with the next instruction
{
int target;
VerificationType type, type2;
VerificationType atype;
@@ -1606,9 +1605,8 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
case Bytecodes::_ifle:
current_frame.pop_stack(
VerificationType::integer_type(), CHECK_VERIFY(this));
target = bcs.dest();
stackmap_table.check_jump_target(
&current_frame, target, CHECK_VERIFY(this));
&current_frame, bcs.bci(), bcs.get_offset_s2(), CHECK_VERIFY(this));
no_control_flow = false; break;
case Bytecodes::_if_acmpeq :
case Bytecodes::_if_acmpne :
@@ -1619,19 +1617,16 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
case Bytecodes::_ifnonnull :
current_frame.pop_stack(
VerificationType::reference_check(), CHECK_VERIFY(this));
target = bcs.dest();
stackmap_table.check_jump_target
(&current_frame, target, CHECK_VERIFY(this));
(&current_frame, bcs.bci(), bcs.get_offset_s2(), CHECK_VERIFY(this));
no_control_flow = false; break;
case Bytecodes::_goto :
target = bcs.dest();
stackmap_table.check_jump_target(
&current_frame, target, CHECK_VERIFY(this));
&current_frame, bcs.bci(), bcs.get_offset_s2(), CHECK_VERIFY(this));
no_control_flow = true; break;
case Bytecodes::_goto_w :
target = bcs.dest_w();
stackmap_table.check_jump_target(
&current_frame, target, CHECK_VERIFY(this));
&current_frame, bcs.bci(), bcs.get_offset_s4(), CHECK_VERIFY(this));
no_control_flow = true; break;
case Bytecodes::_tableswitch :
case Bytecodes::_lookupswitch :
@@ -2280,15 +2275,14 @@ void ClassVerifier::verify_switch(
}
}
}
int target = bci + default_offset;
stackmap_table->check_jump_target(current_frame, target, CHECK_VERIFY(this));
stackmap_table->check_jump_target(current_frame, bci, default_offset, CHECK_VERIFY(this));
for (int i = 0; i < keys; i++) {
// Because check_jump_target() may safepoint, the bytecode could have
// moved, which means 'aligned_bcp' is no good and needs to be recalculated.
aligned_bcp = align_up(bcs->bcp() + 1, jintSize);
target = bci + (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
int offset = (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
stackmap_table->check_jump_target(
current_frame, target, CHECK_VERIFY(this));
current_frame, bci, offset, CHECK_VERIFY(this));
}
NOT_PRODUCT(aligned_bcp = nullptr); // no longer valid at this point
}
@@ -2549,7 +2543,12 @@ bool ClassVerifier::ends_in_athrow(u4 start_bc_offset) {
case Bytecodes::_goto:
case Bytecodes::_goto_w: {
int target = (opcode == Bytecodes::_goto ? bcs.dest() : bcs.dest_w());
int offset = (opcode == Bytecodes::_goto ? bcs.get_offset_s2() : bcs.get_offset_s4());
int min_offset = -1 * max_method_code_size;
// Check offset for overflow
if (offset < min_offset || offset > max_method_code_size) return false;
int target = bci + offset;
if (visited_branches->contains(bci)) {
if (bci_stack->is_empty()) {
if (handler_stack->is_empty()) {
@@ -2607,7 +2606,10 @@ bool ClassVerifier::ends_in_athrow(u4 start_bc_offset) {
// Push the switch alternatives onto the stack.
for (int i = 0; i < keys; i++) {
int target = bci + (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
int min_offset = -1 * max_method_code_size;
int offset = (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
if (offset < min_offset || offset > max_method_code_size) return false;
int target = bci + offset;
if (target > code_length) return false;
bci_stack->push(target);
}

View File

@@ -344,6 +344,7 @@ AOTCodeCache::~AOTCodeCache() {
_store_buffer = nullptr;
}
if (_table != nullptr) {
MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
delete _table;
_table = nullptr;
}
@@ -774,6 +775,9 @@ bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind
// we need to take a lock to prevent race between compiler threads generating AOT code
// and the main thread generating adapter
MutexLocker ml(Compile_lock);
if (!is_on()) {
return false; // AOT code cache was already dumped and closed.
}
if (!cache->align_write()) {
return false;
}
@@ -1434,6 +1438,9 @@ AOTCodeAddressTable::~AOTCodeAddressTable() {
if (_extrs_addr != nullptr) {
FREE_C_HEAP_ARRAY(address, _extrs_addr);
}
if (_stubs_addr != nullptr) {
FREE_C_HEAP_ARRAY(address, _stubs_addr);
}
if (_shared_blobs_addr != nullptr) {
FREE_C_HEAP_ARRAY(address, _shared_blobs_addr);
}
@@ -1485,6 +1492,7 @@ void AOTCodeCache::load_strings() {
int AOTCodeCache::store_strings() {
if (_C_strings_used > 0) {
MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
uint offset = _write_position;
uint length = 0;
uint* lengths = (uint *)reserve_bytes(sizeof(uint) * _C_strings_used);
@@ -1510,15 +1518,17 @@ int AOTCodeCache::store_strings() {
const char* AOTCodeCache::add_C_string(const char* str) {
if (is_on_for_dump() && str != nullptr) {
return _cache->_table->add_C_string(str);
MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
AOTCodeAddressTable* table = addr_table();
if (table != nullptr) {
return table->add_C_string(str);
}
}
return str;
}
const char* AOTCodeAddressTable::add_C_string(const char* str) {
if (_extrs_complete) {
LogStreamHandle(Trace, aot, codecache, stringtable) log; // ctor outside lock
MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
// Check previous strings address
for (int i = 0; i < _C_strings_count; i++) {
if (_C_strings_in[i] == str) {
@@ -1535,9 +1545,7 @@ const char* AOTCodeAddressTable::add_C_string(const char* str) {
_C_strings_in[_C_strings_count] = str;
const char* dup = os::strdup(str);
_C_strings[_C_strings_count++] = dup;
if (log.is_enabled()) {
log.print_cr("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup);
}
log_trace(aot, codecache, stringtable)("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup);
return dup;
} else {
assert(false, "Number of C strings >= MAX_STR_COUNT");

View File

@@ -136,6 +136,7 @@ private:
public:
AOTCodeAddressTable() :
_extrs_addr(nullptr),
_stubs_addr(nullptr),
_shared_blobs_addr(nullptr),
_C1_blobs_addr(nullptr),
_extrs_length(0),

View File

@@ -160,7 +160,7 @@ CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size
}
} else {
// We need unique and valid not null address
assert(_mutable_data = blob_end(), "sanity");
assert(_mutable_data == blob_end(), "sanity");
}
set_oop_maps(oop_maps);
@@ -177,6 +177,7 @@ CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t heade
_code_offset(_content_offset),
_data_offset(size),
_frame_size(0),
_mutable_data_size(0),
S390_ONLY(_ctable_offset(0) COMMA)
_header_size(header_size),
_frame_complete_offset(CodeOffsets::frame_never_safe),
@@ -185,7 +186,7 @@ CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t heade
{
assert(is_aligned(size, oopSize), "unaligned size");
assert(is_aligned(header_size, oopSize), "unaligned size");
assert(_mutable_data = blob_end(), "sanity");
assert(_mutable_data == blob_end(), "sanity");
}
void CodeBlob::restore_mutable_data(address reloc_data) {
@@ -195,8 +196,11 @@ void CodeBlob::restore_mutable_data(address reloc_data) {
if (_mutable_data == nullptr) {
vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
}
} else {
_mutable_data = blob_end(); // default value
}
if (_relocation_size > 0) {
assert(_mutable_data_size > 0, "relocation is part of mutable data section");
memcpy((address)relocation_begin(), reloc_data, relocation_size());
}
}
@@ -206,6 +210,8 @@ void CodeBlob::purge() {
if (_mutable_data != blob_end()) {
os::free(_mutable_data);
_mutable_data = blob_end(); // Valid not null address
_mutable_data_size = 0;
_relocation_size = 0;
}
if (_oop_maps != nullptr) {
delete _oop_maps;

View File

@@ -247,7 +247,7 @@ public:
// Sizes
int size() const { return _size; }
int header_size() const { return _header_size; }
int relocation_size() const { return pointer_delta_as_int((address) relocation_end(), (address) relocation_begin()); }
int relocation_size() const { return _relocation_size; }
int content_size() const { return pointer_delta_as_int(content_end(), content_begin()); }
int code_size() const { return pointer_delta_as_int(code_end(), code_begin()); }

View File

@@ -28,7 +28,6 @@
#include "code/dependencies.hpp"
#include "code/nativeInst.hpp"
#include "code/nmethod.inline.hpp"
#include "code/relocInfo.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compilationLog.hpp"
@@ -1653,10 +1652,6 @@ void nmethod::maybe_print_nmethod(const DirectiveSet* directive) {
}
void nmethod::print_nmethod(bool printmethod) {
// Enter a critical section to prevent a race with deopts that patch code and updates the relocation info.
// Unfortunately, we have to lock the NMethodState_lock before the tty lock due to the deadlock rules and
// cannot lock in a more finely grained manner.
ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
ttyLocker ttyl; // keep the following output all in one block
if (xtty != nullptr) {
xtty->begin_head("print_nmethod");
@@ -2046,17 +2041,6 @@ bool nmethod::make_not_entrant(const char* reason) {
// cache call.
NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
SharedRuntime::get_handle_wrong_method_stub());
// Update the relocation info for the patched entry.
// First, get the old relocation info...
RelocIterator iter(this, verified_entry_point(), verified_entry_point() + 8);
if (iter.next() && iter.addr() == verified_entry_point()) {
Relocation* old_reloc = iter.reloc();
// ...then reset the iterator to update it.
RelocIterator iter(this, verified_entry_point(), verified_entry_point() + 8);
relocInfo::change_reloc_info_for_address(&iter, verified_entry_point(), old_reloc->type(),
relocInfo::relocType::runtime_call_type);
}
}
if (update_recompile_counts()) {
@@ -2182,6 +2166,7 @@ void nmethod::purge(bool unregister_nmethod) {
}
CodeCache::unregister_old_nmethod(this);
JVMCI_ONLY( _metadata_size = 0; )
CodeBlob::purge();
}

View File

@@ -1750,6 +1750,10 @@ void CompileBroker::wait_for_completion(CompileTask* task) {
}
}
void CompileBroker::wait_for_no_active_tasks() {
CompileTask::wait_for_no_active_tasks();
}
/**
* Initialize compiler thread(s) + compiler object(s). The postcondition
* of this function is that the compiler runtimes are initialized and that

View File

@@ -383,6 +383,9 @@ public:
static bool is_compilation_disabled_forever() {
return _should_compile_new_jobs == shutdown_compilation;
}
static void wait_for_no_active_tasks();
static void handle_full_code_cache(CodeBlobType code_blob_type);
// Ensures that warning is only printed once.
static bool should_print_compiler_warning() {

View File

@@ -37,12 +37,13 @@
#include "runtime/mutexLocker.hpp"
CompileTask* CompileTask::_task_free_list = nullptr;
int CompileTask::_active_tasks = 0;
/**
* Allocate a CompileTask, from the free list if possible.
*/
CompileTask* CompileTask::allocate() {
MutexLocker locker(CompileTaskAlloc_lock);
MonitorLocker locker(CompileTaskAlloc_lock);
CompileTask* task = nullptr;
if (_task_free_list != nullptr) {
@@ -56,6 +57,7 @@ CompileTask* CompileTask::allocate() {
}
assert(task->is_free(), "Task must be free.");
task->set_is_free(false);
_active_tasks++;
return task;
}
@@ -63,7 +65,7 @@ CompileTask* CompileTask::allocate() {
* Add a task to the free list.
*/
void CompileTask::free(CompileTask* task) {
MutexLocker locker(CompileTaskAlloc_lock);
MonitorLocker locker(CompileTaskAlloc_lock);
if (!task->is_free()) {
if ((task->_method_holder != nullptr && JNIHandles::is_weak_global_handle(task->_method_holder))) {
JNIHandles::destroy_weak_global(task->_method_holder);
@@ -79,6 +81,17 @@ void CompileTask::free(CompileTask* task) {
task->set_is_free(true);
task->set_next(_task_free_list);
_task_free_list = task;
_active_tasks--;
if (_active_tasks == 0) {
locker.notify_all();
}
}
}
void CompileTask::wait_for_no_active_tasks() {
MonitorLocker locker(CompileTaskAlloc_lock);
while (_active_tasks > 0) {
locker.wait();
}
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -83,6 +83,7 @@ class CompileTask : public CHeapObj<mtCompiler> {
private:
static CompileTask* _task_free_list;
static int _active_tasks;
int _compile_id;
Method* _method;
jobject _method_holder;
@@ -123,6 +124,7 @@ class CompileTask : public CHeapObj<mtCompiler> {
static CompileTask* allocate();
static void free(CompileTask* task);
static void wait_for_no_active_tasks();
int compile_id() const { return _compile_id; }
Method* method() const { return _method; }

View File

@@ -625,6 +625,34 @@ void ShenandoahBarrierC2Support::verify(RootNode* root) {
}
#endif
bool ShenandoahBarrierC2Support::is_anti_dependent_load_at_control(PhaseIdealLoop* phase, Node* maybe_load, Node* store,
Node* control) {
return maybe_load->is_Load() && phase->C->can_alias(store->adr_type(), phase->C->get_alias_index(maybe_load->adr_type())) &&
phase->ctrl_or_self(maybe_load) == control;
}
void ShenandoahBarrierC2Support::maybe_push_anti_dependent_loads(PhaseIdealLoop* phase, Node* maybe_store, Node* control, Unique_Node_List &wq) {
if (!maybe_store->is_Store() && !maybe_store->is_LoadStore()) {
return;
}
Node* mem = maybe_store->in(MemNode::Memory);
for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
Node* u = mem->fast_out(i);
if (is_anti_dependent_load_at_control(phase, u, maybe_store, control)) {
wq.push(u);
}
}
}
void ShenandoahBarrierC2Support::push_data_inputs_at_control(PhaseIdealLoop* phase, Node* n, Node* ctrl, Unique_Node_List &wq) {
for (uint i = 0; i < n->req(); i++) {
Node* in = n->in(i);
if (in != nullptr && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
wq.push(in);
}
}
}
bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
// That both nodes have the same control is not sufficient to prove
// domination, verify that there's no path from d to n
@@ -639,22 +667,9 @@ bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node*
if (m->is_Phi() && m->in(0)->is_Loop()) {
assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
} else {
if (m->is_Store() || m->is_LoadStore()) {
// Take anti-dependencies into account
Node* mem = m->in(MemNode::Memory);
for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
Node* u = mem->fast_out(i);
if (u->is_Load() && phase->C->can_alias(m->adr_type(), phase->C->get_alias_index(u->adr_type())) &&
phase->ctrl_or_self(u) == c) {
wq.push(u);
}
}
}
for (uint i = 0; i < m->req(); i++) {
if (m->in(i) != nullptr && phase->ctrl_or_self(m->in(i)) == c) {
wq.push(m->in(i));
}
}
// Take anti-dependencies into account
maybe_push_anti_dependent_loads(phase, m, c, wq);
push_data_inputs_at_control(phase, m, c, wq);
}
}
return true;
@@ -1006,7 +1021,20 @@ void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* lo
phase->register_new_node(val, ctrl);
}
void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
void ShenandoahBarrierC2Support::collect_nodes_above_barrier(Unique_Node_List &nodes_above_barrier, PhaseIdealLoop* phase, Node* ctrl, Node* init_raw_mem) {
nodes_above_barrier.clear();
if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
nodes_above_barrier.push(init_raw_mem);
}
for (uint next = 0; next < nodes_above_barrier.size(); next++) {
Node* n = nodes_above_barrier.at(next);
// Take anti-dependencies into account
maybe_push_anti_dependent_loads(phase, n, ctrl, nodes_above_barrier);
push_data_inputs_at_control(phase, n, ctrl, nodes_above_barrier);
}
}
void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& nodes_above_barrier, uint last, PhaseIdealLoop* phase) {
Node* ctrl = phase->get_ctrl(barrier);
Node* init_raw_mem = fixer.find_mem(ctrl, barrier);
@@ -1017,30 +1045,17 @@ void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const Mem
// control will be after the expanded barrier. The raw memory (if
// its memory is control dependent on the barrier's input control)
// must stay above the barrier.
uses_to_ignore.clear();
if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
uses_to_ignore.push(init_raw_mem);
}
for (uint next = 0; next < uses_to_ignore.size(); next++) {
Node *n = uses_to_ignore.at(next);
for (uint i = 0; i < n->req(); i++) {
Node* in = n->in(i);
if (in != nullptr && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
uses_to_ignore.push(in);
}
}
}
collect_nodes_above_barrier(nodes_above_barrier, phase, ctrl, init_raw_mem);
for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
Node* u = ctrl->fast_out(i);
if (u->_idx < last &&
u != barrier &&
!u->depends_only_on_test() && // preserve dependency on test
!uses_to_ignore.member(u) &&
!nodes_above_barrier.member(u) &&
(u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
(ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
Node* old_c = phase->ctrl_or_self(u);
Node* c = old_c;
if (c != ctrl ||
if (old_c != ctrl ||
is_dominator_same_ctrl(old_c, barrier, u, phase) ||
ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
phase->igvn().rehash_node_delayed(u);
@@ -1315,7 +1330,7 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
// Expand load-reference-barriers
MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
Unique_Node_List uses_to_ignore;
Unique_Node_List nodes_above_barriers;
for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
uint last = phase->C->unique();
@@ -1410,7 +1425,7 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
Node* out_val = val_phi;
phase->register_new_node(val_phi, region);
fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);
fix_ctrl(lrb, region, fixer, uses, nodes_above_barriers, last, phase);
ctrl = orig_ctrl;

View File

@@ -62,8 +62,12 @@ private:
PhaseIdealLoop* phase, int flags);
static void call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr,
DecoratorSet decorators, PhaseIdealLoop* phase);
static void collect_nodes_above_barrier(Unique_Node_List &nodes_above_barrier, PhaseIdealLoop* phase, Node* ctrl,
Node* init_raw_mem);
static void test_in_cset(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase);
static void fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase);
static void fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& nodes_above_barrier, uint last, PhaseIdealLoop* phase);
static Node* get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* lrb);
public:
@@ -76,6 +80,11 @@ public:
static bool expand(Compile* C, PhaseIterGVN& igvn);
static void pin_and_expand(PhaseIdealLoop* phase);
static void push_data_inputs_at_control(PhaseIdealLoop* phase, Node* n, Node* ctrl,
Unique_Node_List &wq);
static bool is_anti_dependent_load_at_control(PhaseIdealLoop* phase, Node* maybe_load, Node* store, Node* control);
static void maybe_push_anti_dependent_loads(PhaseIdealLoop* phase, Node* maybe_store, Node* control, Unique_Node_List &wq);
#ifdef ASSERT
static void verify(RootNode* root);
#endif

View File

@@ -415,10 +415,6 @@ void ShenandoahConcurrentGC::entry_reset() {
msg);
op_reset();
}
if (heap->mode()->is_generational()) {
heap->old_generation()->card_scan()->mark_read_table_as_clean();
}
}
void ShenandoahConcurrentGC::entry_scan_remembered_set() {
@@ -644,6 +640,10 @@ void ShenandoahConcurrentGC::op_reset() {
} else {
_generation->prepare_gc();
}
if (heap->mode()->is_generational()) {
heap->old_generation()->card_scan()->mark_read_table_as_clean();
}
}
class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {

View File

@@ -136,9 +136,15 @@ void ShenandoahDegenGC::op_degenerated() {
heap->set_unload_classes(_generation->heuristics()->can_unload_classes() &&
(!heap->mode()->is_generational() || _generation->is_global()));
if (heap->mode()->is_generational() && _generation->is_young()) {
// Swap remembered sets for young
_generation->swap_card_tables();
if (heap->mode()->is_generational()) {
// Clean the read table before swapping it. The end goal here is to have a clean
// write table, and to have the read table updated with the previous write table.
heap->old_generation()->card_scan()->mark_read_table_as_clean();
if (_generation->is_young()) {
// Swap remembered sets for young
_generation->swap_card_tables();
}
}
case _degenerated_roots:

View File

@@ -183,6 +183,29 @@ void ShenandoahGenerationalHeap::stop() {
regulator_thread()->stop();
}
bool ShenandoahGenerationalHeap::requires_barriers(stackChunkOop obj) const {
if (is_idle()) {
return false;
}
if (is_concurrent_young_mark_in_progress() && is_in_young(obj) && !marking_context()->allocated_after_mark_start(obj)) {
// We are marking young, this object is in young, and it is below the TAMS
return true;
}
if (is_in_old(obj)) {
// Card marking barriers are required for objects in the old generation
return true;
}
if (has_forwarded_objects()) {
// Object may have pointers that need to be updated
return true;
}
return false;
}
void ShenandoahGenerationalHeap::evacuate_collection_set(bool concurrent) {
ShenandoahRegionIterator regions;
ShenandoahGenerationalEvacuationTask task(this, &regions, concurrent, false /* only promote regions */);

View File

@@ -128,6 +128,8 @@ public:
void stop() override;
bool requires_barriers(stackChunkOop obj) const override;
// Used for logging the result of a region transfer outside the heap lock
struct TransferResult {
bool success;

View File

@@ -1452,27 +1452,23 @@ void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
}
}
size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
size_t ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) const {
assert(start->is_humongous_start(), "reclaim regions starting with the first one");
oop humongous_obj = cast_to_oop(start->bottom());
size_t size = humongous_obj->size();
size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
size_t index = start->index() + required_regions - 1;
assert(!start->has_live(), "liveness must be zero");
for(size_t i = 0; i < required_regions; i++) {
// Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
// as it expects that every region belongs to a humongous region starting with a humongous start region.
ShenandoahHeapRegion* region = get_region(index --);
assert(region->is_humongous(), "expect correct humongous start or continuation");
// Do not try to get the size of this humongous object. STW collections will
// have already unloaded classes, so an unmarked object may have a bad klass pointer.
ShenandoahHeapRegion* region = start;
size_t index = region->index();
do {
assert(region->is_humongous(), "Expect correct humongous start or continuation");
assert(!region->is_cset(), "Humongous region should not be in collection set");
region->make_trash_immediate();
}
return required_regions;
region = get_region(++index);
} while (region != nullptr && region->is_humongous_continuation());
// Return number of regions trashed
return index - start->index();
}
class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {

View File

@@ -828,7 +828,7 @@ public:
static inline void atomic_clear_oop(narrowOop* addr, oop compare);
static inline void atomic_clear_oop(narrowOop* addr, narrowOop compare);
size_t trash_humongous_region_at(ShenandoahHeapRegion *r);
size_t trash_humongous_region_at(ShenandoahHeapRegion *r) const;
static inline void increase_object_age(oop obj, uint additional_age);

View File

@@ -624,7 +624,7 @@ void ShenandoahDirectCardMarkRememberedSet::swap_card_tables() {
#ifdef ASSERT
CardValue* start_bp = &(_card_table->write_byte_map())[0];
CardValue* end_bp = &(new_ptr)[_card_table->last_valid_index()];
CardValue* end_bp = &(start_bp[_card_table->last_valid_index()]);
while (start_bp <= end_bp) {
assert(*start_bp == CardTable::clean_card_val(), "Should be clean: " PTR_FORMAT, p2i(start_bp));

View File

@@ -1936,7 +1936,7 @@ void ZPageAllocator::cleanup_failed_commit_multi_partition(ZMultiPartitionAlloca
}
const size_t committed = allocation->committed_capacity();
const ZVirtualMemory non_harvested_vmem = vmem.last_part(allocation->harvested());
const ZVirtualMemory non_harvested_vmem = partial_vmem.last_part(allocation->harvested());
const ZVirtualMemory committed_vmem = non_harvested_vmem.first_part(committed);
const ZVirtualMemory non_committed_vmem = non_harvested_vmem.last_part(committed);

View File

@@ -214,9 +214,20 @@ void ZPhysicalMemoryManager::free(const ZVirtualMemory& vmem, uint32_t numa_id)
});
}
static size_t inject_commit_limit(const ZVirtualMemory& vmem) {
// To facilitate easier interoperability with multi partition allocations we
// divide by ZNUMA::count(). Users of ZFailLargerCommits need to be aware of
// this when writing tests. In the future we could probe the VirtualMemoryManager
// and condition this division on whether the vmem is in the multi partition
// address space.
return align_up(MIN2(ZFailLargerCommits / ZNUMA::count(), vmem.size()), ZGranuleSize);
}
size_t ZPhysicalMemoryManager::commit(const ZVirtualMemory& vmem, uint32_t numa_id) {
zbacking_index* const pmem = _physical_mappings.addr(vmem.start());
const size_t size = vmem.size();
const size_t size = ZFailLargerCommits > 0
? inject_commit_limit(vmem)
: vmem.size();
size_t total_committed = 0;

View File

@@ -118,6 +118,11 @@
develop(bool, ZVerifyOops, false, \
"Verify accessed oops") \
\
develop(size_t, ZFailLargerCommits, 0, \
"Commits larger than ZFailLargerCommits will be truncated, " \
"used to stress page allocation commit failure paths " \
"(0: Disabled)") \
\
develop(uint, ZFakeNUMA, 1, \
"ZFakeNUMA is used to test the internal NUMA memory support " \
"without the need for UseNUMA") \

View File

@@ -100,8 +100,23 @@ class BaseBytecodeStream: StackObj {
void set_next_bci(int bci) { assert(0 <= bci && bci <= method()->code_size(), "illegal bci"); _next_bci = bci; }
// Bytecode-specific attributes
int dest() const { return bci() + bytecode().get_offset_s2(raw_code()); }
int dest_w() const { return bci() + bytecode().get_offset_s4(raw_code()); }
int get_offset_s2() const { return bytecode().get_offset_s2(raw_code()); }
int get_offset_s4() const { return bytecode().get_offset_s4(raw_code()); }
// These methods are not safe to use before or during verification as they may
// have large offsets and cause overflows
int dest() const {
int min_offset = -1 * max_method_code_size;
int offset = bytecode().get_offset_s2(raw_code());
guarantee(offset >= min_offset && offset <= max_method_code_size, "must be");
return bci() + offset;
}
int dest_w() const {
int min_offset = -1 * max_method_code_size;
int offset = bytecode().get_offset_s4(raw_code());
guarantee(offset >= min_offset && offset <= max_method_code_size, "must be");
return bci() + offset;
}
// One-byte indices.
u1 get_index_u1() const { assert_raw_index_size(1); return *(jubyte*)(bcp()+1); }

View File

@@ -132,14 +132,14 @@ InstanceKlass* JfrClassTransformer::create_new_instance_klass(InstanceKlass* ik,
}
// Redefining / retransforming?
const Klass* JfrClassTransformer::find_existing_klass(const InstanceKlass* ik, JavaThread* thread) {
const InstanceKlass* JfrClassTransformer::find_existing_klass(const InstanceKlass* ik, JavaThread* thread) {
assert(ik != nullptr, "invariant");
assert(thread != nullptr, "invariant");
JvmtiThreadState* const state = thread->jvmti_thread_state();
return state != nullptr ? klass_being_redefined(ik, state) : nullptr;
}
const Klass* JfrClassTransformer::klass_being_redefined(const InstanceKlass* ik, JvmtiThreadState* state) {
const InstanceKlass* JfrClassTransformer::klass_being_redefined(const InstanceKlass* ik, JvmtiThreadState* state) {
assert(ik != nullptr, "invariant");
assert(state != nullptr, "invariant");
const GrowableArray<Klass*>* const redef_klasses = state->get_classes_being_redefined();
@@ -149,9 +149,10 @@ const Klass* JfrClassTransformer::klass_being_redefined(const InstanceKlass* ik,
for (int i = 0; i < redef_klasses->length(); ++i) {
const Klass* const existing_klass = redef_klasses->at(i);
assert(existing_klass != nullptr, "invariant");
assert(existing_klass->is_instance_klass(), "invariant");
if (ik->name() == existing_klass->name() && ik->class_loader_data() == existing_klass->class_loader_data()) {
// 'ik' is a scratch klass. Return the klass being redefined.
return existing_klass;
return InstanceKlass::cast(existing_klass);
}
}
return nullptr;

View File

@@ -38,10 +38,10 @@ class InstanceKlass;
class JfrClassTransformer : AllStatic {
private:
static InstanceKlass* create_new_instance_klass(InstanceKlass* ik, ClassFileStream* stream, TRAPS);
static const Klass* klass_being_redefined(const InstanceKlass* ik, JvmtiThreadState* state);
static const InstanceKlass* klass_being_redefined(const InstanceKlass* ik, JvmtiThreadState* state);
public:
static const Klass* find_existing_klass(const InstanceKlass* ik, JavaThread* thread);
static const InstanceKlass* find_existing_klass(const InstanceKlass* ik, JavaThread* thread);
static InstanceKlass* create_instance_klass(InstanceKlass*& ik, ClassFileStream* stream, bool is_initial_load, JavaThread* thread);
static void copy_traceid(const InstanceKlass* ik, const InstanceKlass* new_ik);
static void transfer_cached_class_file_data(InstanceKlass* ik, InstanceKlass* new_ik, const ClassFileParser& parser, JavaThread* thread);

View File

@@ -36,6 +36,7 @@
#include "jfr/support/jfrResolution.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "jfr/support/methodtracer/jfrMethodTracer.hpp"
#include "jfr/support/methodtracer/jfrTraceTagging.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/klass.hpp"
@@ -88,12 +89,10 @@ void Jfr::on_klass_creation(InstanceKlass*& ik, ClassFileParser& parser, TRAPS)
}
}
void Jfr::on_klass_redefinition(const InstanceKlass* ik, Thread* thread) {
assert(JfrMethodTracer::in_use(), "invariant");
JfrMethodTracer::on_klass_redefinition(ik, thread);
void Jfr::on_klass_redefinition(const InstanceKlass* ik, const InstanceKlass* scratch_klass) {
JfrTraceTagging::on_klass_redefinition(ik, scratch_klass);
}
bool Jfr::is_excluded(Thread* t) {
return JfrJavaSupport::is_excluded(t);
}

View File

@@ -61,7 +61,7 @@ class Jfr : AllStatic {
static void include_thread(Thread* thread);
static void exclude_thread(Thread* thread);
static void on_klass_creation(InstanceKlass*& ik, ClassFileParser& parser, TRAPS);
static void on_klass_redefinition(const InstanceKlass* ik, Thread* thread);
static void on_klass_redefinition(const InstanceKlass* ik, const InstanceKlass* scratch_klass);
static void on_thread_start(Thread* thread);
static void on_thread_exit(Thread* thread);
static void on_resolution(const CallInfo& info, TRAPS);

View File

@@ -170,9 +170,15 @@ NO_TRANSITION(jboolean, jfr_set_throttle(JNIEnv* env, jclass jvm, jlong event_ty
return JNI_TRUE;
NO_TRANSITION_END
JVM_ENTRY_NO_ENV(void, jfr_set_cpu_throttle(JNIEnv* env, jclass jvm, jdouble rate, jboolean auto_adapt))
JVM_ENTRY_NO_ENV(void, jfr_set_cpu_rate(JNIEnv* env, jclass jvm, jdouble rate))
JfrEventSetting::set_enabled(JfrCPUTimeSampleEvent, rate > 0);
JfrCPUTimeThreadSampling::set_rate(rate, auto_adapt == JNI_TRUE);
JfrCPUTimeThreadSampling::set_rate(rate);
JVM_END
JVM_ENTRY_NO_ENV(void, jfr_set_cpu_period(JNIEnv* env, jclass jvm, jlong period_nanos))
assert(period_nanos >= 0, "invariant");
JfrEventSetting::set_enabled(JfrCPUTimeSampleEvent, period_nanos > 0);
JfrCPUTimeThreadSampling::set_period(period_nanos);
JVM_END
NO_TRANSITION(void, jfr_set_miscellaneous(JNIEnv* env, jclass jvm, jlong event_type_id, jlong value))

View File

@@ -129,7 +129,9 @@ jlong JNICALL jfr_get_unloaded_event_classes_count(JNIEnv* env, jclass jvm);
jboolean JNICALL jfr_set_throttle(JNIEnv* env, jclass jvm, jlong event_type_id, jlong event_sample_size, jlong period_ms);
void JNICALL jfr_set_cpu_throttle(JNIEnv* env, jclass jvm, jdouble rate, jboolean auto_adapt);
void JNICALL jfr_set_cpu_rate(JNIEnv* env, jclass jvm, jdouble rate);
void JNICALL jfr_set_cpu_period(JNIEnv* env, jclass jvm, jlong period_nanos);
void JNICALL jfr_set_miscellaneous(JNIEnv* env, jclass jvm, jlong id, jlong value);

View File

@@ -83,7 +83,8 @@ JfrJniMethodRegistration::JfrJniMethodRegistration(JNIEnv* env) {
(char*)"getUnloadedEventClassCount", (char*)"()J", (void*)jfr_get_unloaded_event_classes_count,
(char*)"setMiscellaneous", (char*)"(JJ)V", (void*)jfr_set_miscellaneous,
(char*)"setThrottle", (char*)"(JJJ)Z", (void*)jfr_set_throttle,
(char*)"setCPUThrottle", (char*)"(DZ)V", (void*)jfr_set_cpu_throttle,
(char*)"setCPURate", (char*)"(D)V", (void*)jfr_set_cpu_rate,
(char*)"setCPUPeriod", (char*)"(J)V", (void*)jfr_set_cpu_period,
(char*)"emitOldObjectSamples", (char*)"(JZZ)V", (void*)jfr_emit_old_object_samples,
(char*)"shouldRotateDisk", (char*)"()Z", (void*)jfr_should_rotate_disk,
(char*)"exclude", (char*)"(Ljava/lang/Thread;)V", (void*)jfr_exclude_thread,

View File

@@ -948,22 +948,24 @@
<Field type="long" contentType="bytes" name="freeSize" label="Free Size" description="Free swap space" />
</Event>
<Event name="ExecutionSample" category="Java Virtual Machine, Profiling" label="Method Profiling Sample" description="Snapshot of a threads state"
<Event name="ExecutionSample" category="Java Virtual Machine, Profiling" label="Java Execution Sample"
description="Snapshot of a thread executing Java code. Threads that are not executing Java code, including those waiting or executing native code, are not included."
period="everyChunk">
<Field type="Thread" name="sampledThread" label="Thread" />
<Field type="StackTrace" name="stackTrace" label="Stack Trace" />
<Field type="ThreadState" name="state" label="Thread State" />
</Event>
<Event name="NativeMethodSample" category="Java Virtual Machine, Profiling" label="Method Profiling Sample Native" description="Snapshot of a threads state when in native"
<Event name="NativeMethodSample" category="Java Virtual Machine, Profiling" label="Native Sample"
description="Snapshot of a thread in native code, executing or waiting. Threads that are executing Java code are not included."
period="everyChunk">
<Field type="Thread" name="sampledThread" label="Thread" />
<Field type="StackTrace" name="stackTrace" label="Stack Trace" />
<Field type="ThreadState" name="state" label="Thread State" />
</Event>
<Event name="CPUTimeSample" category="Java Virtual Machine, Profiling" label="CPU Time Method Sample"
description="Snapshot of a threads state from the CPU time sampler. The throttle can be either an upper bound for the event emission rate, e.g. 100/s, or the cpu-time period, e.g. 10ms, with s, ms, us and ns supported as time units."
<Event name="CPUTimeSample" category="Java Virtual Machine, Profiling" label="CPU Time Sample"
description="Snapshot of a threads state from the CPU time sampler, both threads executing native and Java code are included. The throttle setting can be either an upper bound for the event emission rate, e.g. 100/s, or the cpu-time period, e.g. 10ms, with s, ms, us and ns supported as time units."
throttle="true" thread="false" experimental="true" startTime="false">
<Field type="StackTrace" name="stackTrace" label="Stack Trace" />
<Field type="Thread" name="eventThread" label="Thread" />
@@ -972,7 +974,7 @@
<Field type="boolean" name="biased" label="Biased" description="The sample is safepoint-biased" />
</Event>
<Event name="CPUTimeSamplesLost" category="Java Virtual Machine, Profiling" label="CPU Time Method Profiling Lost Samples" description="Records that the CPU time sampler lost samples"
<Event name="CPUTimeSamplesLost" category="Java Virtual Machine, Profiling" label="CPU Time Samples Lost" description="Records that the CPU time sampler lost samples"
thread="false" stackTrace="false" startTime="false" experimental="true">
<Field type="int" name="lostSamples" label="Lost Samples" />
<Field type="Thread" name="eventThread" label="Thread" />

View File

@@ -45,7 +45,7 @@
#include "signals_posix.hpp"
static const int64_t AUTOADAPT_INTERVAL_MS = 100;
static const int64_t RECOMPUTE_INTERVAL_MS = 100;
static bool is_excluded(JavaThread* jt) {
return jt->is_hidden_from_external_view() ||
@@ -163,20 +163,42 @@ void JfrCPUTimeTraceQueue::clear() {
Atomic::release_store(&_head, (u4)0);
}
static int64_t compute_sampling_period(double rate) {
if (rate == 0) {
return 0;
// A throttle is either a rate or a fixed period
class JfrCPUSamplerThrottle {
union {
double _rate;
u8 _period_nanos;
};
bool _is_rate;
public:
JfrCPUSamplerThrottle(double rate) : _rate(rate), _is_rate(true) {
assert(rate >= 0, "invariant");
}
return os::active_processor_count() * 1000000000.0 / rate;
}
JfrCPUSamplerThrottle(u8 period_nanos) : _period_nanos(period_nanos), _is_rate(false) {}
bool enabled() const { return _is_rate ? _rate > 0 : _period_nanos > 0; }
int64_t compute_sampling_period() const {
if (_is_rate) {
if (_rate == 0) {
return 0;
}
return os::active_processor_count() * 1000000000.0 / _rate;
}
return _period_nanos;
}
};
class JfrCPUSamplerThread : public NonJavaThread {
friend class JfrCPUTimeThreadSampling;
private:
Semaphore _sample;
NonJavaThread* _sampler_thread;
double _rate;
bool _auto_adapt;
JfrCPUSamplerThrottle _throttle;
volatile int64_t _current_sampling_period_ns;
volatile bool _disenrolled;
// top bit is used to indicate that no signal handler should proceed
@@ -187,7 +209,7 @@ class JfrCPUSamplerThread : public NonJavaThread {
static const u4 STOP_SIGNAL_BIT = 0x80000000;
JfrCPUSamplerThread(double rate, bool auto_adapt);
JfrCPUSamplerThread(JfrCPUSamplerThrottle& throttle);
void start_thread();
@@ -195,9 +217,9 @@ class JfrCPUSamplerThread : public NonJavaThread {
void disenroll();
void update_all_thread_timers();
void auto_adapt_period_if_needed();
void recompute_period_if_needed();
void set_rate(double rate, bool auto_adapt);
void set_throttle(JfrCPUSamplerThrottle& throttle);
int64_t get_sampling_period() const { return Atomic::load(&_current_sampling_period_ns); };
void sample_thread(JfrSampleRequest& request, void* ucontext, JavaThread* jt, JfrThreadLocal* tl, JfrTicks& now);
@@ -231,18 +253,16 @@ public:
void trigger_async_processing_of_cpu_time_jfr_requests();
};
JfrCPUSamplerThread::JfrCPUSamplerThread(double rate, bool auto_adapt) :
JfrCPUSamplerThread::JfrCPUSamplerThread(JfrCPUSamplerThrottle& throttle) :
_sample(),
_sampler_thread(nullptr),
_rate(rate),
_auto_adapt(auto_adapt),
_current_sampling_period_ns(compute_sampling_period(rate)),
_throttle(throttle),
_current_sampling_period_ns(throttle.compute_sampling_period()),
_disenrolled(true),
_active_signal_handlers(STOP_SIGNAL_BIT),
_is_async_processing_of_cpu_time_jfr_requests_triggered(false),
_warned_about_timer_creation_failure(false),
_signal_handler_installed(false) {
assert(rate >= 0, "invariant");
}
void JfrCPUSamplerThread::trigger_async_processing_of_cpu_time_jfr_requests() {
@@ -321,7 +341,7 @@ void JfrCPUSamplerThread::disenroll() {
void JfrCPUSamplerThread::run() {
assert(_sampler_thread == nullptr, "invariant");
_sampler_thread = this;
int64_t last_auto_adapt_check = os::javaTimeNanos();
int64_t last_recompute_check = os::javaTimeNanos();
while (true) {
if (!_sample.trywait()) {
// disenrolled
@@ -329,9 +349,9 @@ void JfrCPUSamplerThread::run() {
}
_sample.signal();
if (os::javaTimeNanos() - last_auto_adapt_check > AUTOADAPT_INTERVAL_MS * 1000000) {
auto_adapt_period_if_needed();
last_auto_adapt_check = os::javaTimeNanos();
if (os::javaTimeNanos() - last_recompute_check > RECOMPUTE_INTERVAL_MS * 1000000) {
recompute_period_if_needed();
last_recompute_check = os::javaTimeNanos();
}
if (Atomic::cmpxchg(&_is_async_processing_of_cpu_time_jfr_requests_triggered, true, false)) {
@@ -442,42 +462,50 @@ JfrCPUTimeThreadSampling::~JfrCPUTimeThreadSampling() {
}
}
void JfrCPUTimeThreadSampling::create_sampler(double rate, bool auto_adapt) {
void JfrCPUTimeThreadSampling::create_sampler(JfrCPUSamplerThrottle& throttle) {
assert(_sampler == nullptr, "invariant");
_sampler = new JfrCPUSamplerThread(rate, auto_adapt);
_sampler = new JfrCPUSamplerThread(throttle);
_sampler->start_thread();
_sampler->enroll();
}
void JfrCPUTimeThreadSampling::update_run_state(double rate, bool auto_adapt) {
if (rate != 0) {
void JfrCPUTimeThreadSampling::update_run_state(JfrCPUSamplerThrottle& throttle) {
if (throttle.enabled()) {
if (_sampler == nullptr) {
create_sampler(rate, auto_adapt);
create_sampler(throttle);
} else {
_sampler->set_rate(rate, auto_adapt);
_sampler->set_throttle(throttle);
_sampler->enroll();
}
return;
}
if (_sampler != nullptr) {
_sampler->set_rate(rate /* 0 */, auto_adapt);
_sampler->set_throttle(throttle);
_sampler->disenroll();
}
}
void JfrCPUTimeThreadSampling::set_rate(double rate, bool auto_adapt) {
assert(rate >= 0, "invariant");
void JfrCPUTimeThreadSampling::set_rate(double rate) {
if (_instance == nullptr) {
return;
}
instance().set_rate_value(rate, auto_adapt);
JfrCPUSamplerThrottle throttle(rate);
instance().set_throttle_value(throttle);
}
void JfrCPUTimeThreadSampling::set_rate_value(double rate, bool auto_adapt) {
if (_sampler != nullptr) {
_sampler->set_rate(rate, auto_adapt);
void JfrCPUTimeThreadSampling::set_period(u8 nanos) {
if (_instance == nullptr) {
return;
}
update_run_state(rate, auto_adapt);
JfrCPUSamplerThrottle throttle(nanos);
instance().set_throttle_value(throttle);
}
void JfrCPUTimeThreadSampling::set_throttle_value(JfrCPUSamplerThrottle& throttle) {
if (_sampler != nullptr) {
_sampler->set_throttle(throttle);
}
update_run_state(throttle);
}
void JfrCPUTimeThreadSampling::on_javathread_create(JavaThread *thread) {
@@ -704,24 +732,21 @@ void JfrCPUSamplerThread::stop_timer() {
VMThread::execute(&op);
}
void JfrCPUSamplerThread::auto_adapt_period_if_needed() {
void JfrCPUSamplerThread::recompute_period_if_needed() {
int64_t current_period = get_sampling_period();
if (_auto_adapt || current_period == -1) {
int64_t period = compute_sampling_period(_rate);
if (period != current_period) {
Atomic::store(&_current_sampling_period_ns, period);
update_all_thread_timers();
}
int64_t period = _throttle.compute_sampling_period();
if (period != current_period) {
Atomic::store(&_current_sampling_period_ns, period);
update_all_thread_timers();
}
}
void JfrCPUSamplerThread::set_rate(double rate, bool auto_adapt) {
_rate = rate;
_auto_adapt = auto_adapt;
if (_rate > 0 && Atomic::load_acquire(&_disenrolled) == false) {
auto_adapt_period_if_needed();
void JfrCPUSamplerThread::set_throttle(JfrCPUSamplerThrottle& throttle) {
_throttle = throttle;
if (_throttle.enabled() && Atomic::load_acquire(&_disenrolled) == false) {
recompute_period_if_needed();
} else {
Atomic::store(&_current_sampling_period_ns, compute_sampling_period(rate));
Atomic::store(&_current_sampling_period_ns, _throttle.compute_sampling_period());
}
}
@@ -765,12 +790,18 @@ void JfrCPUTimeThreadSampling::destroy() {
_instance = nullptr;
}
void JfrCPUTimeThreadSampling::set_rate(double rate, bool auto_adapt) {
void JfrCPUTimeThreadSampling::set_rate(double rate) {
if (rate != 0) {
warn();
}
}
void JfrCPUTimeThreadSampling::set_period(u8 period_nanos) {
if (period_nanos != 0) {
warn();
}
}
void JfrCPUTimeThreadSampling::on_javathread_create(JavaThread* thread) {
}

View File

@@ -95,14 +95,16 @@ public:
class JfrCPUSamplerThread;
class JfrCPUSamplerThrottle;
class JfrCPUTimeThreadSampling : public JfrCHeapObj {
friend class JfrRecorder;
private:
JfrCPUSamplerThread* _sampler;
void create_sampler(double rate, bool auto_adapt);
void set_rate_value(double rate, bool auto_adapt);
void create_sampler(JfrCPUSamplerThrottle& throttle);
void set_throttle_value(JfrCPUSamplerThrottle& throttle);
JfrCPUTimeThreadSampling();
~JfrCPUTimeThreadSampling();
@@ -111,10 +113,13 @@ class JfrCPUTimeThreadSampling : public JfrCHeapObj {
static JfrCPUTimeThreadSampling* create();
static void destroy();
void update_run_state(double rate, bool auto_adapt);
void update_run_state(JfrCPUSamplerThrottle& throttle);
static void set_rate(JfrCPUSamplerThrottle& throttle);
public:
static void set_rate(double rate, bool auto_adapt);
static void set_rate(double rate);
static void set_period(u8 nanos);
static void on_javathread_create(JavaThread* thread);
static void on_javathread_terminate(JavaThread* thread);
@@ -140,7 +145,8 @@ private:
static void destroy();
public:
static void set_rate(double rate, bool auto_adapt);
static void set_rate(double rate);
static void set_period(u8 nanos);
static void on_javathread_create(JavaThread* thread);
static void on_javathread_terminate(JavaThread* thread);

View File

@@ -1,417 +0,0 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfr/recorder/checkpoint/types/jfrThreadGroup.hpp"
#include "jfr/utilities/jfrTypes.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/semaphore.hpp"
#include "utilities/growableArray.hpp"
// Initial capacity for the growable list of thread group entries.
static const int initial_array_size = 30;

// Scoped guard: acquires a binary semaphore on construction and releases it
// on destruction, giving the holder exclusive access to the JfrThreadGroup
// singleton and its entry list.
class ThreadGroupExclusiveAccess : public StackObj {
 private:
  static Semaphore _mutex_semaphore;
 public:
  ThreadGroupExclusiveAccess() { _mutex_semaphore.wait(); }
  ~ThreadGroupExclusiveAccess() { _mutex_semaphore.signal(); }
};

// Binary semaphore (initial count 1) backing the guard above.
Semaphore ThreadGroupExclusiveAccess::_mutex_semaphore(1);

// Lazily created singleton; installed under ThreadGroupExclusiveAccess in
// thread_group_id_internal().
JfrThreadGroup* JfrThreadGroup::_instance = nullptr;
// Resource-allocated pairing of a thread group Handle with an optional JNI
// weak global handle to the same java.lang.ThreadGroup object. The weak ref
// is only created when not at a safepoint (see populate_thread_group_hierarchy).
class JfrThreadGroupPointers : public ResourceObj {
 private:
  const Handle _thread_group_handle;
  jweak _thread_group_weak_ref;
 public:
  JfrThreadGroupPointers(Handle thread_group_handle, jweak thread_group_weak_ref);
  Handle thread_group_handle() const;
  jweak thread_group_weak_ref() const;
  oopDesc* thread_group_oop() const;
  jweak transfer_weak_global_handle_ownership();
  void clear_weak_ref();
};

JfrThreadGroupPointers::JfrThreadGroupPointers(Handle thread_group_handle, jweak thread_group_weak_ref) :
  _thread_group_handle(thread_group_handle),
  _thread_group_weak_ref(thread_group_weak_ref) {}

Handle JfrThreadGroupPointers::thread_group_handle() const {
  return _thread_group_handle;
}

jweak JfrThreadGroupPointers::thread_group_weak_ref() const {
  return _thread_group_weak_ref;
}

// Naked oop of the thread group; asserts that the weak ref (if any) still
// resolves to the same object as the handle.
oopDesc* JfrThreadGroupPointers::thread_group_oop() const {
  assert(_thread_group_weak_ref == nullptr ||
         JNIHandles::resolve_non_null(_thread_group_weak_ref) == _thread_group_handle(), "invariant");
  return _thread_group_handle();
}

// Relinquishes ownership of the weak global handle to the caller;
// the field is nulled so clear_weak_ref() will not destroy it later.
jweak JfrThreadGroupPointers::transfer_weak_global_handle_ownership() {
  jweak temp = _thread_group_weak_ref;
  _thread_group_weak_ref = nullptr;
  return temp;
}

// Destroys the weak global handle, if still owned.
void JfrThreadGroupPointers::clear_weak_ref() {
  if (nullptr != _thread_group_weak_ref) {
    JNIHandles::destroy_weak_global(_thread_group_weak_ref);
  }
}
// Walks a thread's threadgroup ancestry: builds the hierarchy on construction
// (immediate group first, ancestors after) and then iterates it back-to-front,
// i.e. from the outermost group down to the thread's immediate group.
class JfrThreadGroupsHelper : public ResourceObj {
 private:
  static const int invalid_iterator_pos = -1;
  GrowableArray<JfrThreadGroupPointers*>* _thread_group_hierarchy;
  int _current_iterator_pos;
  int populate_thread_group_hierarchy(const JavaThread* jt, Thread* current);
  JfrThreadGroupPointers& at(int index);
 public:
  JfrThreadGroupsHelper(const JavaThread* jt, Thread* current);
  ~JfrThreadGroupsHelper();
  JfrThreadGroupPointers& next();
  bool is_valid() const;
  bool has_next() const;
};

JfrThreadGroupsHelper::JfrThreadGroupsHelper(const JavaThread* jt, Thread* current) {
  _thread_group_hierarchy = new GrowableArray<JfrThreadGroupPointers*>(10);
  // Iteration starts at the last element (outermost group) and walks towards 0.
  _current_iterator_pos = populate_thread_group_hierarchy(jt, current) - 1;
}

// Requires the iteration to have been exhausted; releases any weak global
// handles still owned by the hierarchy entries.
JfrThreadGroupsHelper::~JfrThreadGroupsHelper() {
  assert(_current_iterator_pos == invalid_iterator_pos, "invariant");
  for (int i = 0; i < _thread_group_hierarchy->length(); ++i) {
    _thread_group_hierarchy->at(i)->clear_weak_ref();
  }
}

JfrThreadGroupPointers& JfrThreadGroupsHelper::at(int index) {
  assert(_thread_group_hierarchy != nullptr, "invariant");
  assert(index > invalid_iterator_pos && index < _thread_group_hierarchy->length(), "invariant");
  return *(_thread_group_hierarchy->at(index));
}

bool JfrThreadGroupsHelper::has_next() const {
  return _current_iterator_pos > invalid_iterator_pos;
}

// Valid when the thread has at least one reachable thread group.
bool JfrThreadGroupsHelper::is_valid() const {
  return (_thread_group_hierarchy != nullptr && _thread_group_hierarchy->length() > 0);
}

JfrThreadGroupPointers& JfrThreadGroupsHelper::next() {
  assert(is_valid(), "invariant");
  return at(_current_iterator_pos--);
}
/*
 * If not at a safepoint, we create global weak references for
 * all reachable threadgroups for this thread.
 * If we are at a safepoint, the caller is the VMThread during
 * JFR checkpointing. It can use naked oops, because nothing
 * will move before the list of threadgroups is cleared and
 * mutator threads restarted. The threadgroup list is cleared
 * later by the VMThread as one of the final steps in JFR checkpointing
 * (not here).
 */
// Appends jt's immediate thread group followed by each ancestor group to
// _thread_group_hierarchy. Returns the number of groups appended (0 when the
// thread object or its immediate group is not yet set).
int JfrThreadGroupsHelper::populate_thread_group_hierarchy(const JavaThread* jt, Thread* current) {
  assert(jt != nullptr && jt->is_Java_thread(), "invariant");
  assert(current != nullptr, "invariant");
  assert(_thread_group_hierarchy != nullptr, "invariant");

  oop thread_oop = jt->threadObj();
  if (thread_oop == nullptr) {
    return 0;
  }
  // immediate thread group
  Handle thread_group_handle(current, java_lang_Thread::threadGroup(thread_oop));
  if (thread_group_handle == nullptr) {
    return 0;
  }
  const bool use_weak_handles = !SafepointSynchronize::is_at_safepoint();
  jweak thread_group_weak_ref = use_weak_handles ? JNIHandles::make_weak_global(thread_group_handle) : nullptr;
  JfrThreadGroupPointers* thread_group_pointers = new JfrThreadGroupPointers(thread_group_handle, thread_group_weak_ref);
  _thread_group_hierarchy->append(thread_group_pointers);
  // immediate parent thread group
  oop parent_thread_group_obj = java_lang_ThreadGroup::parent(thread_group_handle());
  Handle parent_thread_group_handle(current, parent_thread_group_obj);
  // and check parents parents...
  while (parent_thread_group_handle != nullptr) {
    const jweak parent_group_weak_ref = use_weak_handles ? JNIHandles::make_weak_global(parent_thread_group_handle) : nullptr;
    thread_group_pointers = new JfrThreadGroupPointers(parent_thread_group_handle, parent_group_weak_ref);
    _thread_group_hierarchy->append(thread_group_pointers);
    parent_thread_group_obj = java_lang_ThreadGroup::parent(parent_thread_group_handle());
    parent_thread_group_handle = Handle(current, parent_thread_group_obj);
  }
  return _thread_group_hierarchy->length();
}
// Hands out monotonically increasing thread group ids. Id 1 is reserved for
// the "VirtualThreads" group, so the first id issued here is 2. Callers
// reach this via add_entry() while holding ThreadGroupExclusiveAccess.
static traceid next_id() {
  static traceid _tgid_counter = 1; // 1 is reserved for thread group "VirtualThreads"
  _tgid_counter += 1;
  return _tgid_counter;
}
// CHeap-allocated record for one java.lang.ThreadGroup: its trace id, its
// parent group's trace id, a copied utf8 name, and a reference to the group
// object (direct oop or weak handle, see the member comments below).
class JfrThreadGroup::JfrThreadGroupEntry : public JfrCHeapObj {
  friend class JfrThreadGroup;
 private:
  traceid _thread_group_id;
  traceid _parent_group_id;
  char* _thread_group_name; // utf8 format
  // If an entry is created during a safepoint, the
  // _thread_group_oop contains a direct oop to
  // the java.lang.ThreadGroup object.
  // If an entry is created on javathread exit time (not at safepoint),
  // _thread_group_weak_ref contains a JNI weak global handle
  // indirection to the java.lang.ThreadGroup object.
  // Note: we cannot use a union here since CHECK_UNHANDLED_OOPS makes oop have
  // a ctor which isn't allowed in a union by the SunStudio compiler
  oop _thread_group_oop;
  jweak _thread_group_weak_ref;

  JfrThreadGroupEntry(const char* tgstr, JfrThreadGroupPointers& ptrs);
  ~JfrThreadGroupEntry();

  traceid thread_group_id() const { return _thread_group_id; }
  void set_thread_group_id(traceid tgid) { _thread_group_id = tgid; }
  const char* thread_group_name() const { return _thread_group_name; }
  void set_thread_group_name(const char* tgname);
  traceid parent_group_id() const { return _parent_group_id; }
  void set_parent_group_id(traceid pgid) { _parent_group_id = pgid; }
  void set_thread_group(JfrThreadGroupPointers& ptrs);
  bool is_equal(const JfrThreadGroupPointers& ptrs) const;
  oop thread_group() const;
};

JfrThreadGroup::JfrThreadGroupEntry::JfrThreadGroupEntry(const char* tgname, JfrThreadGroupPointers& ptrs) :
  _thread_group_id(0),
  _parent_group_id(0),
  _thread_group_name(nullptr),
  _thread_group_oop(nullptr),
  _thread_group_weak_ref(nullptr) {
  set_thread_group_name(tgname);
  set_thread_group(ptrs);
}

// Releases the copied name buffer and the weak global handle, if present.
JfrThreadGroup::JfrThreadGroupEntry::~JfrThreadGroupEntry() {
  if (_thread_group_name != nullptr) {
    JfrCHeapObj::free(_thread_group_name, strlen(_thread_group_name) + 1);
  }
  if (_thread_group_weak_ref != nullptr) {
    JNIHandles::destroy_weak_global(_thread_group_weak_ref);
  }
}

// Copies tgname (may be null) into a CHeap-allocated, NUL-terminated buffer.
void JfrThreadGroup::JfrThreadGroupEntry::set_thread_group_name(const char* tgname) {
  assert(_thread_group_name == nullptr, "invariant");
  if (tgname != nullptr) {
    size_t len = strlen(tgname);
    _thread_group_name = JfrCHeapObj::new_array<char>(len + 1);
    strncpy(_thread_group_name, tgname, len + 1);
  }
}

// Resolves the thread group oop, via the weak handle when one is held.
oop JfrThreadGroup::JfrThreadGroupEntry::thread_group() const {
  return _thread_group_weak_ref != nullptr ? JNIHandles::resolve(_thread_group_weak_ref) : _thread_group_oop;
}

// Takes over the weak handle if ptrs owns one; otherwise stores the naked oop
// (safepoint case, see populate_thread_group_hierarchy's header comment).
void JfrThreadGroup::JfrThreadGroupEntry::set_thread_group(JfrThreadGroupPointers& ptrs) {
  _thread_group_weak_ref = ptrs.transfer_weak_global_handle_ownership();
  if (_thread_group_weak_ref == nullptr) {
    _thread_group_oop = ptrs.thread_group_oop();
    assert(_thread_group_oop != nullptr, "invariant");
  } else {
    _thread_group_oop = nullptr;
  }
}
JfrThreadGroup::JfrThreadGroup() :
  _list(new (mtTracing) GrowableArray<JfrThreadGroupEntry*>(initial_array_size, mtTracing)) {}

// Deletes every entry and then the list itself.
JfrThreadGroup::~JfrThreadGroup() {
  if (_list != nullptr) {
    for (int i = 0; i < _list->length(); i++) {
      JfrThreadGroupEntry* e = _list->at(i);
      delete e;
    }
    delete _list;
  }
}

JfrThreadGroup* JfrThreadGroup::instance() {
  return _instance;
}

void JfrThreadGroup::set_instance(JfrThreadGroup* new_instance) {
  _instance = new_instance;
}

// Returns the trace id of jt's immediate thread group, or 0 when the thread
// has no reachable thread group yet.
traceid JfrThreadGroup::thread_group_id(const JavaThread* jt, Thread* current) {
  HandleMark hm(current);
  JfrThreadGroupsHelper helper(jt, current);
  return helper.is_valid() ? thread_group_id_internal(helper) : 0;
}

// Convenience overload for a thread querying its own group id.
traceid JfrThreadGroup::thread_group_id(JavaThread* const jt) {
  return thread_group_id(jt, jt);
}
// Under the exclusive lock: lazily creates the singleton, then walks the
// hierarchy from the outermost group down to the immediate group, adding an
// entry for each group not seen before and wiring its parent id. Returns the
// id of the immediate group (the last one iterated).
traceid JfrThreadGroup::thread_group_id_internal(JfrThreadGroupsHelper& helper) {
  ThreadGroupExclusiveAccess lock;
  JfrThreadGroup* tg_instance = instance();
  if (tg_instance == nullptr) {
    tg_instance = new JfrThreadGroup();
    if (tg_instance == nullptr) {
      return 0;
    }
    set_instance(tg_instance);
  }

  JfrThreadGroupEntry* tge = nullptr;
  traceid parent_thread_group_id = 0;
  while (helper.has_next()) {
    JfrThreadGroupPointers& ptrs = helper.next();
    tge = tg_instance->find_entry(ptrs);
    if (nullptr == tge) {
      tge = tg_instance->new_entry(ptrs);
      assert(tge != nullptr, "invariant");
      tge->set_parent_group_id(parent_thread_group_id);
    }
    parent_thread_group_id = tge->thread_group_id();
  }
  // the last entry in the hierarchy is the immediate thread group
  return tge->thread_group_id();
}

// Entries match when they refer to the same java.lang.ThreadGroup object.
bool JfrThreadGroup::JfrThreadGroupEntry::is_equal(const JfrThreadGroupPointers& ptrs) const {
  return ptrs.thread_group_oop() == thread_group();
}
// Linear scan for an entry referring to the same thread group object;
// returns nullptr when no such entry exists.
JfrThreadGroup::JfrThreadGroupEntry*
JfrThreadGroup::find_entry(const JfrThreadGroupPointers& ptrs) const {
  for (int index = 0; index < _list->length(); ++index) {
    JfrThreadGroupEntry* curtge = _list->at(index);
    if (curtge->is_equal(ptrs)) {
      return curtge;
    }
  }
  return (JfrThreadGroupEntry*) nullptr;
}

// Assumes you already searched for the existence
// of a corresponding entry in find_entry().
JfrThreadGroup::JfrThreadGroupEntry*
JfrThreadGroup::new_entry(JfrThreadGroupPointers& ptrs) {
  JfrThreadGroupEntry* const tge = new JfrThreadGroupEntry(java_lang_ThreadGroup::name(ptrs.thread_group_oop()), ptrs);
  add_entry(tge);
  return tge;
}

// Assigns the entry a fresh trace id and appends it; returns its list index.
int JfrThreadGroup::add_entry(JfrThreadGroupEntry* tge) {
  assert(tge != nullptr, "attempting to add a null entry!");
  assert(0 == tge->thread_group_id(), "id must be unassigned!");
  tge->set_thread_group_id(next_id());
  return _list->append(tge);
}
// Writes the reserved "VirtualThreads" group (key 1) followed by every
// recorded entry as (key, parent id, name).
void JfrThreadGroup::write_thread_group_entries(JfrCheckpointWriter& writer) const {
  assert(_list != nullptr && !_list->is_empty(), "should not need be here!");
  const int number_of_tg_entries = _list->length();
  writer.write_count(number_of_tg_entries + 1); // + VirtualThread group
  writer.write_key(1); // 1 is reserved for VirtualThread group
  writer.write<traceid>(0); // parent
  const oop vgroup = java_lang_Thread_Constants::get_VTHREAD_GROUP();
  assert(vgroup != (oop)nullptr, "invariant");
  const char* const vgroup_name = java_lang_ThreadGroup::name(vgroup);
  assert(vgroup_name != nullptr, "invariant");
  writer.write(vgroup_name);
  for (int index = 0; index < number_of_tg_entries; ++index) {
    const JfrThreadGroupEntry* const curtge = _list->at(index);
    writer.write_key(curtge->thread_group_id());
    writer.write(curtge->parent_group_id());
    writer.write(curtge->thread_group_name());
  }
}

// Writes thread_group_id and its ancestor chain only. Parent entries were
// appended before their children (outermost group first, see
// thread_group_id_internal), so a single backwards scan meets the requested
// group first and then each ancestor as thread_group_id is re-aimed at the
// parent. Restores the writer context if nothing matched.
void JfrThreadGroup::write_selective_thread_group(JfrCheckpointWriter* writer, traceid thread_group_id) const {
  assert(writer != nullptr, "invariant");
  assert(_list != nullptr && !_list->is_empty(), "should not need be here!");
  assert(thread_group_id != 1, "should not need be here!");
  const int number_of_tg_entries = _list->length();

  // save context
  const JfrCheckpointContext ctx = writer->context();
  writer->write_type(TYPE_THREADGROUP);
  const jlong count_offset = writer->reserve(sizeof(u4)); // Don't know how many yet
  int number_of_entries_written = 0;
  for (int index = number_of_tg_entries - 1; index >= 0; --index) {
    const JfrThreadGroupEntry* const curtge = _list->at(index);
    if (thread_group_id == curtge->thread_group_id()) {
      writer->write_key(curtge->thread_group_id());
      writer->write(curtge->parent_group_id());
      writer->write(curtge->thread_group_name());
      ++number_of_entries_written;
      thread_group_id = curtge->parent_group_id();
    }
  }
  if (number_of_entries_written == 0) {
    // nothing to write, restore context
    writer->set_context(ctx);
    return;
  }
  assert(number_of_entries_written > 0, "invariant");
  writer->write_count(number_of_entries_written, count_offset);
}

// Write out JfrThreadGroup instance and then delete it
void JfrThreadGroup::serialize(JfrCheckpointWriter& writer) {
  ThreadGroupExclusiveAccess lock;
  JfrThreadGroup* tg_instance = instance();
  assert(tg_instance != nullptr, "invariant");
  tg_instance->write_thread_group_entries(writer);
}

// for writing a particular thread group
void JfrThreadGroup::serialize(JfrCheckpointWriter* writer, traceid thread_group_id) {
  assert(writer != nullptr, "invariant");
  ThreadGroupExclusiveAccess lock;
  JfrThreadGroup* const tg_instance = instance();
  assert(tg_instance != nullptr, "invariant");
  tg_instance->write_selective_thread_group(writer, thread_group_id);
}

View File

@@ -0,0 +1,331 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "jfr/jni/jfrJavaSupport.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfr/recorder/checkpoint/types/jfrThreadGroupManager.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
#include "jfr/utilities/jfrAllocation.hpp"
#include "jfr/utilities/jfrLinkedList.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/semaphore.hpp"
#include "runtime/thread.inline.hpp"
#include "utilities/growableArray.hpp"
// Scoped guard: acquires a binary semaphore on construction and releases it
// on destruction, serializing access to the global thread group list.
class ThreadGroupExclusiveAccess : public StackObj {
 private:
  static Semaphore _mutex_semaphore;
 public:
  ThreadGroupExclusiveAccess() { _mutex_semaphore.wait(); }
  ~ThreadGroupExclusiveAccess() { _mutex_semaphore.signal(); }
};

// Binary semaphore (initial count 1) backing the guard above.
Semaphore ThreadGroupExclusiveAccess::_mutex_semaphore(1);
// Hands out monotonically increasing thread group ids.
static traceid next_id() {
  // Id 1 is reserved for thread group "VirtualThreads"; the first id issued
  // here is therefore 2.
  static traceid _id_counter = 1;
  _id_counter += 1;
  return _id_counter;
}
// CHeap-allocated node in the global thread group list. Holds the group's
// trace id, a pointer to its parent node, a copied utf8 name, a JNI weak
// global handle to the java.lang.ThreadGroup object, and the epoch generation
// it was last serialized in (enables once-per-generation writes).
class JfrThreadGroup : public JfrCHeapObj {
  template <typename, typename>
  friend class JfrLinkedList;
 private:
  mutable const JfrThreadGroup* _next;  // list link, maintained by JfrLinkedList
  const JfrThreadGroup* _parent;        // parent group node; nullptr for a root group
  traceid _tgid;
  char* _tg_name;                       // utf8 format
  jweak _tg_handle;                     // weak ref; resolves to nullptr once the group is collected
  mutable u2 _generation;               // epoch generation this node was last written in
 public:
  JfrThreadGroup(Handle tg, const JfrThreadGroup* parent) :
    _next(nullptr), _parent(parent), _tgid(next_id()), _tg_name(nullptr),
    _tg_handle(JNIHandles::make_weak_global(tg)), _generation(0) {
    // Copy the group name, if it has one, into a CHeap buffer we own.
    const char* name = java_lang_ThreadGroup::name(tg());
    if (name != nullptr) {
      const size_t len = strlen(name);
      _tg_name = JfrCHeapObj::new_array<char>(len + 1);
      strncpy(_tg_name, name, len + 1);
    }
  }

  ~JfrThreadGroup() {
    JNIHandles::destroy_weak_global(_tg_handle);
    if (_tg_name != nullptr) {
      JfrCHeapObj::free(_tg_name, strlen(_tg_name) + 1);
    }
  }

  const JfrThreadGroup* next() const { return _next; }
  traceid id() const { return _tgid; }

  const char* name() const {
    return _tg_name;
  }

  const JfrThreadGroup* parent() const { return _parent; }

  traceid parent_id() const {
    return _parent != nullptr ? _parent->id() : 0;
  }

  // True once the underlying ThreadGroup object has been garbage collected.
  bool is_dead() const {
    return JNIHandles::resolve(_tg_handle) == nullptr;
  }

  // Identity comparison against a live ThreadGroup oop.
  bool operator==(oop tg) const {
    assert(tg != nullptr, "invariant");
    return tg == JNIHandles::resolve(_tg_handle);
  }

  // True if this node has not yet been written in the current epoch generation.
  bool should_write() const {
    return !JfrTraceIdEpoch::is_current_epoch_generation(_generation);
  }

  void set_written() const {
    assert(should_write(), "invariant");
    _generation = JfrTraceIdEpoch::epoch_generation();
  }
};
// Global singly-linked list of all known thread groups; created/destroyed by
// JfrThreadGroupManager and accessed under ThreadGroupExclusiveAccess.
typedef JfrLinkedList<const JfrThreadGroup> JfrThreadGroupList;

static JfrThreadGroupList* _list = nullptr;

static JfrThreadGroupList& list() {
  assert(_list != nullptr, "invariant");
  return *_list;
}

bool JfrThreadGroupManager::create() {
  assert(_list == nullptr, "invariant");
  _list = new JfrThreadGroupList();
  return _list != nullptr;
}

void JfrThreadGroupManager::destroy() {
  delete _list;
  _list = nullptr;
}
// Appends jt's immediate thread group and then each ancestor group to
// hierarchy. Returns the number of groups appended (0 when the thread object
// or its immediate group is not yet set).
static int populate(GrowableArray<Handle>* hierarchy, const JavaThread* jt, Thread* current) {
  assert(hierarchy != nullptr, "invariant");
  assert(jt != nullptr, "invariant");
  assert(current == Thread::current(), "invariant");

  oop thread_oop = jt->threadObj();
  if (thread_oop == nullptr) {
    return 0;
  }
  // Immediate thread group.
  const Handle tg_handle(current, java_lang_Thread::threadGroup(thread_oop));
  if (tg_handle.is_null()) {
    return 0;
  }
  hierarchy->append(tg_handle);
  // Thread group parent and then its parents...
  Handle parent_tg_handle(current, java_lang_ThreadGroup::parent(tg_handle()));
  while (parent_tg_handle != nullptr) {
    hierarchy->append(parent_tg_handle);
    parent_tg_handle = Handle(current, java_lang_ThreadGroup::parent(parent_tg_handle()));
  }
  return hierarchy->length();
}
// Resource-allocated snapshot of a thread's threadgroup ancestry. populate()
// stores the immediate group first and ancestors after; iteration runs
// back-to-front, i.e. from the outermost (root) group down to the immediate one.
class JfrThreadGroupLookup : public ResourceObj {
  static const int invalid_iterator = -1;
 private:
  GrowableArray<Handle>* _hierarchy;
  mutable int _iterator;
 public:
  JfrThreadGroupLookup(const JavaThread* jt, Thread* current) :
    _hierarchy(new GrowableArray<Handle>(16)),
    _iterator(populate(_hierarchy, jt, current) - 1) {}

  bool has_next() const {
    return _iterator > invalid_iterator;
  }

  const Handle& next() const {
    assert(has_next(), "invariant");
    return _hierarchy->at(_iterator--);
  }
};
// Returns the list node for tg_oop, creating and linking a new node (with the
// given parent) when none exists yet. Callers reach this under
// ThreadGroupExclusiveAccess (see find()).
static const JfrThreadGroup* find_or_add(const Handle& tg_oop, const JfrThreadGroup* parent) {
  assert(parent == nullptr || list().in_list(parent), "invariant");
  const JfrThreadGroup* match = nullptr;
  for (const JfrThreadGroup* node = list().head(); node != nullptr; node = node->next()) {
    if (*node == tg_oop()) {
      assert(node->parent() == parent, "invariant");
      match = node;
      break;
    }
  }
  if (match == nullptr) {
    match = new JfrThreadGroup(tg_oop, parent);
    list().add(match);
  }
  return match;
}
// Resolves the trace id of the lookup's immediate thread group. The lookup
// iterates root-first, so each node's parent already exists (or was just
// created) by the time the node itself is looked up.
static traceid find_tgid(const JfrThreadGroupLookup& lookup) {
  const JfrThreadGroup* current = nullptr;
  while (lookup.has_next()) {
    current = find_or_add(lookup.next(), current);
  }
  return current == nullptr ? 0 : current->id();
}

// Locked wrapper around find_tgid.
static traceid find(const JfrThreadGroupLookup& lookup) {
  ThreadGroupExclusiveAccess lock;
  return find_tgid(lookup);
}
// Thread group id for the calling thread (jt resolves its own ancestry).
traceid JfrThreadGroupManager::thread_group_id(JavaThread* jt) {
  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(jt);)
  ResourceMark rm(jt);
  HandleMark hm(jt);
  const JfrThreadGroupLookup lookup(jt, jt);
  return find(lookup);
}

// Thread group id for jt, resolved by current; a JavaThread caller must be
// in state _thread_in_vm.
traceid JfrThreadGroupManager::thread_group_id(const JavaThread* jt, Thread* current) {
  assert(jt != nullptr, "invariant");
  assert(current != nullptr, "invariant");
  assert(!current->is_Java_thread() || JavaThread::cast(current)->thread_state() == _thread_in_vm, "invariant");
  ResourceMark rm(current);
  HandleMark hm(current);
  const JfrThreadGroupLookup lookup(jt, current);
  return find(lookup);
}
// Writes the reserved constant-pool entry (key 1, parent 0) for the
// VirtualThread group.
static void write_virtual_thread_group(JfrCheckpointWriter& writer) {
  writer.write_key(1); // 1 is reserved for VirtualThread group
  writer.write<traceid>(0); // parent
  const oop vgroup = java_lang_Thread_Constants::get_VTHREAD_GROUP();
  assert(vgroup != (oop)nullptr, "invariant");
  const char* const vgroup_name = java_lang_ThreadGroup::name(vgroup);
  assert(vgroup_name != nullptr, "invariant");
  writer.write(vgroup_name);
}

// Writes one thread group as (key, parent id, name) and returns 1, or returns
// 0 when the group was already written this epoch generation. Blob writes
// (to_blob) are unconditional and do not update the generation marker.
static int write_thread_group(JfrCheckpointWriter& writer, const JfrThreadGroup* tg, bool to_blob = false) {
  assert(tg != nullptr, "invariant");
  if (tg->should_write() || to_blob) {
    writer.write_key(tg->id());
    writer.write(tg->parent_id());
    writer.write(tg->name());
    if (!to_blob) {
      tg->set_written();
    }
    return 1;
  }
  return 0;
}
// For writing all live thread groups while removing and deleting dead thread groups.
void JfrThreadGroupManager::serialize(JfrCheckpointWriter& writer) {
  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(JavaThread::current());)
  const uint64_t count_offset = writer.reserve(sizeof(u4)); // Don't know how many yet
  // First write the pre-defined ThreadGroup for virtual threads.
  write_virtual_thread_group(writer);
  int number_of_groups_written = 1;
  const JfrThreadGroup* next = nullptr;
  const JfrThreadGroup* prev = nullptr;
  {
    ThreadGroupExclusiveAccess lock;
    const JfrThreadGroup* tg = list().head();
    while (tg != nullptr) {
      next = tg->next();
      if (tg->is_dead()) {
        // Unlink and reclaim groups whose ThreadGroup object was collected.
        // NOTE(review): a live child node still holds a raw _parent pointer to
        // its parent node — presumably a parent group cannot be collected
        // while a child is live (children reference their parent); verify.
        prev = list().excise(prev, tg);
        assert(!list().in_list(tg), "invariant");
        delete tg;
        tg = next;
        continue;
      }
      // write_thread_group returns 0 for nodes already written this generation.
      number_of_groups_written += write_thread_group(writer, tg);
      prev = tg;
      tg = next;
    }
  }
  assert(number_of_groups_written > 0, "invariant");
  writer.write_count(number_of_groups_written, count_offset);
}
// For writing a specific thread group and its ancestry.
void JfrThreadGroupManager::serialize(JfrCheckpointWriter& writer, traceid tgid, bool to_blob) {
  DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(JavaThread::current());)
  // save context
  const JfrCheckpointContext ctx = writer.context();
  writer.write_type(TYPE_THREADGROUP);
  const uint64_t count_offset = writer.reserve(sizeof(u4)); // Don't know how many yet
  int number_of_groups_written = 0;
  {
    ThreadGroupExclusiveAccess lock;
    const JfrThreadGroup* tg = list().head();
    while (tg != nullptr) {
      if (tgid == tg->id()) {
        // Found the requested group: walk up the parent chain, writing each
        // ancestor (subject to write_thread_group's per-generation filter).
        while (tg != nullptr) {
          number_of_groups_written += write_thread_group(writer, tg, to_blob);
          tg = tg->parent();
        }
        break;
      }
      tg = tg->next();
    }
  }
  if (number_of_groups_written == 0) {
    // nothing to write, restore context
    writer.set_context(ctx);
    return;
  }
  assert(number_of_groups_written > 0, "invariant");
  writer.write_count(number_of_groups_written, count_offset);
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,44 +22,27 @@
*
*/
#ifndef SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTHREADGROUP_HPP
#define SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTHREADGROUP_HPP
#ifndef SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTHREADGROUPMANAGER_HPP
#define SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTHREADGROUPMANAGER_HPP
#include "jfr/utilities/jfrAllocation.hpp"
#include "jfr/utilities/jfrTypes.hpp"
#include "jni.h"
#include "memory/allStatic.hpp"
class JfrCheckpointWriter;
template <typename>
class GrowableArray;
class JfrThreadGroupsHelper;
class JfrThreadGroupPointers;
class JfrThreadGroup : public JfrCHeapObj {
friend class JfrCheckpointThreadClosure;
class JfrThreadGroupManager : public AllStatic {
friend class JfrRecorder;
private:
static JfrThreadGroup* _instance;
class JfrThreadGroupEntry;
GrowableArray<JfrThreadGroupEntry*>* _list;
JfrThreadGroup();
JfrThreadGroupEntry* find_entry(const JfrThreadGroupPointers& ptrs) const;
JfrThreadGroupEntry* new_entry(JfrThreadGroupPointers& ptrs);
int add_entry(JfrThreadGroupEntry* const tge);
void write_thread_group_entries(JfrCheckpointWriter& writer) const;
void write_selective_thread_group(JfrCheckpointWriter* writer, traceid thread_group_id) const;
static traceid thread_group_id_internal(JfrThreadGroupsHelper& helper);
static JfrThreadGroup* instance();
static void set_instance(JfrThreadGroup* new_instance);
static bool create();
static void destroy();
public:
~JfrThreadGroup();
static void serialize(JfrCheckpointWriter& w);
static void serialize(JfrCheckpointWriter* w, traceid thread_group_id);
static void serialize(JfrCheckpointWriter& w, traceid tgid, bool is_blob);
static traceid thread_group_id(JavaThread* thread);
static traceid thread_group_id(const JavaThread* thread, Thread* current);
};
#endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTHREADGROUP_HPP
#endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_JFRTHREADGROUPMANAGER_HPP

View File

@@ -32,7 +32,7 @@
#include "gc/shared/gcWhen.hpp"
#include "jfr/leakprofiler/leakProfiler.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointWriter.hpp"
#include "jfr/recorder/checkpoint/types/jfrThreadGroup.hpp"
#include "jfr/recorder/checkpoint/types/jfrThreadGroupManager.hpp"
#include "jfr/recorder/checkpoint/types/jfrThreadState.hpp"
#include "jfr/recorder/checkpoint/types/jfrType.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
@@ -106,7 +106,7 @@ void JfrCheckpointThreadClosure::do_thread(Thread* t) {
} else {
_writer.write(name);
_writer.write(tid);
_writer.write(JfrThreadGroup::thread_group_id(JavaThread::cast(t), _curthread));
_writer.write(JfrThreadGroupManager::thread_group_id(JavaThread::cast(t), _curthread));
}
_writer.write<bool>(false); // isVirtual
}
@@ -115,7 +115,10 @@ void JfrThreadConstantSet::serialize(JfrCheckpointWriter& writer) {
JfrCheckpointThreadClosure tc(writer);
JfrJavaThreadIterator javathreads;
while (javathreads.has_next()) {
tc.do_thread(javathreads.next());
JavaThread* const jt = javathreads.next();
if (jt->jfr_thread_local()->should_write()) {
tc.do_thread(jt);
}
}
JfrNonJavaThreadIterator nonjavathreads;
while (nonjavathreads.has_next()) {
@@ -124,7 +127,7 @@ void JfrThreadConstantSet::serialize(JfrCheckpointWriter& writer) {
}
void JfrThreadGroupConstant::serialize(JfrCheckpointWriter& writer) {
JfrThreadGroup::serialize(writer);
JfrThreadGroupManager::serialize(writer);
}
static const char* flag_value_origin_to_string(JVMFlagOrigin origin) {
@@ -303,11 +306,11 @@ void JfrThreadConstant::serialize(JfrCheckpointWriter& writer) {
writer.write(JfrThreadId::jfr_id(_thread, _tid));
// java thread group - VirtualThread threadgroup reserved id 1
const traceid thread_group_id = is_vthread ? 1 :
JfrThreadGroup::thread_group_id(JavaThread::cast(_thread), Thread::current());
JfrThreadGroupManager::thread_group_id(JavaThread::cast(_thread), Thread::current());
writer.write(thread_group_id);
writer.write<bool>(is_vthread); // isVirtual
if (!is_vthread) {
JfrThreadGroup::serialize(&writer, thread_group_id);
if (thread_group_id > 1) {
JfrThreadGroupManager::serialize(writer, thread_group_id, _to_blob);
}
// VirtualThread threadgroup already serialized invariant.
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -109,11 +109,12 @@ class JfrThreadConstant : public JfrSerializer {
oop _vthread;
const char* _name;
int _length;
const bool _to_blob;
void write_name(JfrCheckpointWriter& writer);
void write_os_name(JfrCheckpointWriter& writer, bool is_vthread);
public:
JfrThreadConstant(Thread* t, traceid tid, oop vthread = nullptr) :
_thread(t), _tid(tid), _vthread(vthread), _name(nullptr), _length(-1) {}
JfrThreadConstant(Thread* t, traceid tid, bool to_blob, oop vthread = nullptr) :
_thread(t), _tid(tid), _vthread(vthread), _name(nullptr), _length(-1), _to_blob(to_blob) {}
void serialize(JfrCheckpointWriter& writer);
};

View File

@@ -109,7 +109,7 @@ JfrBlobHandle JfrTypeManager::create_thread_blob(JavaThread* jt, traceid tid /*
// TYPE_THREAD and count is written unconditionally for blobs, also for vthreads.
writer.write_type(TYPE_THREAD);
writer.write_count(1);
JfrThreadConstant type_thread(jt, tid, vthread);
JfrThreadConstant type_thread(jt, tid, true, vthread);
type_thread.serialize(writer);
return writer.move();
}
@@ -128,7 +128,7 @@ void JfrTypeManager::write_checkpoint(Thread* t, traceid tid /* 0 */, oop vthrea
writer.write_type(TYPE_THREAD);
writer.write_count(1);
}
JfrThreadConstant type_thread(t, tid, vthread);
JfrThreadConstant type_thread(t, tid, false, vthread);
type_thread.serialize(writer);
}

View File

@@ -533,8 +533,9 @@ static void clear_method_tracer_klasses() {
static void do_unloading_klass(Klass* klass) {
assert(klass != nullptr, "invariant");
assert(_subsystem_callback != nullptr, "invariant");
if (klass->is_instance_klass() && InstanceKlass::cast(klass)->is_scratch_class()) {
return;
if (!used(klass) && klass->is_instance_klass() && InstanceKlass::cast(klass)->is_scratch_class()) {
SET_TRANSIENT(klass);
assert(used(klass), "invariant");
}
if (JfrKlassUnloading::on_unload(klass)) {
if (JfrTraceId::has_sticky_bit(klass)) {

View File

@@ -152,7 +152,7 @@ public:
if (!klass->is_instance_klass()) {
return false;
}
return _current_epoch ? METHOD_USED_THIS_EPOCH(klass) : METHOD_USED_PREVIOUS_EPOCH(klass);
return _current_epoch ? USED_THIS_EPOCH(klass) : USED_PREVIOUS_EPOCH(klass);
}
};

View File

@@ -30,6 +30,7 @@
#include "memory/allStatic.hpp"
class ClassLoaderData;
class InstanceKlass;
class Klass;
class Method;
class ModuleEntry;
@@ -86,7 +87,6 @@ class JfrTraceId : public AllStatic {
// through load barrier
static traceid load(const Klass* klass);
static traceid load_previous_epoch(const Klass* klass);
static traceid load(jclass jc, bool raw = false);
static traceid load(const Method* method);
static traceid load(const Klass* klass, const Method* method);
@@ -146,10 +146,8 @@ class JfrTraceId : public AllStatic {
static void set_sticky_bit(const Method* method);
static void clear_sticky_bit(const Klass* k);
static void clear_sticky_bit(const Method* method);
static bool has_timing_bit(const Klass* k);
static void set_timing_bit(const Klass* k);
static void clear_timing_bit(const Klass* k);
static bool has_timing_bit(const InstanceKlass* scratch_klass);
static void set_timing_bit(const InstanceKlass* scratch_klass);
};
#endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEID_HPP

View File

@@ -32,6 +32,7 @@
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp"
#include "jfr/support/jfrKlassExtension.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/mutexLocker.hpp"
@@ -81,10 +82,6 @@ inline traceid JfrTraceId::load_leakp_previous_epoch(const Klass* klass, const M
return JfrTraceIdLoadBarrier::load_leakp_previous_epoch(klass, method);
}
inline traceid JfrTraceId::load_previous_epoch(const Klass* klass) {
return JfrTraceIdLoadBarrier::load_previous_epoch(klass);
}
template <typename T>
inline traceid raw_load(const T* t) {
assert(t != nullptr, "invariant");
@@ -198,6 +195,7 @@ inline void JfrTraceId::set_sticky_bit(const Method* method) {
assert(method != nullptr, "invariant");
assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
assert(!has_sticky_bit(method), "invariant");
assert(!method->is_old(), "invariant");
SET_METHOD_STICKY_BIT(method);
assert(has_sticky_bit(method), "invariant");
}
@@ -205,30 +203,22 @@ inline void JfrTraceId::set_sticky_bit(const Method* method) {
inline void JfrTraceId::clear_sticky_bit(const Method* method) {
assert(method != nullptr, "invarriant");
assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
assert(!method->is_old(), "invariant");
assert(JfrTraceId::has_sticky_bit(method), "invariant");
CLEAR_STICKY_BIT_METHOD(method);
assert(!JfrTraceId::has_sticky_bit(method), "invariant");
}
inline bool JfrTraceId::has_timing_bit(const Klass* k) {
assert(k != nullptr, "invariant");
return HAS_TIMING_BIT(k);
inline bool JfrTraceId::has_timing_bit(const InstanceKlass* scratch_klass) {
assert(scratch_klass != nullptr, "invariant");
return HAS_TIMING_BIT(scratch_klass);
}
inline void JfrTraceId::set_timing_bit(const Klass* k) {
assert(k != nullptr, "invariant");
assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
assert(!has_timing_bit(k), "invariant");
SET_TIMING_BIT(k);
assert(has_timing_bit(k), "invariant");
}
inline void JfrTraceId::clear_timing_bit(const Klass* k) {
assert(k != nullptr, "invarriant");
assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
assert(JfrTraceId::has_timing_bit(k), "invariant");
CLEAR_TIMING_BIT(k);
assert(!JfrTraceId::has_timing_bit(k), "invariant");
inline void JfrTraceId::set_timing_bit(const InstanceKlass* scratch_klass) {
assert(scratch_klass != nullptr, "invariant");
assert(!has_timing_bit(scratch_klass), "invariant");
SET_TIMING_BIT(scratch_klass);
assert(has_timing_bit(scratch_klass), "invariant");
}
#endif // SHARE_JFR_RECORDER_CHECKPOINT_TYPES_TRACEID_JFRTRACEID_INLINE_HPP

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -78,7 +78,7 @@ inline uint8_t* traceid_meta_byte(const T* ptr) {
template <>
inline uint8_t* traceid_meta_byte<Method>(const Method* ptr) {
assert(ptr != nullptr, "invariant");
return ptr->trace_meta_addr();
return ptr->trace_flags_meta_addr();
}
inline uint8_t traceid_and(uint8_t bits, uint8_t current) {

View File

@@ -86,6 +86,27 @@ inline traceid JfrTraceIdLoadBarrier::load(const Klass* klass) {
return TRACE_ID(klass);
}
inline const Method* latest_version(const Klass* klass, const Method* method) {
assert(klass != nullptr, "invariant");
assert(method != nullptr, "invariant");
assert(klass == method->method_holder(), "invariant");
assert(method->is_old(), "invariant");
const InstanceKlass* const ik = InstanceKlass::cast(klass);
assert(ik->has_been_redefined(), "invariant");
const Method* const latest_version = ik->method_with_orig_idnum(method->orig_method_idnum());
if (latest_version == nullptr) {
assert(AllowRedefinitionToAddDeleteMethods, "invariant");
// method has been removed. Return old version.
return method;
}
assert(latest_version != nullptr, "invariant");
assert(latest_version != method, "invariant");
assert(!latest_version->is_old(), "invariant");
assert(latest_version->orig_method_idnum() == method->orig_method_idnum(), "invariant");
assert(latest_version->name() == method->name() && latest_version->signature() == method->signature(), "invariant");
return latest_version;
}
inline traceid JfrTraceIdLoadBarrier::load(const Method* method) {
return load(method->method_holder(), method);
}
@@ -93,6 +114,9 @@ inline traceid JfrTraceIdLoadBarrier::load(const Method* method) {
inline traceid JfrTraceIdLoadBarrier::load(const Klass* klass, const Method* method) {
assert(klass != nullptr, "invariant");
assert(method != nullptr, "invariant");
if (method->is_old()) {
method = latest_version(klass, method);
}
if (should_tag(method)) {
SET_METHOD_AND_CLASS_USED_THIS_EPOCH(klass);
SET_METHOD_FLAG_USED_THIS_EPOCH(method);
@@ -111,6 +135,9 @@ inline traceid JfrTraceIdLoadBarrier::load_no_enqueue(const Method* method) {
inline traceid JfrTraceIdLoadBarrier::load_no_enqueue(const Klass* klass, const Method* method) {
assert(klass != nullptr, "invariant");
assert(method != nullptr, "invariant");
if (method->is_old()) {
method = latest_version(klass, method);
}
SET_METHOD_AND_CLASS_USED_THIS_EPOCH(klass);
SET_METHOD_FLAG_USED_THIS_EPOCH(method);
assert(METHOD_AND_CLASS_USED_THIS_EPOCH(klass), "invariant");
@@ -123,11 +150,12 @@ inline traceid JfrTraceIdLoadBarrier::load(const ClassLoaderData* cld) {
if (cld->has_class_mirror_holder()) {
return 0;
}
const traceid id = set_used_and_get(cld);
const Klass* const class_loader_klass = cld->class_loader_klass();
if (class_loader_klass != nullptr) {
load(class_loader_klass);
}
return set_used_and_get(cld);
return id;
}
inline traceid JfrTraceIdLoadBarrier::load(const ModuleEntry* module) {
@@ -158,6 +186,7 @@ inline traceid JfrTraceIdLoadBarrier::load_leakp(const Klass* klass) {
inline traceid JfrTraceIdLoadBarrier::load_leakp(const Klass* klass, const Method* method) {
assert(klass != nullptr, "invariant");
assert(method != nullptr, "invariant");
assert(!method->is_old(), "invariant");
assert(klass == method->method_holder(), "invariant");
assert(METHOD_AND_CLASS_USED_THIS_EPOCH(klass), "invariant");
if (should_tag(method)) {
@@ -175,6 +204,7 @@ inline traceid JfrTraceIdLoadBarrier::load_leakp(const Klass* klass, const Metho
inline traceid JfrTraceIdLoadBarrier::load_leakp_previous_epoch(const Klass* klass, const Method* method) {
assert(klass != nullptr, "invariant");
assert(method != nullptr, "invariant");
assert(!method->is_old(), "invariant");
assert(klass == method->method_holder(), "invariant");
assert(METHOD_AND_CLASS_USED_PREVIOUS_EPOCH(klass), "invariant");
if (METHOD_FLAG_NOT_USED_PREVIOUS_EPOCH(method)) {

View File

@@ -33,6 +33,7 @@
#include "jfr/periodic/sampling/jfrThreadSampler.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
#include "jfr/recorder/checkpoint/types/jfrThreadGroupManager.hpp"
#include "jfr/recorder/repository/jfrRepository.hpp"
#include "jfr/recorder/service/jfrEventThrottler.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
@@ -311,6 +312,9 @@ bool JfrRecorder::create_components() {
if (!create_event_throttler()) {
return false;
}
if (!create_thread_group_manager()) {
return false;
}
return true;
}
@@ -405,6 +409,10 @@ bool JfrRecorder::create_event_throttler() {
return JfrEventThrottler::create();
}
bool JfrRecorder::create_thread_group_manager() {
return JfrThreadGroupManager::create();
}
void JfrRecorder::destroy_components() {
JfrJvmtiAgent::destroy();
if (_post_box != nullptr) {
@@ -444,6 +452,7 @@ void JfrRecorder::destroy_components() {
_cpu_time_thread_sampling = nullptr;
}
JfrEventThrottler::destroy();
JfrThreadGroupManager::destroy();
}
bool JfrRecorder::create_recorder_thread() {

View File

@@ -53,6 +53,7 @@ class JfrRecorder : public JfrCHeapObj {
static bool create_stacktrace_repository();
static bool create_storage();
static bool create_stringpool();
static bool create_thread_group_manager();
static bool create_thread_sampler();
static bool create_cpu_time_thread_sampling();
static bool create_event_throttler();

View File

@@ -645,7 +645,7 @@ static void write_thread_local_buffer(JfrChunkWriter& chunkwriter, Thread* t) {
size_t JfrRecorderService::flush() {
size_t total_elements = flush_metadata(_chunkwriter);
total_elements = flush_storage(_storage, _chunkwriter);
total_elements += flush_storage(_storage, _chunkwriter);
if (_string_pool.is_modified()) {
total_elements += flush_stringpool(_string_pool, _chunkwriter);
}

View File

@@ -36,14 +36,6 @@
#include "utilities/preserveException.hpp"
#include "utilities/macros.hpp"
class JfrRecorderThread : public JavaThread {
public:
JfrRecorderThread(ThreadFunction entry_point) : JavaThread(entry_point) {}
virtual ~JfrRecorderThread() {}
virtual bool is_JfrRecorder_thread() const { return true; }
};
static Thread* start_thread(instanceHandle thread_oop, ThreadFunction proc, TRAPS) {
assert(thread_oop.not_null(), "invariant");
assert(proc != nullptr, "invariant");

View File

@@ -26,9 +26,9 @@
#define SHARE_JFR_RECORDER_SERVICE_JFRRECORDERTHREAD_HPP
#include "memory/allStatic.hpp"
#include "runtime/javaThread.hpp"
#include "utilities/debug.hpp"
class JavaThread;
class JfrCheckpointManager;
class JfrPostBox;
class Thread;
@@ -42,4 +42,12 @@ class JfrRecorderThreadEntry : AllStatic {
static bool start(JfrCheckpointManager* cp_manager, JfrPostBox* post_box, TRAPS);
};
class JfrRecorderThread : public JavaThread {
public:
JfrRecorderThread(ThreadFunction entry_point) : JavaThread(entry_point) {}
virtual ~JfrRecorderThread() {}
virtual bool is_JfrRecorder_thread() const { return true; }
};
#endif // SHARE_JFR_RECORDER_SERVICE_JFRRECORDERTHREAD_HPP

View File

@@ -29,6 +29,7 @@
#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
/*
* There are two separate repository instances.
@@ -186,6 +187,7 @@ void JfrStackTraceRepository::record_for_leak_profiler(JavaThread* current_threa
}
traceid JfrStackTraceRepository::add_trace(const JfrStackTrace& stacktrace) {
assert(!SafepointSynchronize::is_at_safepoint(), "invariant");
MutexLocker lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
assert(stacktrace.number_of_frames() > 0, "invariant");
const size_t index = stacktrace._hash % TABLE_SIZE;

View File

@@ -390,15 +390,16 @@ static inline void write_stacktraces(JfrChunkWriter& cw) {
_resolved_list.iterate(scw);
}
// First, we consolidate all stack trace blobs into a single TYPE_STACKTRACE checkpoint
// and serialize it to the chunk. Then, all events are serialized, and unique type set blobs
// written into the JfrCheckpoint system to be serialized to the chunk upon return.
// First, all events are serialized, and unique type set blobs are written into the
// JfrCheckpoint system to be serialized to the chunk upon return.
// Then, we consolidate all stack trace blobs into a single TYPE_STACKTRACE checkpoint
// and serialize it directly to the chunk.
void JfrDeprecationManager::write_edges(JfrChunkWriter& cw, Thread* thread, bool on_error /* false */) {
if (_resolved_list.is_nonempty() && JfrEventSetting::is_enabled(JfrDeprecatedInvocationEvent)) {
write_events(cw, thread, on_error);
if (has_stacktrace()) {
write_stacktraces(cw);
}
write_events(cw, thread, on_error);
}
}

View File

@@ -40,8 +40,6 @@
#define EVENT_STICKY_BIT 8192
#define IS_EVENT_KLASS(ptr) (((ptr)->trace_id() & (JDK_JFR_EVENT_KLASS | JDK_JFR_EVENT_SUBKLASS)) != 0)
#define IS_EVENT_OR_HOST_KLASS(ptr) (((ptr)->trace_id() & (JDK_JFR_EVENT_KLASS | JDK_JFR_EVENT_SUBKLASS | EVENT_HOST_KLASS)) != 0)
#define KLASS_HAS_STICKY_BIT(ptr) (((ptr)->trace_id() & STICKY_BIT) != 0)
#define ON_KLASS_REDEFINITION(k, t) if (KLASS_HAS_STICKY_BIT(k)) Jfr::on_klass_redefinition(k, t)
#define ON_KLASS_CREATION(k, p, t) Jfr::on_klass_creation(k, p, t)
#endif // SHARE_JFR_SUPPORT_JFRKLASSEXTENSION_HPP

View File

@@ -29,6 +29,7 @@
#include "jfr/periodic/sampling/jfrCPUTimeThreadSampler.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrOopTraceId.inline.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
#include "jfr/recorder/jfrRecorder.hpp"
#include "jfr/recorder/service/jfrOptionSet.hpp"
#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
@@ -45,6 +46,7 @@
#include "runtime/os.hpp"
#include "runtime/threadIdentifier.hpp"
#include "utilities/sizes.hpp"
#include "utilities/spinYield.hpp"
JfrThreadLocal::JfrThreadLocal() :
_sample_request(),
@@ -74,12 +76,14 @@ JfrThreadLocal::JfrThreadLocal() :
_wallclock_time(os::javaTimeNanos()),
_non_reentrant_nesting(0),
_vthread_epoch(0),
_generation(0),
_vthread_excluded(false),
_jvm_thread_excluded(false),
_enqueued_requests(false),
_vthread(false),
_notified(false),
_dead(false)
_dead(false),
_sampling_critical_section(false)
#ifdef LINUX
,_cpu_timer(nullptr),
_cpu_time_jfr_locked(UNLOCKED),
@@ -134,17 +138,33 @@ static void send_java_thread_start_event(JavaThread* jt) {
}
void JfrThreadLocal::on_start(Thread* t) {
assign_thread_id(t, t->jfr_thread_local());
JfrThreadLocal* const tl = t->jfr_thread_local();
assert(tl != nullptr, "invariant");
assign_thread_id(t, tl);
if (JfrRecorder::is_recording()) {
JfrCheckpointManager::write_checkpoint(t);
if (t->is_Java_thread()) {
JavaThread *const jt = JavaThread::cast(t);
if (!t->is_Java_thread()) {
JfrCheckpointManager::write_checkpoint(t);
return;
}
JavaThread* const jt = JavaThread::cast(t);
if (jt->thread_state() == _thread_new) {
JfrCPUTimeThreadSampling::on_javathread_create(jt);
} else {
assert(jt->thread_state() == _thread_in_vm, "invariant");
if (tl->should_write()) {
JfrCheckpointManager::write_checkpoint(t);
}
send_java_thread_start_event(jt);
if (tl->has_cached_stack_trace()) {
tl->clear_cached_stack_trace();
}
return;
}
}
if (t->jfr_thread_local()->has_cached_stack_trace()) {
t->jfr_thread_local()->clear_cached_stack_trace();
if (t->is_Java_thread() && JavaThread::cast(t)->thread_state() == _thread_in_vm) {
if (tl->has_cached_stack_trace()) {
tl->clear_cached_stack_trace();
}
}
}
@@ -227,13 +247,18 @@ void JfrThreadLocal::on_exit(Thread* t) {
JfrThreadLocal * const tl = t->jfr_thread_local();
assert(!tl->is_dead(), "invariant");
if (JfrRecorder::is_recording()) {
JfrCheckpointManager::write_checkpoint(t);
}
if (t->is_Java_thread()) {
JavaThread* const jt = JavaThread::cast(t);
send_java_thread_end_event(jt, JfrThreadLocal::jvm_thread_id(jt));
JfrCPUTimeThreadSampling::on_javathread_terminate(jt);
JfrThreadCPULoadEvent::send_event_for_thread(jt);
if (!t->is_Java_thread()) {
JfrCheckpointManager::write_checkpoint(t);
} else {
JavaThread* const jt = JavaThread::cast(t);
assert(jt->thread_state() == _thread_in_vm, "invariant");
if (tl->should_write()) {
JfrCheckpointManager::write_checkpoint(t);
}
send_java_thread_end_event(jt, JfrThreadLocal::jvm_thread_id(jt));
JfrCPUTimeThreadSampling::on_javathread_terminate(jt);
JfrThreadCPULoadEvent::send_event_for_thread(jt);
}
}
release(tl, Thread::current()); // because it could be that Thread::current() != t
}
@@ -423,6 +448,15 @@ u2 JfrThreadLocal::vthread_epoch(const JavaThread* jt) {
return Atomic::load(&jt->jfr_thread_local()->_vthread_epoch);
}
bool JfrThreadLocal::should_write() const {
const u2 current_generation = JfrTraceIdEpoch::epoch_generation();
if (Atomic::load(&_generation) != current_generation) {
Atomic::store(&_generation, current_generation);
return true;
}
return false;
}
traceid JfrThreadLocal::thread_id(const Thread* t) {
assert(t != nullptr, "invariant");
if (is_impersonating(t)) {
@@ -599,7 +633,10 @@ bool JfrThreadLocal::try_acquire_cpu_time_jfr_dequeue_lock() {
}
void JfrThreadLocal::acquire_cpu_time_jfr_dequeue_lock() {
while (Atomic::cmpxchg(&_cpu_time_jfr_locked, UNLOCKED, DEQUEUE) != UNLOCKED);
SpinYield s;
while (Atomic::cmpxchg(&_cpu_time_jfr_locked, UNLOCKED, DEQUEUE) != UNLOCKED) {
s.wait();
}
}
void JfrThreadLocal::release_cpu_time_jfr_queue_lock() {

View File

@@ -75,6 +75,7 @@ class JfrThreadLocal {
jlong _wallclock_time;
int32_t _non_reentrant_nesting;
u2 _vthread_epoch;
mutable u2 _generation;
bool _vthread_excluded;
bool _jvm_thread_excluded;
volatile bool _enqueued_requests;
@@ -348,6 +349,9 @@ class JfrThreadLocal {
return _sampling_critical_section;
}
// Serialization state.
bool should_write() const;
static int32_t make_non_reentrant(Thread* thread);
static void make_reentrant(Thread* thread, int32_t previous_nesting);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -95,11 +95,13 @@ class JfrTraceFlag {
uint8_t* trace_flags_addr() const { \
return _trace_flags.flags_addr(); \
} \
uint8_t* trace_meta_addr() const { \
uint8_t* trace_flags_meta_addr() const { \
return _trace_flags.meta_addr(); \
} \
void copy_trace_flags(uint16_t rhs_flags) const { \
_trace_flags.set_flags(_trace_flags.flags() | rhs_flags); \
void copy_trace_flags(const Method* rhm) const { \
assert(rhm != nullptr, "invariant"); \
set_trace_flags(rhm->trace_flags()); \
assert(trace_flags()==rhm->trace_flags(), ""); \
}
#endif // SHARE_JFR_SUPPORT_JFRTRACEIDEXTENSION_HPP

View File

@@ -36,16 +36,16 @@ class InstanceKlass;
class JfrInstrumentedClass {
private:
traceid _trace_id;
const InstanceKlass* _instance_klass;
const InstanceKlass* _ik;
bool _unloaded;
public:
JfrInstrumentedClass(traceid trace_id = 0, const InstanceKlass* instance_klass = nullptr, bool unloaded = false) :
_trace_id(trace_id), _instance_klass(instance_klass), _unloaded(unloaded) {
JfrInstrumentedClass(traceid trace_id = 0, const InstanceKlass* ik = nullptr, bool unloaded = false) :
_trace_id(trace_id), _ik(ik), _unloaded(unloaded) {
}
const InstanceKlass* instance_klass() const {
return _instance_klass;
return _ik;
}
traceid trace_id() const {

View File

@@ -67,6 +67,8 @@ void JfrMethodProcessor::update_methods(const InstanceKlass* ik) {
const uint32_t idx = _methods->at(i).methods_array_index();
Method* const method = ik_methods->at(idx);
assert(method != nullptr, "invariant");
assert(method->name() == _methods->at(i).name(), "invariant");
assert(method->signature() == _methods->at(i).signature(), "invariant");
_methods->at(i).set_method(method);
// This is to keep the method from being unloaded during redefine / retransform.
// Equivalent functionality to that provided by the methodHandle. Unfortunately,

View File

@@ -128,11 +128,11 @@ void JfrMethodTracer::retransform(JNIEnv* env, const JfrFilterClassClosure& clas
}
}
static void handle_no_bytecode_result(const Klass* klass) {
assert(klass != nullptr, "invariant");
if (JfrTraceId::has_sticky_bit(klass)) {
static void handle_no_bytecode_result(const InstanceKlass* ik) {
assert(ik != nullptr, "invariant");
if (JfrTraceId::has_sticky_bit(ik)) {
MutexLocker lock(ClassLoaderDataGraph_lock);
JfrTraceTagging::clear_sticky_bit(InstanceKlass::cast(klass));
JfrTraceTagging::clear_sticky(ik);
}
}
@@ -143,11 +143,11 @@ void JfrMethodTracer::on_klass_creation(InstanceKlass*& ik, ClassFileParser& par
ResourceMark rm(THREAD);
// 1. Is the ik the initial load, i.e.the first InstanceKlass, or a scratch klass, denoting a redefine / retransform?
const Klass* const existing_klass = JfrClassTransformer::find_existing_klass(ik, THREAD);
const bool is_retransform = existing_klass != nullptr;
const InstanceKlass* const existing_ik = JfrClassTransformer::find_existing_klass(ik, THREAD);
const bool is_retransform = existing_ik != nullptr;
// 2. Test the ik and its methods against the currently installed filter object.
JfrMethodProcessor mp(is_retransform ? InstanceKlass::cast(existing_klass) : ik, THREAD);
JfrMethodProcessor mp(is_retransform ? existing_ik : ik, THREAD);
if (!mp.has_methods()) {
return;
}
@@ -159,7 +159,7 @@ void JfrMethodTracer::on_klass_creation(InstanceKlass*& ik, ClassFileParser& par
// If no bytecode is returned, either an error occurred during transformation, but more
// likely the matched instructions were negative, i.e. instructions to remove existing instrumentation
// and so Java added no new instrumentation. By not returning a bytecode result, the klass is restored to its original, non-instrumented, version.
handle_no_bytecode_result(is_retransform ? InstanceKlass::cast(existing_klass) : ik);
handle_no_bytecode_result(is_retransform ? existing_ik : ik);
return;
}
// 4. Now create a new InstanceKlass representation from the modified bytecode.
@@ -173,13 +173,12 @@ void JfrMethodTracer::on_klass_creation(InstanceKlass*& ik, ClassFileParser& par
// Keep the original cached class file data from the existing class.
JfrClassTransformer::transfer_cached_class_file_data(ik, new_ik, parser, THREAD);
JfrClassTransformer::rewrite_klass_pointer(ik, new_ik, parser, THREAD); // The ik is modified to point to new_ik here.
const InstanceKlass* const existing_ik = InstanceKlass::cast(existing_klass);
mp.update_methods(existing_ik);
existing_ik->module()->add_read(jdk_jfr_module());
// By setting the sticky bit on the existng klass, we receive a callback into on_klass_redefinition (see below)
// when our new methods are installed into the existing klass as part of retransformation / redefinition.
// Only when we know our new methods have been installed can we add the klass to the instrumented list (done as part of callback).
JfrTraceTagging::install_sticky_bit_for_retransform_klass(existing_ik, mp.methods(), mp.has_timing());
JfrTraceTagging::tag_sticky_for_retransform_klass(existing_ik, ik, mp.methods(), mp.has_timing());
return;
}
// Initial class load.
@@ -203,28 +202,22 @@ static inline void log_add(const InstanceKlass* ik) {
}
}
void JfrMethodTracer::add_timing_entry(const InstanceKlass* ik, traceid klass_id) {
assert(ik != nullptr, "invariant");
void JfrMethodTracer::add_timing_entry(traceid klass_id) {
assert(_timing_entries != nullptr, "invariant");
if (JfrTraceId::has_timing_bit(ik)) {
JfrTraceId::clear_timing_bit(ik);
_timing_entries->append(klass_id);
}
_timing_entries->append(klass_id);
}
// At this point we have installed our new retransformed methods into the original klass, which is ik.
// jvmtiRedefineClassses::redefine_single_class() has finished so we are still at a safepoint.
// If the original klass is not already in the list, add it and also dynamically tag all
// artifacts that have the sticky bit set. If the klass has an associated TimedClass,
// also add the klass to the list of _timing_entries for publication.
void JfrMethodTracer::on_klass_redefinition(const InstanceKlass* ik, Thread* thread) {
// jvmtiRedefineClassses::redefine_single_class() is about to finish so we are still at a safepoint.
// If the original klass is not already in the list, add it. If the klass has an associated TimedClass,
// add also the klass_id to the list of _timing_entries for publication.
void JfrMethodTracer::on_klass_redefinition(const InstanceKlass* ik, bool has_timing) {
assert(ik != nullptr, "invariant");
assert(!ik->is_scratch_class(), "invarint");
assert(ik->has_been_redefined(), "invariant");
assert(JfrTraceId::has_sticky_bit(ik), "invariant");
assert(in_use(), "invariant");
assert(SafepointSynchronize::is_at_safepoint(), "invariant");
assert_locked_or_safepoint(ClassLoaderDataGraph_lock);
const traceid klass_id = JfrTraceId::load_raw(ik);
const JfrInstrumentedClass jic(klass_id, ik, false);
@@ -235,8 +228,9 @@ void JfrMethodTracer::on_klass_redefinition(const InstanceKlass* ik, Thread* thr
assert(!JfrTraceIdEpoch::has_method_tracer_changed_tag_state(), "invariant");
JfrTraceIdEpoch::set_method_tracer_tag_state();
}
add_timing_entry(ik, klass_id);
JfrTraceTagging::set_dynamic_tag_for_sticky_bit(ik);
if (has_timing) {
add_timing_entry(klass_id);
}
log_add(ik);
}
}
@@ -258,8 +252,7 @@ void JfrMethodTracer::add_instrumented_class(InstanceKlass* ik, GrowableArray<Jf
ik->module()->add_read(jdk_jfr_module());
MutexLocker lock(ClassLoaderDataGraph_lock);
assert(!in_instrumented_list(ik, instrumented_classes()), "invariant");
JfrTraceTagging::set_dynamic_tag(ik, methods);
JfrTraceTagging::set_sticky_bit(ik, methods);
JfrTraceTagging::tag_sticky(ik, methods);
const JfrInstrumentedClass jik(JfrTraceId::load_raw(ik), ik, false);
const int idx = instrumented_classes()->append(jik);
if (idx == 0) {

View File

@@ -51,7 +51,7 @@ class JfrMethodTracer: AllStatic {
static GrowableArray<jlong>* _timing_entries; // Guarded by ClassLoaderDataGraph_lock
static ModuleEntry* jdk_jfr_module();
static void add_timing_entry(const InstanceKlass* ik, traceid klass_id);
static void add_timing_entry(traceid klass_id);
static void retransform(JNIEnv* env, const JfrFilterClassClosure& classes, TRAPS);
static void add_instrumented_class(InstanceKlass* ik, GrowableArray<JfrTracedMethod>* methods);
@@ -61,7 +61,7 @@ class JfrMethodTracer: AllStatic {
static void add_to_unloaded_set(const Klass* k);
static void trim_instrumented_classes(bool trim);
static GrowableArray<JfrInstrumentedClass>* instrumented_classes();
static void on_klass_redefinition(const InstanceKlass* ik, Thread* thread);
static void on_klass_redefinition(const InstanceKlass* ik, bool has_timing);
static void on_klass_creation(InstanceKlass*& ik, ClassFileParser& parser, TRAPS);
static jlongArray set_filters(JNIEnv* env,
jobjectArray classes,

Some files were not shown because too many files have changed in this diff Show More