Compare commits

156 Commits

Author SHA1 Message Date
Rob McKenna
78770bfaef 8367031: [backout] Change java.time month/day field types to 'byte'
Backport-of: 00be643fa3eff6fd66d39f5f5ea70ff347296318
2025-09-25 16:16:56 +00:00
Nibedita Jena
5100536d49 8368308: ISO 4217 Amendment 180 Update
Backport-of: 3f9c665586705c833674ae998f49cabbc7e15615
2025-09-25 14:42:40 +00:00
Nibedita Jena
415f2adffb 8366223: ZGC: ZPageAllocator::cleanup_failed_commit_multi_partition is broken
Backport-of: 009612805f79e37d9ce4e3f5c90627b635b095cf
2025-09-02 11:16:22 +00:00
Nibedita Jena
f92ad752ab Merge 2025-08-19 10:27:06 +00:00
Matias Saavedra Silva
4f265785a9 8352637: Enhance bytecode verification
Reviewed-by: dlong
Backport-of: d9bf0c2ca2d52d783a8122504cac9566d42b22df
2025-08-13 16:03:34 +00:00
Prasanta Sadhukhan
6c48f4ed70 8348760: RadioButton is not shown if JRadioButtonMenuItem is rendered with ImageIcon in WindowsLookAndFeel
Reviewed-by: prr, aivanov
Backport-of: e29346dbd6
2025-08-12 17:15:36 +00:00
Nibedita Jena
c5d85e09e1 8360647: [XWayland] [OL10] NumPad keys are not triggered
Backport-of: 4d5fb6eb8bb66556f06dada72df531d537cf32c2
2025-08-12 11:47:25 +00:00
Ravi Reddy
408ae8637c 8356294: Enhance Path Factories
Backport-of: 5835cefe4946524af3be4933b20cd1b0005b0ad0
2025-08-11 12:57:00 +00:00
Nibedita Jena
1962c746dc Merge
Reviewed-by: rreddy
2025-08-06 15:52:35 +00:00
Nibedita Jena
c02fce22ed 8361212: Remove AffirmTrust root CAs
Backport-of: e58859e8acc19bfd8aaa80e98534651e83850a97
2025-08-06 08:43:16 +00:00
Jesper Wilhelmsson
73c28c2e3d 8364038: Remove EA from the JDK 25 version string with first RC promotion
Reviewed-by: mikael, erikj, iris, cstein
2025-08-05 23:17:32 +00:00
Markus Grönlund
1e2bf070f0 8364258: ThreadGroup constant pool serialization is not normalized
Reviewed-by: egahlin
Backport-of: 3bc449797e
2025-08-04 14:52:19 +00:00
Manuel Hässig
24936b9295 8364409: [BACKOUT] Consolidate Identity of self-inverse operations
Reviewed-by: chagedorn, bmaillard
Backport-of: ddb64836e5
2025-08-04 08:39:05 +00:00
Justin Lu
b5bec8db3f 8364370: java.text.DecimalFormat specification indentation correction
Reviewed-by: liach, naoto
Backport-of: 8e921aee5a
2025-08-01 21:25:52 +00:00
Justin Lu
9bdf9ebadd 8360416: Incorrect l10n test case in sun/security/tools/keytool/i18n.java
Reviewed-by: hchao
Backport-of: 5540a7859b
2025-08-01 16:09:52 +00:00
Nibedita Jena
99f80700d7 8356587: Missing object ID X in pool jdk.types.Method
Backport-of: 9fe2aa59ff
2025-07-31 11:24:44 +00:00
Nibedita Jena
d30e89c381 8360679: Shenandoah: AOT saved adapter calls into broken GC barrier stub
Backport-of: 033a121c96
2025-07-31 11:05:30 +00:00
Nibedita Jena
1d92cd3517 8362882: Update SubmissionPublisher() specification to reflect use of ForkJoinPool.asyncCommonPool()
Backport-of: 3db8262445
2025-07-31 11:05:12 +00:00
Nibedita Jena
6fcaf66539 8315131: Clarify VarHandle set/get access on 32-bit platforms
Backport-of: 1867effcc0
2025-07-31 11:00:02 +00:00
Jiangli Zhou
7b69679175 8362564: hotspot/jtreg/compiler/c2/TestLWLockingCodeGen.java fails on static JDK on x86_64 with AVX instruction extensions
Reviewed-by: kvn
Backport-of: c239c0ab00
2025-07-30 23:19:40 +00:00
Alisen Chung
bf31e50754 8364089: JDK 25 RDP2 L10n resource files update
Reviewed-by: jlu, naoto
Backport-of: c671089d6e
2025-07-30 22:52:03 +00:00
Markus Grönlund
9fe2aa59ff 8356587: Missing object ID X in pool jdk.types.Method
Reviewed-by: egahlin
Backport-of: a34994476e
2025-07-29 11:40:55 +00:00
Alan Bateman
3db8262445 8362882: Update SubmissionPublisher() specification to reflect use of ForkJoinPool.asyncCommonPool()
Reviewed-by: jpai
Backport-of: f79bd54bbb
2025-07-25 11:21:52 +00:00
Aleksey Shipilev
033a121c96 8360679: Shenandoah: AOT saved adapter calls into broken GC barrier stub
Reviewed-by: kvn
Backport-of: 8477630970
2025-07-25 08:09:02 +00:00
Chen Liang
1867effcc0 8315131: Clarify VarHandle set/get access on 32-bit platforms
Reviewed-by: rriggs, iris
Backport-of: ea6674fec8
2025-07-24 17:49:18 +00:00
pavel_kharskii
3eee56e456 8362109: Change milestone to fcs for all releases
Reviewed-by: coffeys, mvs
2025-07-22 12:14:38 +00:00
Nibedita Jena
5dab0808b1 Merge 2025-07-22 05:13:25 +00:00
Rob McKenna
987af5af16 Merge 2025-07-21 17:45:35 +00:00
Thomas Schatzl
e8f2cd8f3d 8347052: Update java man page documentation to reflect current state of the UseNUMA flag
Reviewed-by: ayang
Backport-of: ea774b74e8
2025-07-18 11:36:08 +00:00
SendaoYan
e599ee4a88 8361827: [TESTBUG] serviceability/HeapDump/UnmountedVThreadNativeMethodAtTop.java throws OutOfMemoryError
Reviewed-by: rrich, lmesnik
Backport-of: cbb3d23e19
2025-07-18 06:08:46 +00:00
David Holmes
3a8e9dfe85 8362565: ProblemList jdk/jfr/event/io/TestIOTopFrame.java
Reviewed-by: egahlin
Backport-of: 04c0b130f0
2025-07-18 02:39:10 +00:00
William Kemper
347084bfbd 8360288: Shenandoah crash at size_given_klass in op_degenerated
Reviewed-by: shade
Backport-of: 3b44d7bfa4
2025-07-17 16:50:07 +00:00
SendaoYan
5cc7a31b3f 8361869: Tests which call ThreadController should mark as /native
Reviewed-by: jpai
Backport-of: 3bacf7ea85
2025-07-17 12:50:53 +00:00
SendaoYan
f1f6452e01 8358004: Delete applications/scimark/Scimark.java test
Reviewed-by: coleenp
Backport-of: a5c9bc7032
2025-07-17 12:41:34 +00:00
Erik Gahlin
331adac38e 8361639: JFR: Incorrect top frame for I/O events
Reviewed-by: mgronlun
Backport-of: 1a6cbe421f
2025-07-17 12:20:22 +00:00
Brian Burkhalter
e989c1d138 8362429: AssertionError in File.listFiles(FileFilter | FilenameFilter)
Reviewed-by: alanb
Backport-of: be0161a8e6
2025-07-17 06:57:58 +00:00
Boris Ulasevich
5129887dfe 8362250: ARM32: forward_exception_entry missing return address
Reviewed-by: shade
Backport-of: 6ed81641b1
2025-07-17 01:29:57 +00:00
Brian Burkhalter
69ea85ee12 8361587: AssertionError in File.listFiles() when path is empty and -esa is enabled
Reviewed-by: alanb
Backport-of: eefbfdce31
2025-07-16 15:35:50 +00:00
Erik Gahlin
93260d639e 8361640: JFR: RandomAccessFile::readLine emits events for each character
Reviewed-by: mgronlun, alanb
Backport-of: 9bef2d1610
2025-07-16 15:35:30 +00:00
Tobias Hartmann
b67fb82a03 8362171: C2 fails with unexpected node in SuperWord truncation: ModI
Reviewed-by: chagedorn
Backport-of: 70c1ff7e15
2025-07-16 14:51:08 +00:00
Johannes Bechberger
a626c1d92c 8358619: Fix interval recomputation in CPU Time Profiler
Reviewed-by: jbachorik
Backport-of: c70258ca1c
2025-07-16 10:13:57 +00:00
Johannes Bechberger
533211af73 8358621: Reduce busy waiting in worse case at the synchronization point returning from native in CPU Time Profiler
Reviewed-by: shade
Backport-of: d2082c58ff
2025-07-16 07:31:23 +00:00
Erik Gahlin
07bb0e3e2f 8362097: JFR: Active Settings view broken
Reviewed-by: mgronlun
Backport-of: 25e509b0db
2025-07-16 06:56:09 +00:00
Tobias Hartmann
60196a6b6f 8361952: Installation of MethodData::extra_data_lock() misses synchronization on reader side
Reviewed-by: chagedorn
Backport-of: 272e66d017
2025-07-16 06:33:47 +00:00
Brent Christian
0e6bf00550 Merge
Reviewed-by: jpai
2025-07-16 03:57:34 +00:00
Calvin Cheung
e1926a6d0e 8361328: cds/appcds/dynamicArchive/TestAutoCreateSharedArchive.java archive timestamps comparison failed
Reviewed-by: matsaave, iklam
Backport-of: 4a351e3e57
2025-07-15 21:46:00 +00:00
David Holmes
03a67a969b 8356942: invokeinterface Throws AbstractMethodError Instead of IncompatibleClassChangeError
Reviewed-by: iklam, coleenp
Backport-of: f36147b326
2025-07-15 20:56:47 +00:00
Chris Plummer
cf92877aa5 8361905: Problem list serviceability/sa/ClhsdbThreadContext.java on Windows due to JDK-8356704
Reviewed-by: sspitsyn
Backport-of: f7e8d255cc
2025-07-15 18:29:32 +00:00
Nibedita Jena
05bf5e3a50 Merge 2025-07-15 13:44:52 +00:00
Nibedita Jena
cc2cf97834 8360937: Enhance certificate handling
Reviewed-by: mullan
Backport-of: f2fba5a55176ca82985ca42996cef36be7b7500a
2025-07-15 13:39:47 +00:00
Phil Race
121f5a72e4 8360147: Better Glyph drawing redux
Reviewed-by: rhalade, ahgross, psadhukhan, jdv
2025-07-15 19:00:48 +05:30
Phil Race
52e1e739af 8355884: [macos] java/awt/Frame/I18NTitle.java fails on MacOS
Reviewed-by: kcr, dmarkov, aivanov, honkar, kizune
2025-07-15 19:00:48 +05:30
Darragh Clarke
5ae719c8fc 8350991: Improve HTTP client header handling
Reviewed-by: rhalade, dfuchs, michaelm
2025-07-15 19:00:47 +05:30
Kevin Driver
3ec6eb6482 8349594: Enhance TLS protocol support
Reviewed-by: rhalade, ahgross, wetmore, jnimeh
2025-07-15 19:00:47 +05:30
Christian Hagedorn
fae2345971 8349584: Improve compiler processing
Reviewed-by: rhalade, ahgross, epeter, thartmann
2025-07-15 19:00:47 +05:30
Prasanta Sadhukhan
6e490a465a 8349111: Enhance Swing supports
Reviewed-by: rhalade, jdv, prr
2025-07-15 19:00:47 +05:30
Phil Race
2555b5a632 8348989: Better Glyph drawing
Reviewed-by: mschoene, psadhukhan, jdv, rhalade
2025-07-15 19:00:47 +05:30
Volkan Yazici
caac8172ad 8349551: Failures in tests after JDK-8345625
Reviewed-by: jpai, dfuchs
2025-07-15 19:00:47 +05:30
Volkan Yazici
d1ea951d39 8345625: Better HTTP connections
Reviewed-by: skoivu, rhalade, ahgross, dfuchs, jpai, aefimov
2025-07-15 19:00:47 +05:30
Tobias Hartmann
7aa3f31724 8359678: C2: assert(static_cast<T1>(result) == thing) caused by ReverseBytesNode::Value()
Reviewed-by: chagedorn
Backport-of: e5ab210713
2025-07-15 11:35:53 +00:00
Richard Reingruber
ce85123f3a 8361602: [TESTBUG] serviceability/HeapDump/UnmountedVThreadNativeMethodAtTop.java deadlocks on exception
Reviewed-by: clanger, cjplummer
Backport-of: 917d0182cb
2025-07-15 08:02:44 +00:00
William Kemper
20fc8f74d5 8361529: GenShen: Fix bad assert in swap card tables
Reviewed-by: shade
Backport-of: 1de2acea77
2025-07-14 16:50:47 +00:00
Taizo Kurashige
db6230991b 8358819: The first year is not displayed correctly in Japanese Calendar
Backport-of: 99829950f6
2025-07-14 15:26:25 +00:00
Tobias Hartmann
dd82a0922b 8350177: C2 SuperWord: Integer.numberOfLeadingZeros, numberOfTrailingZeros, reverse and bitCount have input types wrongly truncated for byte and short
Reviewed-by: chagedorn
Backport-of: 77bd417c99
2025-07-14 07:31:27 +00:00
Srinivas Vamsi Parasa
9f21845262 8360775: Fix Shenandoah GC test failures when APX is enabled
Reviewed-by: shade, sviswanathan, jbhateja
Backport-of: 1c560727b8
2025-07-14 02:55:02 +00:00
Srinivas Vamsi Parasa
c5d0f1bc5e 8360776: Disable Intel APX by default and enable it with -XX:+UnlockExperimentalVMOptions -XX:+UseAPX in all builds
Reviewed-by: kvn, sviswanathan
Backport-of: 26b002805a
2025-07-12 21:34:48 +00:00
Chen Liang
c374ac6df4 8361615: CodeBuilder::parameterSlot throws undocumented IOOBE
Reviewed-by: asotona
Backport-of: c9bea77342
2025-07-11 22:52:41 +00:00
Dingli Zhang
98bc22a969 8361829: [TESTBUG] RISC-V: compiler/vectorization/runner/BasicIntOpTest.java fails with RVV but not Zvbb
Backport-of: 2e7e272d7b
2025-07-11 20:59:26 +00:00
Dingli Zhang
05dab283f2 8361532: RISC-V: Several vector tests fail after JDK-8354383
Backport-of: e0245682c8
2025-07-11 20:59:07 +00:00
Rob McKenna
8229274b2d Merge 2025-07-11 19:16:38 +00:00
Boris Ulasevich
44f5dfef97 8358183: [JVMCI] crash accessing nmethod::jvmci_name in CodeCache::aggregate
Reviewed-by: thartmann
Backport-of: 74822ce12a
2025-07-11 11:59:32 +00:00
David Holmes
9adc480ec3 8361447: [REDO] Checked version of JNI Release<type>ArrayElements needs to filter out known wrapped arrays
8361754: New test runtime/jni/checked/TestCharArrayReleasing.java can cause disk full errors

Reviewed-by: coleenp
Backport-of: f67e435431
2025-07-11 00:21:36 +00:00
William Kemper
4d5211ccb0 8357976: GenShen crash in swap_card_tables: Should be clean
Reviewed-by: shade
Backport-of: 382f870cd5
2025-07-10 22:26:14 +00:00
Vladimir Kozlov
e92f387ab5 8360942: [ubsan] aotCache tests trigger runtime error: applying non-zero offset 16 to null pointer in CodeBlob::relocation_end()
Reviewed-by: shade, thartmann
Backport-of: dedcce0450
2025-07-10 17:04:29 +00:00
Chris Plummer
96380509b3 8360312: Serviceability Agent tests fail with JFR enabled due to unknown thread type JfrRecorderThread
Reviewed-by: kevinw, sspitsyn
Backport-of: 712d866b72
2025-07-10 15:43:11 +00:00
Brian Burkhalter
9b99ed8b39 8361299: (bf) CharBuffer.getChars(int,int,char[],int) violates pre-existing specification
Reviewed-by: liach, alanb
Backport-of: 6249259c80
2025-07-10 15:14:31 +00:00
Richard Reingruber
532b1c732e 8360599: [TESTBUG] DumpThreadsWithEliminatedLock.java fails because of unstable inlining
Reviewed-by: mdoerr, kevinw
Backport-of: fea73c1d40
2025-07-10 07:34:40 +00:00
Erik Gahlin
1de8943731 8361175: JFR: Document differences between method sample events
Reviewed-by: mgronlun
Backport-of: 63e08d4af7
2025-07-09 15:32:57 +00:00
Jan Lahoda
50751da562 8361570: Incorrect 'sealed is not allowed here' compile-time error
Reviewed-by: liach, vromero
Backport-of: 853319439e
2025-07-09 13:41:05 +00:00
Nibedita Jena
83d69cab8b Merge
Reviewed-by: rreddy
2025-07-09 05:29:11 +00:00
Jan Lahoda
21cb2acda0 8361445: javac crashes on unresolvable constant in @SuppressWarnings
Reviewed-by: liach, asotona
Backport-of: 0bd2f9cba2
2025-07-09 05:07:20 +00:00
Vicente Romero
0e4422b284 8361214: An anonymous class is erroneously being classify as an abstract class
Reviewed-by: liach
Backport-of: 05c9eec8d0
2025-07-08 21:13:43 +00:00
Ioi Lam
1e985168d6 8358680: AOT cache creation fails: no strings should have been added
Reviewed-by: shade, kvn
Backport-of: 3daa03c30f
2025-07-08 19:02:36 +00:00
Ioi Lam
b8965318c1 8360164: AOT cache creation crashes in ~ThreadTotalCPUTimeClosure()
Reviewed-by: shade, kvn
Backport-of: 7d7e60c8ae
2025-07-08 17:36:10 +00:00
Ioi Lam
afe6bd6910 8336147: Clarify CDS documentation about static vs dynamic archive
Reviewed-by: shade
Backport-of: 854de8c9c6
2025-07-08 17:34:39 +00:00
Matthias Baesken
5500a2d134 8357826: Avoid running some jtreg tests when asan is configured
Backport-of: d7aa349820
2025-07-08 15:12:25 +00:00
Erik Gahlin
b3b5595362 8361338: JFR: Min and max time in MethodTime event is confusing
Reviewed-by: shade
Backport-of: f3e0588d0b
2025-07-08 14:03:56 +00:00
Jan Lahoda
5e716fd7d1 8359596: Behavior change when both -Xlint:options and -Xlint:-options flags are given
Reviewed-by: liach
Backport-of: 3525a40f39
2025-07-08 07:16:25 +00:00
Roger Riggs
3e93b98baf 8354872: Clarify java.lang.Process resource cleanup
Reviewed-by: iris
Backport-of: afb4a1be9e
2025-07-07 22:18:03 +00:00
Kieran Farrell
1ce41821b5 8359454: Enhance String handling
Backport-of: 2f2665738a67aeed224b54870608a346eb627d2a
2025-07-07 18:46:07 +00:00
Ian Myers
829742bcb4 8358577: Test serviceability/jvmti/thread/GetCurrentContendedMonitor/contmon01/contmon01.java failed: unexpexcted monitor object
Backport-of: 8f487d26c0
2025-07-07 16:24:14 +00:00
Manukumar V S
9a73987f9b 8359889: java/awt/MenuItem/SetLabelTest.java inadvertently triggers clicks on items pinned to the taskbar
Reviewed-by: abhiscxk, aivanov
Backport-of: b7fcd0b235
2025-07-07 13:14:30 +00:00
Matthias Baesken
622c743470 8360533: ContainerRuntimeVersionTestUtils fromVersionString fails with some docker versions
Backport-of: 97ec9d3e0a
2025-07-07 07:16:40 +00:00
Erik Gahlin
8707167ef3 8358750: JFR: EventInstrumentation MASK_THROTTLE* constants should be computed in longs
Reviewed-by: mgronlun
Backport-of: 77e69e02eb
2025-07-04 15:07:32 +00:00
Erik Gahlin
e3bd9c6e1c 8360287: JFR: PlatformTracer class should be loaded lazily
Reviewed-by: mgronlun
Backport-of: 8ea544c33f
2025-07-03 18:34:38 +00:00
Martin Doerr
993215f3dd 8361259: JDK25: Backout JDK-8258229
Reviewed-by: mhaessig, thartmann, dlong
2025-07-03 08:52:23 +00:00
Martin Doerr
8a98738f44 8361183: JDK-8360887 needs fixes to avoid cycles and better tests (aix)
Reviewed-by: mbaesken
Backport-of: c460f842bf
2025-07-03 08:46:22 +00:00
Ashutosh Mehra
ab01396209 8361101: AOTCodeAddressTable::_stubs_addr not initialized/freed properly
Reviewed-by: shade
Backport-of: 3066a67e62
2025-07-02 17:49:52 +00:00
Kevin Walls
92268e17be 8359870: JVM crashes in AccessInternal::PostRuntimeDispatch
Reviewed-by: alanb, sspitsyn
Backport-of: 13a3927855
2025-07-02 16:59:29 +00:00
Renjith Kannath Pariyangad
94b6b99ba4 8358452: JNI exception pending in Java_sun_awt_screencast_ScreencastHelper_remoteDesktopKeyImpl of screencast_pipewire.c:1214 (ID: 51119)
Backport-of: 2103dc15cb
2025-07-02 16:44:01 +00:00
Martin Doerr
a98a5e54fc 8360887: (fs) Files.getFileAttributeView returns unusable FileAttributeView if UserDefinedFileAttributeView unavailable (aix)
Reviewed-by: mbaesken
Backport-of: 0572b6ece7
2025-07-02 15:34:12 +00:00
Aleksey Shipilev
b245c517e3 8359436: AOTCompileEagerly should not be diagnostic
Reviewed-by: kvn
Backport-of: e138297323
2025-07-02 11:52:28 +00:00
Tobias Hartmann
0a151c68d6 8358179: Performance regression in Math.cbrt
Reviewed-by: epeter
Backport-of: 38f59f84c9
2025-07-02 08:23:19 +00:00
Jaikiran Pai
554e38dd5a 8359337: XML/JAXP tests that make network connections should ensure that no proxy is selected
Reviewed-by: dfuchs, iris, joehw
Backport-of: 7583a7b857
2025-07-02 01:36:10 +00:00
Fei Yang
26d99e045a 8359270: C2: alignment check should consider base offset when emitting arraycopy runtime call
Backport-of: 6b4393917a
2025-07-01 00:49:45 +00:00
Archie Cobbs
16addb192b 8359596: Behavior change when both -Xlint:options and -Xlint:-options flags are given
Backport-of: 3525a40f39
2025-06-30 16:54:20 +00:00
Aleksey Shipilev
b5b0b3a33a 8360201: JFR: Initialize JfrThreadLocal::_sampling_critical_section
Reviewed-by: zgu
Backport-of: 5c1f77fab1
2025-06-30 13:28:03 +00:00
David Holmes
0dc9e8447b 8358645: Access violation in ThreadsSMRSupport::print_info_on during thread dump
Reviewed-by: shade, dcubed
Backport-of: 334683e634
2025-06-30 01:06:46 +00:00
Alisen Chung
12ffb0c131 8359761: JDK 25 RDP1 L10n resource files update
Reviewed-by: jlu, aivanov
Backport-of: da7080fffb
2025-06-27 19:28:15 +00:00
Roland Westrelin
eaaaae5be9 8356708: C2: loop strip mining expansion doesn't take sunk stores into account
Reviewed-by: thartmann, epeter
Backport-of: c11f36e620
2025-06-27 16:27:33 +00:00
Jaikiran Pai
926c900efa 8359830: Incorrect os.version reported on macOS Tahoe 26 (Beta)
Reviewed-by: rriggs
Backport-of: 8df6b2c4a3
2025-06-27 02:18:57 +00:00
Roman Kennke
658f80e392 8355319: Update Manpage for Compact Object Headers (Production)
Reviewed-by: coleenp
Backport-of: 75ce44aa84
2025-06-26 12:32:36 +00:00
Martin Doerr
274a2dd729 8360405: [PPC64] some environments don't support mfdscr instruction
Reviewed-by: haosun, rrich
Backport-of: f71d64fbeb
2025-06-26 09:14:18 +00:00
Michael McMahon
a84946dde4 8359268: 3 JNI exception pending defect groups in 2 files
Reviewed-by: dfuchs, djelinski
Backport-of: 1fa090524a
2025-06-25 16:17:18 +00:00
Igor Veresov
fdb3e37c71 8359788: Internal Error: assert(get_instanceKlass()->is_loaded()) failed: must be at least loaded
Reviewed-by: shade
Backport-of: 5c4f92ba9a
2025-06-25 16:12:45 +00:00
Fei Yang
e23c817521 8360179: RISC-V: Only enable BigInteger intrinsics when AvoidUnalignedAccess == false
Backport-of: 34412da52b
2025-06-25 11:16:01 +00:00
Ravi Reddy
0ad5402463 8359059: Bump version numbers for 25.0.1
Reviewed-by: erikj
Backport-of: bff98e7d4d
2025-06-25 09:59:47 +00:00
Hannes Wallnöfer
80cb773b7e 8328848: Inaccuracy in the documentation of the -group option
Reviewed-by: liach
Backport-of: f8de5bc582
2025-06-25 05:40:18 +00:00
Hannes Wallnöfer
a576952039 8359024: Accessibility bugs in API documentation
Reviewed-by: liach
Backport-of: 9a726df373
2025-06-25 05:36:31 +00:00
Anthony Scarpino
b89f364842 8358099: PEM spec updates
Reviewed-by: weijun
Backport-of: 78158f30ae
2025-06-24 19:32:07 +00:00
Coleen Phillimore
0694cc1d52 8352075: Perf regression accessing fields
Reviewed-by: shade, iklam
Backport-of: e18277b470
2025-06-24 17:10:28 +00:00
Markus Grönlund
a3abaadc15 8360403: Disable constant pool ID assert during troubleshooting
Reviewed-by: egahlin
Backport-of: cbcf401170
2025-06-24 16:49:43 +00:00
Aleksey Shipilev
7cc1f82b84 8360042: GHA: Bump MSVC to 14.44
Reviewed-by: serb
Backport-of: 72679c94ee
2025-06-24 05:48:20 +00:00
William Kemper
636b56374e 8357550: GenShen crashes during freeze: assert(!chunk->requires_barriers()) failed
Reviewed-by: shade
Backport-of: 17cf49746d
2025-06-23 21:03:04 +00:00
Phil Race
fe9efb75b0 8358526: Clarify behavior of java.awt.HeadlessException constructed with no-args
Reviewed-by: honkar, tr, azvegint
Backport-of: 81985d422d
2025-06-23 17:05:48 +00:00
Erik Gahlin
ca6b165003 8359895: JFR: method-timing view doesn't work
Reviewed-by: mgronlun
Backport-of: 984d7f9cdf
2025-06-23 13:09:03 +00:00
Erik Gahlin
d5aa225451 8359242: JFR: Missing help text for method trace and timing
Reviewed-by: mgronlun
Backport-of: e57a214e2a
2025-06-23 12:22:30 +00:00
Matthias Bläsing
79a85df074 8353950: Clipboard interaction on Windows is unstable
8332271: Reading data from the clipboard from multiple threads crashes the JVM

Reviewed-by: prr
Backport-of: 92be7821f5
2025-06-20 21:49:26 +00:00
Jaikiran Pai
41928aed7d 8359709: java.net.HttpURLConnection sends unexpected "Host" request header in some cases after JDK-8344190
Reviewed-by: dfuchs
Backport-of: 57266064a7
2025-06-20 09:47:26 +00:00
Tobias Hartmann
3f6b0c69c3 8359386: Fix incorrect value for max_size of C2CodeStub when APX is used
Reviewed-by: mhaessig, epeter
Backport-of: b52af182c4
2025-06-20 08:29:10 +00:00
SendaoYan
36b185a930 8359402: Test CloseDescriptors.java should throw SkippedException when there is no lsof/sctp
Reviewed-by: jpai
Backport-of: a16d23557b
2025-06-20 06:26:52 +00:00
Erik Gahlin
c832f001e4 8359593: JFR: Instrumentation of java.lang.String corrupts recording
Reviewed-by: mgronlun
Backport-of: 2f2acb2e3f
2025-06-19 14:19:16 +00:00
Vladimir Kozlov
e5ac75a35b 8359646: C1 crash in AOTCodeAddressTable::add_C_string
Reviewed-by: shade, thartmann
Backport-of: 96070212ad
2025-06-19 13:41:06 +00:00
Erik Gahlin
b79ca5f03b 8359248: JFR: Help text for-XX:StartFlightRecording:report-on-exit should explain option can be repeated
Reviewed-by: mgronlun
Backport-of: fedd0a0ee3
2025-06-19 12:56:19 +00:00
Fei Yang
ee45ba9138 8359218: RISC-V: Only enable CRC32 intrinsic when AvoidUnalignedAccess == false
Backport-of: 65e63b6ab4
2025-06-18 11:50:22 +00:00
Stuart Marks
5bcea92eaa 8338140: (str) Add notes to String.trim and String.isEmpty pointing to newer APIs
Reviewed-by: naoto, bpb, liach
Backport-of: 06d804a0f0
2025-06-17 20:45:27 +00:00
Damon Fenacci
cc4e9716ac 8358129: compiler/startup/StartupOutput.java runs into out of memory on Windows after JDK-8347406
Reviewed-by: shade
Backport-of: 534a8605e5
2025-06-17 13:10:06 +00:00
Roland Westrelin
46cfc1e194 8358334: C2/Shenandoah: incorrect execution with Unsafe
Reviewed-by: thartmann
Backport-of: 1fcede053c
2025-06-17 08:06:58 +00:00
Rajan Halade
ae71782e77 8359170: Add 2 TLS and 2 CS Sectigo roots
Reviewed-by: mullan
Backport-of: 9586817cea
2025-06-17 06:10:35 +00:00
Ioi Lam
753700182d 8355556: JVM crash because archived method handle intrinsics are not restored
Reviewed-by: shade
Backport-of: 366650a438
2025-06-17 04:36:41 +00:00
SendaoYan
eb727dcb51 8359272: Several vmTestbase/compact tests timed out on large memory machine
Reviewed-by: ayang
Backport-of: a0fb35c837
2025-06-17 00:43:52 +00:00
Johannes Bechberger
b6cacfcbc8 8359135: New test TestCPUTimeSampleThrottling fails intermittently
Reviewed-by: mdoerr
Backport-of: 3f0fef2c9c
2025-06-16 16:20:54 +00:00
Hamlin Li
d870a48880 8358892: RISC-V: jvm crash when running dacapo sunflow after JDK-8352504
8359045: RISC-V: construct test to verify invocation of C2_MacroAssembler::enc_cmove_cmp_fp => BoolTest::ge/gt

Reviewed-by: fyang
Backport-of: 9d060574e5
2025-06-16 11:18:32 +00:00
Fernando Guallini
2ea2f74f92 8358171: Additional code coverage for PEM API
Reviewed-by: rhalade, ascarpino
Backport-of: b2e7cda6a0
2025-06-16 09:54:18 +00:00
Alan Bateman
077ce2edc7 8358764: (sc) SocketChannel.close when thread blocked in read causes connection to be reset (win)
Reviewed-by: iris, jpai
Backport-of: e5196fc24d
2025-06-16 09:19:56 +00:00
Tobias Hartmann
2a3294571a 8359327: Incorrect AVX3Threshold results into code buffer overflows on APX targets
Reviewed-by: chagedorn
Backport-of: e7f63ba310
2025-06-16 08:48:49 +00:00
SendaoYan
3877746eb9 8359181: Error messages generated by configure --help after 8301197
Reviewed-by: ihse
Backport-of: 7b7136b4ec
2025-06-15 12:25:17 +00:00
Tobias Hartmann
3bd80fe3ba 8357782: JVM JIT Causes Static Initialization Order Issue
Reviewed-by: shade
Backport-of: e8ef93ae9d
2025-06-15 09:05:56 +00:00
Tobias Hartmann
03232d4a5d 8359200: Memory corruption in MStack::push
Reviewed-by: epeter, shade
Backport-of: ed39e17e34
2025-06-15 09:04:55 +00:00
Daniel Fuchs
4111730845 8359364: java/net/URL/EarlyOrDelayedParsing test fails intermittently
Reviewed-by: alanb
Backport-of: 57cabc6d74
2025-06-13 16:54:40 +00:00
Kevin Walls
74ea38e406 8358701: Remove misleading javax.management.remote API doc wording about JMX spec, and historic link to JMXMP
Reviewed-by: alanb
Backport-of: 66535fe26d
2025-06-13 14:28:14 +00:00
Tobias Hartmann
839a91e14b 8357982: Fix several failing BMI tests with -XX:+UseAPX
Reviewed-by: chagedorn
Backport-of: c98dffa186
2025-06-12 11:11:41 +00:00
Daniel Fuchs
aa4f79eaec 8358617: java/net/HttpURLConnection/HttpURLConnectionExpectContinueTest.java fails with 403 due to system proxies
Reviewed-by: jpai
Backport-of: a377773fa7
2025-06-11 16:22:34 +00:00
Rob McKenna
bff98e7d4d 8359059: Bump version numbers for 25.0.1
Reviewed-by: iris
2025-06-09 23:01:41 +00:00
Stuart Marks
c7df72ff0f 8358809: Improve link to stdin.encoding from java.lang.IO
Reviewed-by: naoto
Backport-of: d024f58e61
2025-06-07 00:56:45 +00:00
Rajan Halade
80e066e733 8345414: Google CAInterop test failures
Reviewed-by: weijun
Backport-of: 8e9ba788ae
2025-06-06 21:31:33 +00:00
700 changed files with 13613 additions and 8349 deletions

View File

@@ -310,7 +310,7 @@ jobs:
uses: ./.github/workflows/build-windows.yml
with:
platform: windows-x64
msvc-toolset-version: '14.43'
msvc-toolset-version: '14.44'
msvc-toolset-architecture: 'x86.x64'
configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }}
@@ -322,7 +322,7 @@ jobs:
uses: ./.github/workflows/build-windows.yml
with:
platform: windows-aarch64
msvc-toolset-version: '14.43'
msvc-toolset-version: '14.44'
msvc-toolset-architecture: 'arm64'
make-target: 'hotspot'
extra-conf-options: '--openjdk-target=aarch64-unknown-cygwin'

View File

@@ -1,7 +1,7 @@
[general]
project=jdk
project=jdk-updates
jbs=JDK
version=26
version=25.0.1
[checks]
error=author,committer,reviewers,merge,issues,executable,symlink,message,hg-tag,whitespace,problemlists,copyright

View File

@@ -1,6 +1,6 @@
#!/bin/bash
#
# Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -366,7 +366,7 @@ EOT
# Print additional help, e.g. a list of toolchains and JVM features.
# This must be done by the autoconf script.
( CONFIGURE_PRINT_ADDITIONAL_HELP=true . $generated_script PRINTF=printf )
( CONFIGURE_PRINT_ADDITIONAL_HELP=true . $generated_script PRINTF=printf ECHO=echo )
cat <<EOT

View File

@@ -28,7 +28,7 @@
################################################################################
# Minimum supported versions
JTREG_MINIMUM_VERSION=7.5.2
JTREG_MINIMUM_VERSION=7.5.1
GTEST_MINIMUM_VERSION=1.14.0
################################################################################

View File

@@ -26,7 +26,7 @@
# Versions and download locations for dependencies used by GitHub Actions (GHA)
GTEST_VERSION=1.14.0
JTREG_VERSION=7.5.2+1
JTREG_VERSION=7.5.1+1
LINUX_X64_BOOT_JDK_EXT=tar.gz
LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk24/1f9ff9062db4449d8ca828c504ffae90/36/GPL/openjdk-24_linux-x64_bin.tar.gz

View File

@@ -1174,9 +1174,9 @@ var getJibProfilesDependencies = function (input, common) {
jtreg: {
server: "jpg",
product: "jtreg",
version: "7.5.2",
version: "7.5.1",
build_number: "1",
file: "bundles/jtreg-7.5.2+1.zip",
file: "bundles/jtreg-7.5.1+1.zip",
environment_name: "JT_HOME",
environment_path: input.get("jtreg", "home_path") + "/bin",
configure_args: "--with-jtreg=" + input.get("jtreg", "home_path"),
@@ -1192,8 +1192,8 @@ var getJibProfilesDependencies = function (input, common) {
server: "jpg",
product: "jcov",
version: "3.0",
build_number: "3",
file: "bundles/jcov-3.0+3.zip",
build_number: "1",
file: "bundles/jcov-3.0+1.zip",
environment_name: "JCOV_HOME",
},

View File

@@ -26,17 +26,17 @@
# Default version, product, and vendor information to use,
# unless overridden by configure
DEFAULT_VERSION_FEATURE=26
DEFAULT_VERSION_FEATURE=25
DEFAULT_VERSION_INTERIM=0
DEFAULT_VERSION_UPDATE=0
DEFAULT_VERSION_UPDATE=1
DEFAULT_VERSION_PATCH=0
DEFAULT_VERSION_EXTRA1=0
DEFAULT_VERSION_EXTRA2=0
DEFAULT_VERSION_EXTRA3=0
DEFAULT_VERSION_DATE=2026-03-17
DEFAULT_VERSION_CLASSFILE_MAJOR=70 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
DEFAULT_VERSION_DATE=2025-10-21
DEFAULT_VERSION_CLASSFILE_MAJOR=69 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
DEFAULT_VERSION_CLASSFILE_MINOR=0
DEFAULT_VERSION_DOCS_API_SINCE=11
DEFAULT_ACCEPTABLE_BOOT_VERSIONS="24 25 26"
DEFAULT_JDK_SOURCE_TARGET_VERSION=26
DEFAULT_PROMOTED_VERSION_PRE=ea
DEFAULT_ACCEPTABLE_BOOT_VERSIONS="24 25"
DEFAULT_JDK_SOURCE_TARGET_VERSION=25
DEFAULT_PROMOTED_VERSION_PRE=
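The commented arithmetic above is the class-file version rule: major = feature + 44, so 70 corresponds to JDK 26 and 69 to JDK 25. A minimal Java sketch (the class-file path is a placeholder argument) that reads the header of a compiled class and prints its major version:

    import java.io.DataInputStream;
    import java.io.FileInputStream;
    import java.io.IOException;

    public class ClassFileMajor {
        public static void main(String[] args) throws IOException {
            // args[0]: path to any .class file produced by the JDK under test
            try (DataInputStream in = new DataInputStream(new FileInputStream(args[0]))) {
                int magic = in.readInt();            // 0xCAFEBABE
                int minor = in.readUnsignedShort();  // minor_version
                int major = in.readUnsignedShort();  // 69 for JDK 25 (25 + 44), 70 for JDK 26
                System.out.printf("magic=%08x major=%d minor=%d%n", magic, major, minor);
            }
        }
    }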

View File

@@ -542,10 +542,10 @@ class Bundle {
if (pattern != null) {
// Perform date-time format pattern conversion which is
// applicable to both SimpleDateFormat and j.t.f.DateTimeFormatter.
String transPattern = translateDateFormatLetters(calendarType, pattern, this::convertDateTimePatternLetter);
String transPattern = translateDateFormatLetters(calendarType, key, pattern, this::convertDateTimePatternLetter);
dateTimePatterns.add(i, transPattern);
// Additionally, perform SDF specific date-time format pattern conversion
sdfPatterns.add(i, translateDateFormatLetters(calendarType, transPattern, this::convertSDFLetter));
sdfPatterns.add(i, translateDateFormatLetters(calendarType, key, transPattern, this::convertSDFLetter));
} else {
dateTimePatterns.add(i, null);
sdfPatterns.add(i, null);
@@ -568,7 +568,7 @@ class Bundle {
}
}
private String translateDateFormatLetters(CalendarType calendarType, String cldrFormat, ConvertDateTimeLetters converter) {
private String translateDateFormatLetters(CalendarType calendarType, String patternKey, String cldrFormat, ConvertDateTimeLetters converter) {
String pattern = cldrFormat;
int length = pattern.length();
boolean inQuote = false;
@@ -587,7 +587,7 @@ class Bundle {
if (nextc == '\'') {
i++;
if (count != 0) {
converter.convert(calendarType, lastLetter, count, jrePattern);
converter.convert(calendarType, patternKey, lastLetter, count, jrePattern);
lastLetter = 0;
count = 0;
}
@@ -597,7 +597,7 @@ class Bundle {
}
if (!inQuote) {
if (count != 0) {
converter.convert(calendarType, lastLetter, count, jrePattern);
converter.convert(calendarType, patternKey, lastLetter, count, jrePattern);
lastLetter = 0;
count = 0;
}
@@ -614,7 +614,7 @@ class Bundle {
}
if (!(c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z')) {
if (count != 0) {
converter.convert(calendarType, lastLetter, count, jrePattern);
converter.convert(calendarType, patternKey, lastLetter, count, jrePattern);
lastLetter = 0;
count = 0;
}
@@ -627,7 +627,7 @@ class Bundle {
count++;
continue;
}
converter.convert(calendarType, lastLetter, count, jrePattern);
converter.convert(calendarType, patternKey, lastLetter, count, jrePattern);
lastLetter = c;
count = 1;
}
@@ -637,7 +637,7 @@ class Bundle {
}
if (count != 0) {
converter.convert(calendarType, lastLetter, count, jrePattern);
converter.convert(calendarType, patternKey, lastLetter, count, jrePattern);
}
if (cldrFormat.contentEquals(jrePattern)) {
return cldrFormat;
@@ -661,7 +661,7 @@ class Bundle {
* on the support given by the SimpleDateFormat and the j.t.f.DateTimeFormatter
* for date-time formatting.
*/
private void convertDateTimePatternLetter(CalendarType calendarType, char cldrLetter, int count, StringBuilder sb) {
private void convertDateTimePatternLetter(CalendarType calendarType, String patternKey, char cldrLetter, int count, StringBuilder sb) {
switch (cldrLetter) {
case 'u':
case 'U':
@@ -683,7 +683,7 @@ class Bundle {
* Perform a conversion of CLDR date-time format pattern letter which is
* specific to the SimpleDateFormat.
*/
private void convertSDFLetter(CalendarType calendarType, char cldrLetter, int count, StringBuilder sb) {
private void convertSDFLetter(CalendarType calendarType, String patternKey, char cldrLetter, int count, StringBuilder sb) {
switch (cldrLetter) {
case 'G':
if (calendarType != CalendarType.GREGORIAN) {
@@ -722,6 +722,17 @@ class Bundle {
appendN('z', count, sb);
break;
case 'y':
// If the style is FULL/LONG for a Japanese Calendar, make the
// count == 4 for Gan-nen
if (calendarType == CalendarType.JAPANESE &&
(patternKey.contains("full-") ||
patternKey.contains("long-"))) {
count = 4;
}
appendN(cldrLetter, count, sb);
break;
case 'Z':
if (count == 4 || count == 5) {
sb.append("XXX");
@@ -767,6 +778,7 @@ class Bundle {
.collect(Collectors.toMap(
e -> calendarPrefix + e.getKey(),
e -> translateDateFormatLetters(calendarType,
e.getKey(),
(String)e.getValue(),
this::convertDateTimePatternLetter)
))
@@ -775,7 +787,7 @@ class Bundle {
@FunctionalInterface
private interface ConvertDateTimeLetters {
void convert(CalendarType calendarType, char cldrLetter, int count, StringBuilder sb);
void convert(CalendarType calendarType, String patternKey, char cldrLetter, int count, StringBuilder sb);
}
/**
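The patternKey parameter threaded through these converters backs the JDK-8358819 backport listed above ("The first year is not displayed correctly in Japanese Calendar"): for Japanese-calendar FULL/LONG date styles the 'y' count is forced to 4 so the era's first year renders as Gan-nen (元年) rather than "1年". A small sketch of the user-visible behaviour this targets, using standard java.time APIs (the exact output text depends on the locale data in the build):

    import java.time.LocalDate;
    import java.time.chrono.JapaneseChronology;
    import java.time.format.DateTimeFormatter;
    import java.time.format.FormatStyle;
    import java.util.Locale;

    public class GannenDemo {
        public static void main(String[] args) {
            // 2019-05-01 falls in year 1 of the Reiwa era ("Gan-nen").
            DateTimeFormatter full = DateTimeFormatter.ofLocalizedDate(FormatStyle.FULL)
                    .withChronology(JapaneseChronology.INSTANCE)
                    .withLocale(Locale.JAPAN);
            // With the fix, FULL/LONG styles are expected to print the year as 元年
            // (Gan-nen) instead of 1年 for the first year of an era.
            System.out.println(full.format(LocalDate.of(2019, 5, 1)));
        }
    }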

View File

@@ -46,8 +46,6 @@ CLDR_GEN_DONE := $(GENSRC_DIR)/_cldr-gensrc.marker
TZ_DATA_DIR := $(MODULE_SRC)/share/data/tzdata
ZONENAME_TEMPLATE := $(MODULE_SRC)/share/classes/java/time/format/ZoneName.java.template
# The `-utf8` option is used even for US English, as some names
# may contain non-ASCII characters, such as “Türkiye”.
$(CLDR_GEN_DONE): $(wildcard $(CLDR_DATA_DIR)/dtd/*.dtd) \
$(wildcard $(CLDR_DATA_DIR)/main/en*.xml) \
$(wildcard $(CLDR_DATA_DIR)/supplemental/*.xml) \
@@ -63,8 +61,7 @@ $(CLDR_GEN_DONE): $(wildcard $(CLDR_DATA_DIR)/dtd/*.dtd) \
-basemodule \
-year $(COPYRIGHT_YEAR) \
-zntempfile $(ZONENAME_TEMPLATE) \
-tzdatadir $(TZ_DATA_DIR) \
-utf8)
-tzdatadir $(TZ_DATA_DIR))
$(TOUCH) $@
TARGETS += $(CLDR_GEN_DONE)

View File

@@ -45,8 +45,7 @@ $(CLDR_GEN_DONE): $(wildcard $(CLDR_DATA_DIR)/dtd/*.dtd) \
-baselocales "en-US" \
-year $(COPYRIGHT_YEAR) \
-o $(GENSRC_DIR) \
-tzdatadir $(TZ_DATA_DIR) \
-utf8)
-tzdatadir $(TZ_DATA_DIR))
$(TOUCH) $@
TARGETS += $(CLDR_GEN_DONE)

View File

@@ -187,18 +187,22 @@ public class HelloWorld {
new Run("none", "Hello from Cupertino")
}),
new Paragraph("title", new Run[] {
new Run("none", "台北問候您!")
new Run("none", "\u53F0\u5317\u554F\u5019\u60A8\u0021")
}),
new Paragraph("title", new Run[] {
new Run("none", "Αθηναι ασπαζονται υμας!") // Greek
new Run("none", "\u0391\u03B8\u03B7\u03BD\u03B1\u03B9\u0020" // Greek
+ "\u03B1\u03C3\u03C0\u03B1\u03B6\u03BF\u03BD"
+ "\u03C4\u03B1\u03B9\u0020\u03C5\u03BC\u03B1"
+ "\u03C2\u0021")
}),
new Paragraph("title", new Run[] {
new Run("none", "東京から今日は")
new Run("none", "\u6771\u4eac\u304b\u3089\u4eca\u65e5\u306f")
}),
new Paragraph("title", new Run[] {
new Run("none", "שלום מירושלים")
new Run("none", "\u05e9\u05dc\u05d5\u05dd \u05de\u05d9\u05e8\u05d5"
+ "\u05e9\u05dc\u05d9\u05dd")
}),
new Paragraph("title", new Run[] {
new Run("none", "سلام")
new Run("none", "\u0633\u0644\u0627\u0645")
}), };
}
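Whichever side of this hunk a build carries, the compiled string constants are identical: javac translates \uXXXX escapes in an early lexical phase, so a raw UTF-8 literal and its escaped form denote the same string. A tiny check using the Arabic greeting from the last pair:

    public class EscapeCheck {
        public static void main(String[] args) {
            String literal = "سلام";
            String escaped = "\u0633\u0644\u0627\u0645";
            System.out.println(literal.equals(escaped)); // true
        }
    }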

View File

@@ -456,13 +456,13 @@ SliderDemo.horizontal=Horizontal
SliderDemo.vertical=Vertikal
SliderDemo.plain=Einfach
SliderDemo.a_plain_slider=Ein einfacher Schieberegler
SliderDemo.majorticks=Grobteilungen
SliderDemo.majorticksdescription=Ein Schieberegler mit Grobteilungsmarkierungen
SliderDemo.ticks=Feinteilungen, Teilungen zum Einrasten und Labels
SliderDemo.minorticks=Feinteilungen
SliderDemo.minorticksdescription=Ein Schieberegler mit Grob- und Feinteilungen, mit Teilungen, in die der Schieberegler einrastet, wobei einige Teilungen mit einem sichtbaren Label versehen sind
SliderDemo.majorticks=Hauptteilstriche
SliderDemo.majorticksdescription=Ein Schieberegler mit Hauptteilstrichen
SliderDemo.ticks=Hilfsteilstriche, zum Einrasten und Beschriften
SliderDemo.minorticks=Hilfsteilstriche
SliderDemo.minorticksdescription=Ein Schieberegler mit Haupt- und Hilfsteilstrichen, in die der Schieberegler einrastet, wobei einige Teilstriche mit einer sichtbaren Beschriftung versehen sind
SliderDemo.disabled=Deaktiviert
SliderDemo.disableddescription=Ein Schieberegler mit Grob- und Feinteilungen, der nicht aktiviert ist (kann nicht bearbeitet werden)
SliderDemo.disableddescription=Ein Schieberegler mit Haupt- und Hilfsteilstrichen, der nicht aktiviert ist (kann nicht bearbeitet werden)
### SplitPane Demo ###

View File

@@ -3921,10 +3921,6 @@ ins_attrib ins_alignment(4); // Required alignment attribute (must
// compute_padding() function must be
// provided for the instruction
// Whether this node is expanded during code emission into a sequence of
// instructions and the first instruction can perform an implicit null check.
ins_attrib ins_is_late_expanded_null_check_candidate(false);
//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in

View File

@@ -292,7 +292,8 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
} else {
assert(is_phantom, "only remaining strength");
assert(!is_narrow, "phantom access cannot be narrow");
__ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom));
// AOT saved adapters need relocation for this call.
__ lea(lr, RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom)));
}
__ blr(lr);
__ mov(rscratch1, r0);

View File

@@ -106,13 +106,6 @@ instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
match(Set dst (LoadP mem));
predicate(UseZGC && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
effect(TEMP dst, KILL cr);
// The main load is a candidate to implement implicit null checks, as long as
// legitimize_address() does not require a preceding lea instruction to
// materialize the memory operand. The absence of a preceding lea instruction
// is guaranteed for immLoffset8 memory operands, because these do not lead to
// out-of-range offsets (see definition of immLoffset8). Fortunately,
// immLoffset8 memory operands are the most common ones in practice.
ins_is_late_expanded_null_check_candidate(opnd_array(1)->opcode() == INDOFFL8);
ins_cost(4 * INSN_COST);
@@ -124,11 +117,7 @@ instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
// Fix up any out-of-range offsets.
assert_different_registers(rscratch2, as_Register($mem$$base));
assert_different_registers(rscratch2, $dst$$Register);
int size = 8;
assert(!this->is_late_expanded_null_check_candidate() ||
!MacroAssembler::legitimize_address_requires_lea(ref_addr, size),
"an instruction that can be used for implicit null checking should emit the candidate memory access first");
ref_addr = __ legitimize_address(ref_addr, size, rscratch2);
ref_addr = __ legitimize_address(ref_addr, 8, rscratch2);
}
__ ldr($dst$$Register, ref_addr);
z_load_barrier(masm, this, ref_addr, $dst$$Register, rscratch1);

View File

@@ -129,21 +129,16 @@ class MacroAssembler: public Assembler {
a.lea(this, r);
}
// Whether materializing the given address for a LDR/STR requires an
// additional lea instruction.
static bool legitimize_address_requires_lea(const Address &a, int size) {
return a.getMode() == Address::base_plus_offset &&
!Address::offset_ok_for_immed(a.offset(), exact_log2(size));
}
/* Sometimes we get misaligned loads and stores, usually from Unsafe
accesses, and these can exceed the offset range. */
Address legitimize_address(const Address &a, int size, Register scratch) {
if (legitimize_address_requires_lea(a, size)) {
block_comment("legitimize_address {");
lea(scratch, a);
block_comment("} legitimize_address");
return Address(scratch);
if (a.getMode() == Address::base_plus_offset) {
if (! Address::offset_ok_for_immed(a.offset(), exact_log2(size))) {
block_comment("legitimize_address {");
lea(scratch, a);
block_comment("} legitimize_address");
return Address(scratch);
}
}
return a;
}

View File

@@ -8888,13 +8888,8 @@ instruct TailCalljmpInd(IPRegP jump_target, inline_cache_regP method_ptr) %{
match(TailCall jump_target method_ptr);
ins_cost(CALL_COST);
format %{ "MOV Rexception_pc, LR\n\t"
"jump $jump_target \t! $method_ptr holds method" %}
format %{ "jump $jump_target \t! $method_ptr holds method" %}
ins_encode %{
__ mov(Rexception_pc, LR); // this is used only to call
// StubRoutines::forward_exception_entry()
// which expects PC of exception in
// R5. FIXME?
__ jump($jump_target$$Register);
%}
ins_pipe(tail_call);
@@ -8939,8 +8934,10 @@ instruct ForwardExceptionjmp()
match(ForwardException);
ins_cost(CALL_COST);
format %{ "b forward_exception_stub" %}
format %{ "MOV Rexception_pc, LR\n\t"
"b forward_exception_entry" %}
ins_encode %{
__ mov(Rexception_pc, LR);
// OK to trash Rtemp, because Rtemp is used by stub
__ jump(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type, Rtemp);
%}

View File

@@ -141,7 +141,6 @@ instruct zLoadP(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0)
%{
match(Set dst (LoadP mem));
effect(TEMP_DEF dst, KILL cr0);
ins_is_late_expanded_null_check_candidate(true);
ins_cost(MEMORY_REF_COST);
predicate((UseZGC && n->as_Load()->barrier_data() != 0)
@@ -161,7 +160,6 @@ instruct zLoadP_acq(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0)
%{
match(Set dst (LoadP mem));
effect(TEMP_DEF dst, KILL cr0);
ins_is_late_expanded_null_check_candidate(true);
ins_cost(3 * MEMORY_REF_COST);
// Predicate on instruction order is implicitly present due to the predicate of the cheaper zLoadP operation

View File

@@ -3928,8 +3928,10 @@ void MacroAssembler::kernel_crc32_vpmsum_aligned(Register crc, Register buf, Reg
Label L_outer_loop, L_inner_loop, L_last;
// Set DSCR pre-fetch to deepest.
load_const_optimized(t0, VM_Version::_dscr_val | 7);
mtdscr(t0);
if (VM_Version::has_mfdscr()) {
load_const_optimized(t0, VM_Version::_dscr_val | 7);
mtdscr(t0);
}
mtvrwz(VCRC, crc); // crc lives in VCRC, now
@@ -4073,8 +4075,10 @@ void MacroAssembler::kernel_crc32_vpmsum_aligned(Register crc, Register buf, Reg
// ********** Main loop end **********
// Restore DSCR pre-fetch value.
load_const_optimized(t0, VM_Version::_dscr_val);
mtdscr(t0);
if (VM_Version::has_mfdscr()) {
load_const_optimized(t0, VM_Version::_dscr_val);
mtdscr(t0);
}
// ********** Simple loop for remaining 16 byte blocks **********
{

View File

@@ -4036,10 +4036,6 @@ ins_attrib ins_field_cbuf_insts_offset(-1);
ins_attrib ins_field_load_ic_hi_node(0);
ins_attrib ins_field_load_ic_node(0);
// Whether this node is expanded during code emission into a sequence of
// instructions and the first instruction can perform an implicit null check.
ins_attrib ins_is_late_expanded_null_check_candidate(false);
//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct
// parsing in the ADLC because operands constitute user defined types

View File

@@ -952,8 +952,10 @@ class StubGenerator: public StubCodeGenerator {
address start_pc = __ pc();
Register tmp1 = R6_ARG4;
// probably copy stub would have changed value reset it.
__ load_const_optimized(tmp1, VM_Version::_dscr_val);
__ mtdscr(tmp1);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp1, VM_Version::_dscr_val);
__ mtdscr(tmp1);
}
__ li(R3_RET, 0); // return 0
__ blr();
return start_pc;
@@ -1070,9 +1072,10 @@ class StubGenerator: public StubCodeGenerator {
__ dcbt(R3_ARG1, 0);
// If supported set DSCR pre-fetch to deepest.
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
__ li(tmp1, 16);
// Backbranch target aligned to 32-byte. Not 16-byte align as
@@ -1092,8 +1095,10 @@ class StubGenerator: public StubCodeGenerator {
__ bdnz(l_10); // Dec CTR and loop if not zero.
// Restore DSCR pre-fetch value.
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}
} // FasterArrayCopy
@@ -1344,8 +1349,10 @@ class StubGenerator: public StubCodeGenerator {
__ dcbt(R3_ARG1, 0);
// If supported set DSCR pre-fetch to deepest.
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
__ li(tmp1, 16);
// Backbranch target aligned to 32-byte. It's not aligned 16-byte
@@ -1365,8 +1372,11 @@ class StubGenerator: public StubCodeGenerator {
__ bdnz(l_9); // Dec CTR and loop if not zero.
// Restore DSCR pre-fetch value.
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}
} // FasterArrayCopy
__ bind(l_6);
@@ -1527,9 +1537,10 @@ class StubGenerator: public StubCodeGenerator {
__ dcbt(R3_ARG1, 0);
// Set DSCR pre-fetch to deepest.
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
__ li(tmp1, 16);
// Backbranch target aligned to 32-byte. Not 16-byte align as
@@ -1549,9 +1560,10 @@ class StubGenerator: public StubCodeGenerator {
__ bdnz(l_7); // Dec CTR and loop if not zero.
// Restore DSCR pre-fetch value.
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}
} // FasterArrayCopy
@@ -1672,9 +1684,10 @@ class StubGenerator: public StubCodeGenerator {
__ dcbt(R3_ARG1, 0);
// Set DSCR pre-fetch to deepest.
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
__ li(tmp1, 16);
// Backbranch target aligned to 32-byte. Not 16-byte align as
@@ -1694,8 +1707,10 @@ class StubGenerator: public StubCodeGenerator {
__ bdnz(l_4);
// Restore DSCR pre-fetch value.
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}
__ cmpwi(CR0, R5_ARG3, 0);
__ beq(CR0, l_6);
@@ -1788,9 +1803,10 @@ class StubGenerator: public StubCodeGenerator {
__ dcbt(R3_ARG1, 0);
// Set DSCR pre-fetch to deepest.
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
__ li(tmp1, 16);
// Backbranch target aligned to 32-byte. Not 16-byte align as
@@ -1810,8 +1826,10 @@ class StubGenerator: public StubCodeGenerator {
__ bdnz(l_5); // Dec CTR and loop if not zero.
// Restore DSCR pre-fetch value.
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}
} // FasterArrayCopy
@@ -1910,9 +1928,10 @@ class StubGenerator: public StubCodeGenerator {
__ dcbt(R3_ARG1, 0);
// Set DSCR pre-fetch to deepest.
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
__ li(tmp1, 16);
// Backbranch target aligned to 32-byte. Not 16-byte align as
@@ -1932,8 +1951,10 @@ class StubGenerator: public StubCodeGenerator {
__ bdnz(l_4);
// Restore DSCR pre-fetch value.
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}
__ cmpwi(CR0, R5_ARG3, 0);
__ beq(CR0, l_1);

View File

@@ -80,7 +80,9 @@ void VM_Version::initialize() {
"%zu on this machine", PowerArchitecturePPC64);
// Power 8: Configure Data Stream Control Register.
config_dscr();
if (VM_Version::has_mfdscr()) {
config_dscr();
}
if (!UseSIGTRAP) {
MSG(TrapBasedICMissChecks);
@@ -170,7 +172,8 @@ void VM_Version::initialize() {
// Create and print feature-string.
char buf[(num_features+1) * 16]; // Max 16 chars per feature.
jio_snprintf(buf, sizeof(buf),
"ppc64 sha aes%s%s",
"ppc64 sha aes%s%s%s",
(has_mfdscr() ? " mfdscr" : ""),
(has_darn() ? " darn" : ""),
(has_brw() ? " brw" : "")
// Make sure number of %s matches num_features!
@@ -488,6 +491,7 @@ void VM_Version::determine_features() {
uint32_t *code = (uint32_t *)a->pc();
// Keep R3_ARG1 unmodified, it contains &field (see below).
// Keep R4_ARG2 unmodified, it contains offset = 0 (see below).
a->mfdscr(R0);
a->darn(R7);
a->brw(R5, R6);
a->blr();
@@ -524,6 +528,7 @@ void VM_Version::determine_features() {
// determine which instructions are legal.
int feature_cntr = 0;
if (code[feature_cntr++]) features |= mfdscr_m;
if (code[feature_cntr++]) features |= darn_m;
if (code[feature_cntr++]) features |= brw_m;

View File

@@ -32,12 +32,14 @@
class VM_Version: public Abstract_VM_Version {
protected:
enum Feature_Flag {
mfdscr,
darn,
brw,
num_features // last entry to count features
};
enum Feature_Flag_Set {
unknown_m = 0,
mfdscr_m = (1 << mfdscr ),
darn_m = (1 << darn ),
brw_m = (1 << brw ),
all_features_m = (unsigned long)-1
@@ -67,8 +69,9 @@ public:
static bool is_determine_features_test_running() { return _is_determine_features_test_running; }
// CPU instruction support
static bool has_darn() { return (_features & darn_m) != 0; }
static bool has_brw() { return (_features & brw_m) != 0; }
static bool has_mfdscr() { return (_features & mfdscr_m) != 0; } // Power8, but may be unavailable (QEMU)
static bool has_darn() { return (_features & darn_m) != 0; }
static bool has_brw() { return (_features & brw_m) != 0; }
// Assembler testing
static void allow_all();

View File

@@ -2170,15 +2170,13 @@ void C2_MacroAssembler::enc_cmove_cmp_fp(int cmpFlag, FloatRegister op1, FloatRe
cmov_cmp_fp_le(op1, op2, dst, src, is_single);
break;
case BoolTest::ge:
assert(false, "Should go to BoolTest::le case");
ShouldNotReachHere();
cmov_cmp_fp_ge(op1, op2, dst, src, is_single);
break;
case BoolTest::lt:
cmov_cmp_fp_lt(op1, op2, dst, src, is_single);
break;
case BoolTest::gt:
assert(false, "Should go to BoolTest::lt case");
ShouldNotReachHere();
cmov_cmp_fp_gt(op1, op2, dst, src, is_single);
break;
default:
assert(false, "unsupported compare condition");

View File

@@ -96,7 +96,6 @@ instruct zLoadP(iRegPNoSp dst, memory mem, iRegPNoSp tmp, rFlagsReg cr)
match(Set dst (LoadP mem));
predicate(UseZGC && n->as_Load()->barrier_data() != 0);
effect(TEMP dst, TEMP tmp, KILL cr);
ins_is_late_expanded_null_check_candidate(true);
ins_cost(4 * DEFAULT_COST);

View File

@@ -1268,12 +1268,19 @@ void MacroAssembler::cmov_gtu(Register cmp1, Register cmp2, Register dst, Regist
}
// ----------- cmove, compare float -----------
//
// For CmpF/D + CMoveI/L, ordered ones are quite straight and simple,
// so, just list behaviour of unordered ones as follow.
//
// Set dst (CMoveI (Binary cop (CmpF/D op1 op2)) (Binary dst src))
// (If one or both inputs to the compare are NaN, then)
// 1. (op1 lt op2) => true => CMove: dst = src
// 2. (op1 le op2) => true => CMove: dst = src
// 3. (op1 gt op2) => false => CMove: dst = dst
// 4. (op1 ge op2) => false => CMove: dst = dst
// 5. (op1 eq op2) => false => CMove: dst = dst
// 6. (op1 ne op2) => true => CMove: dst = src
// Move src to dst only if cmp1 == cmp2,
// otherwise leave dst unchanged, including the case where one of them is NaN.
// Clarification:
// java code : cmp1 != cmp2 ? dst : src
// transformed to : CMove dst, (cmp1 eq cmp2), dst, src
void MacroAssembler::cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
if (UseZicond) {
if (is_single) {
@@ -1289,7 +1296,7 @@ void MacroAssembler::cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Regi
Label no_set;
if (is_single) {
// jump if cmp1 != cmp2, including the case of NaN
// not jump (i.e. move src to dst) if cmp1 == cmp2
// fallthrough (i.e. move src to dst) if cmp1 == cmp2
float_bne(cmp1, cmp2, no_set);
} else {
double_bne(cmp1, cmp2, no_set);
@@ -1298,11 +1305,6 @@ void MacroAssembler::cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Regi
bind(no_set);
}
// Keep dst unchanged only if cmp1 == cmp2,
// otherwise move src to dst, including the case where one of them is NaN.
// Clarification:
// java code : cmp1 == cmp2 ? dst : src
// transformed to : CMove dst, (cmp1 ne cmp2), dst, src
void MacroAssembler::cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
if (UseZicond) {
if (is_single) {
@@ -1318,7 +1320,7 @@ void MacroAssembler::cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Regi
Label no_set;
if (is_single) {
// jump if cmp1 == cmp2
// not jump (i.e. move src to dst) if cmp1 != cmp2, including the case of NaN
// fallthrough (i.e. move src to dst) if cmp1 != cmp2, including the case of NaN
float_beq(cmp1, cmp2, no_set);
} else {
double_beq(cmp1, cmp2, no_set);
@@ -1327,14 +1329,6 @@ void MacroAssembler::cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Regi
bind(no_set);
}
// When cmp1 <= cmp2 or any of them is NaN then dst = src, otherwise, dst = dst
// Clarification
// scenario 1:
// java code : cmp2 < cmp1 ? dst : src
// transformed to : CMove dst, (cmp1 le cmp2), dst, src
// scenario 2:
// java code : cmp1 > cmp2 ? dst : src
// transformed to : CMove dst, (cmp1 le cmp2), dst, src
void MacroAssembler::cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
if (UseZicond) {
if (is_single) {
@@ -1350,7 +1344,7 @@ void MacroAssembler::cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Regi
Label no_set;
if (is_single) {
// jump if cmp1 > cmp2
// not jump (i.e. move src to dst) if cmp1 <= cmp2 or either is NaN
// fallthrough (i.e. move src to dst) if cmp1 <= cmp2 or either is NaN
float_bgt(cmp1, cmp2, no_set);
} else {
double_bgt(cmp1, cmp2, no_set);
@@ -1359,14 +1353,30 @@ void MacroAssembler::cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Regi
bind(no_set);
}
// When cmp1 < cmp2 or any of them is NaN then dst = src, otherwise, dst = dst
// Clarification
// scenario 1:
// java code : cmp2 <= cmp1 ? dst : src
// transformed to : CMove dst, (cmp1 lt cmp2), dst, src
// scenario 2:
// java code : cmp1 >= cmp2 ? dst : src
// transformed to : CMove dst, (cmp1 lt cmp2), dst, src
void MacroAssembler::cmov_cmp_fp_ge(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
if (UseZicond) {
if (is_single) {
fle_s(t0, cmp2, cmp1);
} else {
fle_d(t0, cmp2, cmp1);
}
czero_nez(dst, dst, t0);
czero_eqz(t0 , src, t0);
orr(dst, dst, t0);
return;
}
Label no_set;
if (is_single) {
// jump if cmp1 < cmp2 or either is NaN
// fallthrough (i.e. move src to dst) if cmp1 >= cmp2
float_blt(cmp1, cmp2, no_set, false, true);
} else {
double_blt(cmp1, cmp2, no_set, false, true);
}
mv(dst, src);
bind(no_set);
}
void MacroAssembler::cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
if (UseZicond) {
if (is_single) {
@@ -1382,7 +1392,7 @@ void MacroAssembler::cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Regi
Label no_set;
if (is_single) {
// jump if cmp1 >= cmp2
// not jump (i.e. move src to dst) if cmp1 < cmp2 or either is NaN
// fallthrough (i.e. move src to dst) if cmp1 < cmp2 or either is NaN
float_bge(cmp1, cmp2, no_set);
} else {
double_bge(cmp1, cmp2, no_set);
@@ -1391,6 +1401,30 @@ void MacroAssembler::cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Regi
bind(no_set);
}
void MacroAssembler::cmov_cmp_fp_gt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
if (UseZicond) {
if (is_single) {
flt_s(t0, cmp2, cmp1);
} else {
flt_d(t0, cmp2, cmp1);
}
czero_nez(dst, dst, t0);
czero_eqz(t0 , src, t0);
orr(dst, dst, t0);
return;
}
Label no_set;
if (is_single) {
// jump if cmp1 <= cmp2 or either is NaN
// fallthrough (i.e. move src to dst) if cmp1 > cmp2
float_ble(cmp1, cmp2, no_set, false, true);
} else {
double_ble(cmp1, cmp2, no_set, false, true);
}
mv(dst, src);
bind(no_set);
}
// Float compare branch instructions
#define INSN(NAME, FLOATCMP, BRANCH) \
@@ -5310,42 +5344,6 @@ void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, R
add(final_dest_hi, dest_hi, carry);
}
/**
* Multiply 32 bit by 32 bit first loop.
*/
void MacroAssembler::multiply_32_x_32_loop(Register x, Register xstart, Register x_xstart,
Register y, Register y_idx, Register z,
Register carry, Register product,
Register idx, Register kdx) {
// jlong carry, x[], y[], z[];
// for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
// long product = y[idx] * x[xstart] + carry;
// z[kdx] = (int)product;
// carry = product >>> 32;
// }
// z[xstart] = (int)carry;
Label L_first_loop, L_first_loop_exit;
blez(idx, L_first_loop_exit);
shadd(t0, xstart, x, t0, LogBytesPerInt);
lwu(x_xstart, Address(t0, 0));
bind(L_first_loop);
subiw(idx, idx, 1);
shadd(t0, idx, y, t0, LogBytesPerInt);
lwu(y_idx, Address(t0, 0));
mul(product, x_xstart, y_idx);
add(product, product, carry);
srli(carry, product, 32);
subiw(kdx, kdx, 1);
shadd(t0, kdx, z, t0, LogBytesPerInt);
sw(product, Address(t0, 0));
bgtz(idx, L_first_loop);
bind(L_first_loop_exit);
}
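For reference, a self-contained C++ sketch of the 32x32 first loop shown above, following the pseudo-code in its comment with plain arrays standing in for the Java int arrays.
#include <cstdint>
#include <cstdio>

// One limb of x times all limbs of y, 32x32->64 with carry propagation.
static void multiply_32_x_32_loop(const uint32_t* x, int xstart,
                                  const uint32_t* y, int ystart,
                                  uint32_t* z) {
  uint64_t carry = 0;
  const uint64_t x_xstart = x[xstart];
  for (int idx = ystart, kdx = ystart + 1 + xstart; idx >= 0; idx--, kdx--) {
    uint64_t product = (uint64_t)y[idx] * x_xstart + carry;
    z[kdx] = (uint32_t)product;   // low 32 bits go to the output limb
    carry  = product >> 32;       // high 32 bits carry into the next limb
  }
  z[xstart] = (uint32_t)carry;
}

int main() {
  uint32_t x[] = { 0xFFFFFFFFu };            // xstart = 0
  uint32_t y[] = { 0xFFFFFFFFu, 2u };        // ystart = ylen - 1 = 1
  uint32_t z[3] = { 0, 0, 0 };
  multiply_32_x_32_loop(x, 0, y, 1, z);
  printf("%08x %08x %08x\n", z[0], z[1], z[2]); // fffffffe 00000002 fffffffe
  return 0;
}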
/**
* Multiply 64 bit by 64 bit first loop.
*/
@@ -5562,77 +5560,16 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Regi
const Register carry = tmp5;
const Register product = xlen;
const Register x_xstart = tmp0;
const Register jdx = tmp1;
mv(idx, ylen); // idx = ylen;
addw(kdx, xlen, ylen); // kdx = xlen+ylen;
mv(carry, zr); // carry = 0;
Label L_multiply_64_x_64_loop, L_done;
Label L_done;
subiw(xstart, xlen, 1);
bltz(xstart, L_done);
const Register jdx = tmp1;
if (AvoidUnalignedAccesses) {
int base_offset = arrayOopDesc::base_offset_in_bytes(T_INT);
assert((base_offset % (UseCompactObjectHeaders ? 4 :
(UseCompressedClassPointers ? 8 : 4))) == 0, "Must be");
if ((base_offset % 8) == 0) {
// multiply_64_x_64_loop emits 8-byte load/store to access two elements
// at a time from int arrays x and y. When base_offset is 8 bytes, these
// accesses are naturally aligned if both xlen and ylen are even numbers.
orr(t0, xlen, ylen);
test_bit(t0, t0, 0);
beqz(t0, L_multiply_64_x_64_loop);
}
Label L_second_loop_unaligned, L_third_loop, L_third_loop_exit;
multiply_32_x_32_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
shadd(t0, xstart, z, t0, LogBytesPerInt);
sw(carry, Address(t0, 0));
bind(L_second_loop_unaligned);
mv(carry, zr);
mv(jdx, ylen);
subiw(xstart, xstart, 1);
bltz(xstart, L_done);
subi(sp, sp, 2 * wordSize);
sd(z, Address(sp, 0));
sd(zr, Address(sp, wordSize));
shadd(t0, xstart, z, t0, LogBytesPerInt);
addi(z, t0, 4);
shadd(t0, xstart, x, t0, LogBytesPerInt);
lwu(product, Address(t0, 0));
blez(jdx, L_third_loop_exit);
bind(L_third_loop);
subiw(jdx, jdx, 1);
shadd(t0, jdx, y, t0, LogBytesPerInt);
lwu(t0, Address(t0, 0));
mul(t1, t0, product);
add(t0, t1, carry);
shadd(tmp6, jdx, z, t1, LogBytesPerInt);
lwu(t1, Address(tmp6, 0));
add(t0, t0, t1);
sw(t0, Address(tmp6, 0));
srli(carry, t0, 32);
bgtz(jdx, L_third_loop);
bind(L_third_loop_exit);
ld(z, Address(sp, 0));
addi(sp, sp, 2 * wordSize);
shadd(t0, xstart, z, t0, LogBytesPerInt);
sw(carry, Address(t0, 0));
j(L_second_loop_unaligned);
}
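A small check of the parity test in the code above: orr + test_bit(..., 0) + beqz takes the 64x64 loop only when bit 0 of (xlen | ylen) is clear, i.e. when both lengths are even and the 8-byte accesses stay naturally aligned.
#include <cassert>

// (xlen | ylen) has bit 0 set iff at least one of the lengths is odd.
static bool both_even(int xlen, int ylen) {
  return ((xlen | ylen) & 1) == 0;
}

int main() {
  assert(both_even(4, 6));    // even/even: take multiply_64_x_64_loop
  assert(!both_even(4, 7));   // any odd length: fall back to the 32-bit path
  assert(!both_even(3, 6));
  return 0;
}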
bind(L_multiply_64_x_64_loop);
multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
Label L_second_loop_aligned;

View File

@@ -660,7 +660,9 @@ class MacroAssembler: public Assembler {
void cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
void cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
void cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
void cmov_cmp_fp_ge(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
void cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
void cmov_cmp_fp_gt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
public:
// We try to follow risc-v asm mnemonics.
@@ -1382,10 +1384,6 @@ public:
void adc(Register dst, Register src1, Register src2, Register carry);
void add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
Register src1, Register src2, Register carry);
void multiply_32_x_32_loop(Register x, Register xstart, Register x_xstart,
Register y, Register y_idx, Register z,
Register carry, Register product,
Register idx, Register kdx);
void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
Register y, Register y_idx, Register z,
Register carry, Register product,

View File

@@ -2619,10 +2619,6 @@ ins_attrib ins_alignment(4); // Required alignment attribute (must
// compute_padding() function must be
// provided for the instruction
// Whether this node is expanded during code emission into a sequence of
// instructions and the first instruction can perform an implicit null check.
ins_attrib ins_is_late_expanded_null_check_candidate(false);
//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in
@@ -8435,6 +8431,17 @@ instruct castVV(vReg dst)
ins_pipe(pipe_class_empty);
%}
instruct castVVMask(vRegMask dst)
%{
match(Set dst (CastVV dst));
size(0);
format %{ "# castVV of $dst" %}
ins_encode(/* empty encoding */);
ins_cost(0);
ins_pipe(pipe_class_empty);
%}
// ============================================================================
// Convert Instructions

View File

@@ -203,15 +203,15 @@ void VM_Version::common_initialize() {
}
}
// Misc Intrinsics could depend on RVV
// Misc Intrinsics that could depend on RVV.
if (UseZba || UseRVV) {
if (!AvoidUnalignedAccesses && (UseZba || UseRVV)) {
if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
}
} else {
if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
warning("CRC32 intrinsic requires Zba or RVV instructions (not available on this CPU)");
warning("CRC32 intrinsic are not available on this CPU.");
}
FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
}
@@ -325,20 +325,40 @@ void VM_Version::c2_initialize() {
FLAG_SET_DEFAULT(UseMulAddIntrinsic, true);
}
if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, true);
if (!AvoidUnalignedAccesses) {
if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, true);
}
} else if (UseMultiplyToLenIntrinsic) {
warning("Intrinsics for BigInteger.multiplyToLen() not available on this CPU.");
FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, false);
}
if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, true);
if (!AvoidUnalignedAccesses) {
if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, true);
}
} else if (UseSquareToLenIntrinsic) {
warning("Intrinsics for BigInteger.squareToLen() not available on this CPU.");
FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, false);
}
if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, true);
if (!AvoidUnalignedAccesses) {
if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, true);
}
} else if (UseMontgomeryMultiplyIntrinsic) {
warning("Intrinsics for BigInteger.montgomeryMultiply() not available on this CPU.");
FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, false);
}
if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, true);
if (!AvoidUnalignedAccesses) {
if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, true);
}
} else if (UseMontgomerySquareIntrinsic) {
warning("Intrinsics for BigInteger.montgomerySquare() not available on this CPU.");
FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, false);
}
// Adler32

View File

@@ -410,7 +410,7 @@
// C2I adapter frames:
//
// STACK (interpreted called from compiled, on entry to template interpreter):
// STACK (interpreted called from compiled, on entry to frame manager):
//
// [TOP_C2I_FRAME]
// [JIT_FRAME]

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -414,7 +414,7 @@ constexpr FloatRegister Z_FARG2 = Z_F2;
constexpr FloatRegister Z_FARG3 = Z_F4;
constexpr FloatRegister Z_FARG4 = Z_F6;
// Register declarations to be used in template interpreter assembly code.
// Register declarations to be used in frame manager assembly code.
// Use only non-volatile registers in order to keep values across C-calls.
// Register to cache the integer value on top of the operand stack.
@@ -439,7 +439,7 @@ constexpr Register Z_bcp = Z_R13;
// Bytecode which is dispatched (short lived!).
constexpr Register Z_bytecode = Z_R14;
// Temporary registers to be used within template interpreter. We can use
// Temporary registers to be used within frame manager. We can use
// the nonvolatile ones because the call stub has saved them.
// Use only non-volatile registers in order to keep values across C-calls.
constexpr Register Z_tmp_1 = Z_R10;

View File

@@ -118,7 +118,7 @@ ExceptionBlob* OptoRuntime::generate_exception_blob() {
__ z_lgr(Z_SP, saved_sp);
// [Z_RET] isn't null was possible in hotspot5 but not in sapjvm6.
// C2I adapter extensions are now removed by a resize in the template interpreter
// C2I adapter extensions are now removed by a resize in the frame manager
// (unwind_initial_activation_pending_exception).
#ifdef ASSERT
__ z_ltgr(handle_exception, handle_exception);

View File

@@ -2139,7 +2139,7 @@ static address gen_c2i_adapter(MacroAssembler *masm,
Register value = Z_R12;
// Remember the senderSP so we can pop the interpreter arguments off of the stack.
// In addition, template interpreter expects initial_caller_sp in Z_R10.
// In addition, frame manager expects initial_caller_sp in Z_R10.
__ z_lgr(sender_SP, Z_SP);
// This should always fit in 14 bit immediate.

View File

@@ -115,7 +115,7 @@ class StubGenerator: public StubCodeGenerator {
// [SP+176] - thread : Thread*
//
address generate_call_stub(address& return_address) {
// Set up a new C frame, copy Java arguments, call template interpreter
// Set up a new C frame, copy Java arguments, call frame manager
// or native_entry, and process result.
StubGenStubId stub_id = StubGenStubId::call_stub_id;
@@ -272,10 +272,10 @@ class StubGenerator: public StubCodeGenerator {
BLOCK_COMMENT("call {");
{
// Call template interpreter or native entry.
// Call frame manager or native entry.
//
// Register state on entry to template interpreter / native entry:
// Register state on entry to frame manager / native entry:
//
// Z_ARG1 = r_top_of_arguments_addr - intptr_t *sender tos (prepushed)
// Lesp = (SP) + copied_arguments_offset - 8
@@ -290,7 +290,7 @@ class StubGenerator: public StubCodeGenerator {
__ z_lgr(Z_esp, r_top_of_arguments_addr);
//
// Stack on entry to template interpreter / native entry:
// Stack on entry to frame manager / native entry:
//
// F0 [TOP_IJAVA_FRAME_ABI]
// [outgoing Java arguments]
@@ -300,7 +300,7 @@ class StubGenerator: public StubCodeGenerator {
//
// Do a light-weight C-call here, r_new_arg_entry holds the address
// of the interpreter entry point (template interpreter or native entry)
// of the interpreter entry point (frame manager or native entry)
// and save runtime-value of return_pc in return_address
// (call by reference argument).
return_address = __ call_stub(r_new_arg_entry);
@@ -309,11 +309,11 @@ class StubGenerator: public StubCodeGenerator {
{
BLOCK_COMMENT("restore registers {");
// Returned from template interpreter or native entry.
// Returned from frame manager or native entry.
// Now pop frame, process result, and return to caller.
//
// Stack on exit from template interpreter / native entry:
// Stack on exit from frame manager / native entry:
//
// F0 [ABI]
// ...
@@ -330,7 +330,7 @@ class StubGenerator: public StubCodeGenerator {
__ pop_frame();
// Reload some volatile registers which we've spilled before the call
// to template interpreter / native entry.
// to frame manager / native entry.
// Access all locals via frame pointer, because we know nothing about
// the topmost frame's size.
__ z_lg(r_arg_result_addr, result_address_offset, r_entryframe_fp);

View File

@@ -1217,7 +1217,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// Various method entries
// Math function, template interpreter must set up an interpreter state, etc.
// Math function, frame manager must set up an interpreter state, etc.
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
// Decide what to do: Use same platform specific instructions and runtime calls as compilers.

View File

@@ -15681,6 +15681,8 @@ void Assembler::pusha_uncached() { // 64bit
// Push pair of original stack pointer along with remaining registers
// at 16B aligned boundary.
push2p(rax, r31);
// Restore the original contents of RAX register.
movq(rax, Address(rax));
push2p(r30, r29);
push2p(r28, r27);
push2p(r26, r25);

View File

@@ -4655,6 +4655,7 @@ static void convertF2I_slowpath(C2_MacroAssembler& masm, C2GeneralStub<Register,
__ subptr(rsp, 8);
__ movdbl(Address(rsp), src);
__ call(RuntimeAddress(target));
// APX REX2 encoding for pop(dst) increases the stub size by 1 byte.
__ pop(dst);
__ jmp(stub.continuation());
#undef __
@@ -4687,7 +4688,9 @@ void C2_MacroAssembler::convertF2I(BasicType dst_bt, BasicType src_bt, Register
}
}
auto stub = C2CodeStub::make<Register, XMMRegister, address>(dst, src, slowpath_target, 23, convertF2I_slowpath);
// Using the APX extended general purpose registers increases the instruction encoding size by 1 byte.
int max_size = 23 + (UseAPX ? 1 : 0);
auto stub = C2CodeStub::make<Register, XMMRegister, address>(dst, src, slowpath_target, max_size, convertF2I_slowpath);
jcc(Assembler::equal, stub->entry());
bind(stub->continuation());
}

View File

@@ -353,7 +353,7 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
// The rest is saved with the optimized path
uint num_saved_regs = 4 + (dst != rax ? 1 : 0) + 4;
uint num_saved_regs = 4 + (dst != rax ? 1 : 0) + 4 + (UseAPX ? 16 : 0);
__ subptr(rsp, num_saved_regs * wordSize);
uint slot = num_saved_regs;
if (dst != rax) {
@@ -367,6 +367,25 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
__ movptr(Address(rsp, (--slot) * wordSize), r9);
__ movptr(Address(rsp, (--slot) * wordSize), r10);
__ movptr(Address(rsp, (--slot) * wordSize), r11);
// Save APX extended registers r16-r31 if enabled
if (UseAPX) {
__ movptr(Address(rsp, (--slot) * wordSize), r16);
__ movptr(Address(rsp, (--slot) * wordSize), r17);
__ movptr(Address(rsp, (--slot) * wordSize), r18);
__ movptr(Address(rsp, (--slot) * wordSize), r19);
__ movptr(Address(rsp, (--slot) * wordSize), r20);
__ movptr(Address(rsp, (--slot) * wordSize), r21);
__ movptr(Address(rsp, (--slot) * wordSize), r22);
__ movptr(Address(rsp, (--slot) * wordSize), r23);
__ movptr(Address(rsp, (--slot) * wordSize), r24);
__ movptr(Address(rsp, (--slot) * wordSize), r25);
__ movptr(Address(rsp, (--slot) * wordSize), r26);
__ movptr(Address(rsp, (--slot) * wordSize), r27);
__ movptr(Address(rsp, (--slot) * wordSize), r28);
__ movptr(Address(rsp, (--slot) * wordSize), r29);
__ movptr(Address(rsp, (--slot) * wordSize), r30);
__ movptr(Address(rsp, (--slot) * wordSize), r31);
}
// r12-r15 are callee saved in all calling conventions
assert(slot == 0, "must use all slots");
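Worked slot accounting for the save sequence above, as a sketch under the assumption that dst != rax and UseAPX is on: 4 + 1 + 4 + 16 = 25 word slots, and the pre-decrement stores consume exactly that many, which is what the assert checks. The first group of four registers is saved earlier in this function and not visible in the hunk.
#include <cassert>

int main() {
  const bool dst_is_not_rax = true;
  const bool use_apx = true;
  unsigned num_saved_regs = 4 + (dst_is_not_rax ? 1 : 0) + 4 + (use_apx ? 16 : 0);
  unsigned slot = num_saved_regs;   // 25 with the assumptions above
  slot -= 4;                        // first group of caller-saved regs (not in this hunk)
  if (dst_is_not_rax) slot -= 1;    // extra spill when dst is not rax
  slot -= 4;                        // r8..r11
  if (use_apx) slot -= 16;          // r16..r31
  assert(slot == 0 && "must use all slots");
  return 0;
}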
@@ -398,6 +417,25 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom), arg0, arg1);
}
// Restore APX extended registers r31-r16 if previously saved
if (UseAPX) {
__ movptr(r31, Address(rsp, (slot++) * wordSize));
__ movptr(r30, Address(rsp, (slot++) * wordSize));
__ movptr(r29, Address(rsp, (slot++) * wordSize));
__ movptr(r28, Address(rsp, (slot++) * wordSize));
__ movptr(r27, Address(rsp, (slot++) * wordSize));
__ movptr(r26, Address(rsp, (slot++) * wordSize));
__ movptr(r25, Address(rsp, (slot++) * wordSize));
__ movptr(r24, Address(rsp, (slot++) * wordSize));
__ movptr(r23, Address(rsp, (slot++) * wordSize));
__ movptr(r22, Address(rsp, (slot++) * wordSize));
__ movptr(r21, Address(rsp, (slot++) * wordSize));
__ movptr(r20, Address(rsp, (slot++) * wordSize));
__ movptr(r19, Address(rsp, (slot++) * wordSize));
__ movptr(r18, Address(rsp, (slot++) * wordSize));
__ movptr(r17, Address(rsp, (slot++) * wordSize));
__ movptr(r16, Address(rsp, (slot++) * wordSize));
}
__ movptr(r11, Address(rsp, (slot++) * wordSize));
__ movptr(r10, Address(rsp, (slot++) * wordSize));
__ movptr(r9, Address(rsp, (slot++) * wordSize));

View File

@@ -118,10 +118,6 @@ instruct zLoadP(rRegP dst, memory mem, rFlagsReg cr)
predicate(UseZGC && n->as_Load()->barrier_data() != 0);
match(Set dst (LoadP mem));
effect(TEMP dst, KILL cr);
// The main load is a candidate to implement implicit null checks. The
// barrier's slow path includes an identical reload, which does not need to be
// registered in the exception table because it is dominated by the main one.
ins_is_late_expanded_null_check_candidate(true);
ins_cost(125);

View File

@@ -30,7 +30,7 @@
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_blob(initial, 20000 WINDOWS_ONLY(+1000)) \
do_arch_blob(initial, PRODUCT_ONLY(20000) NOT_PRODUCT(21000) WINDOWS_ONLY(+1000)) \
do_stub(initial, verify_mxcsr) \
do_arch_entry(x86, initial, verify_mxcsr, verify_mxcsr_entry, \
verify_mxcsr_entry) \
@@ -239,7 +239,7 @@
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_blob(final, 31000 \
do_arch_blob(final, 33000 \
WINDOWS_ONLY(+22000) ZGC_ONLY(+20000)) \
#endif // CPU_X86_STUBDECLARATIONS_HPP

View File

@@ -46,6 +46,12 @@
//
/******************************************************************************/
/* Represents 0x7FFFFFFFFFFFFFFF double precision in lower 64 bits*/
ATTRIBUTE_ALIGNED(16) static const juint _ABS_MASK[] =
{
4294967295, 2147483647, 0, 0
};
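A quick standalone check of the _ABS_MASK constant above: the words 4294967295 and 2147483647 form 0x7FFFFFFFFFFFFFFF in the low 64 bits, so andpd with it clears the sign bit and yields |x|.
#include <cstdint>
#include <cstring>
#include <cstdio>

int main() {
  const uint64_t mask = ((uint64_t)2147483647u << 32) | 4294967295u; // 0x7FFFFFFFFFFFFFFF
  double x = -3.5, ax;
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  bits &= mask;                            // clear the IEEE-754 sign bit
  std::memcpy(&ax, &bits, sizeof ax);
  printf("%016llx %g\n", (unsigned long long)mask, ax); // 7fffffffffffffff 3.5
  return 0;
}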
ATTRIBUTE_ALIGNED(4) static const juint _SIG_MASK[] =
{
0, 1032192
@@ -188,10 +194,10 @@ address StubGenerator::generate_libmCbrt() {
StubCodeMark mark(this, stub_id);
address start = __ pc();
Label L_2TAG_PACKET_0_0_1, L_2TAG_PACKET_1_0_1, L_2TAG_PACKET_2_0_1, L_2TAG_PACKET_3_0_1;
Label L_2TAG_PACKET_4_0_1, L_2TAG_PACKET_5_0_1, L_2TAG_PACKET_6_0_1;
Label L_2TAG_PACKET_0_0_1, L_2TAG_PACKET_1_0_1, L_2TAG_PACKET_2_0_1;
Label B1_1, B1_2, B1_4;
address ABS_MASK = (address)_ABS_MASK;
address SIG_MASK = (address)_SIG_MASK;
address EXP_MASK = (address)_EXP_MASK;
address EXP_MSK2 = (address)_EXP_MSK2;
@@ -208,8 +214,12 @@ address StubGenerator::generate_libmCbrt() {
__ enter(); // required for proper stackwalking of RuntimeStub frame
__ bind(B1_1);
__ subq(rsp, 24);
__ movsd(Address(rsp), xmm0);
__ ucomisd(xmm0, ExternalAddress(ZERON), r11 /*rscratch*/);
__ jcc(Assembler::equal, L_2TAG_PACKET_1_0_1); // Branch only if x is +/- zero or NaN
__ movq(xmm1, xmm0);
__ andpd(xmm1, ExternalAddress(ABS_MASK), r11 /*rscratch*/);
__ ucomisd(xmm1, ExternalAddress(INF), r11 /*rscratch*/);
__ jcc(Assembler::equal, B1_4); // Branch only if x is +/- INF
__ bind(B1_2);
__ movq(xmm7, xmm0);
@@ -228,8 +238,6 @@ address StubGenerator::generate_libmCbrt() {
__ andl(rdx, rax);
__ cmpl(rdx, 0);
__ jcc(Assembler::equal, L_2TAG_PACKET_0_0_1); // Branch only if |x| is denormalized
__ cmpl(rdx, 524032);
__ jcc(Assembler::equal, L_2TAG_PACKET_1_0_1); // Branch only if |x| is INF or NaN
__ shrl(rdx, 8);
__ shrq(r9, 8);
__ andpd(xmm2, xmm0);
@@ -297,8 +305,6 @@ address StubGenerator::generate_libmCbrt() {
__ andl(rdx, rax);
__ shrl(rdx, 8);
__ shrq(r9, 8);
__ cmpl(rdx, 0);
__ jcc(Assembler::equal, L_2TAG_PACKET_3_0_1); // Branch only if |x| is zero
__ andpd(xmm2, xmm0);
__ andpd(xmm0, xmm5);
__ orpd(xmm3, xmm2);
@@ -322,41 +328,10 @@ address StubGenerator::generate_libmCbrt() {
__ psllq(xmm7, 52);
__ jmp(L_2TAG_PACKET_2_0_1);
__ bind(L_2TAG_PACKET_3_0_1);
__ cmpq(r9, 0);
__ jcc(Assembler::notEqual, L_2TAG_PACKET_4_0_1); // Branch only if x is negative zero
__ xorpd(xmm0, xmm0);
__ jmp(B1_4);
__ bind(L_2TAG_PACKET_4_0_1);
__ movsd(xmm0, ExternalAddress(ZERON), r11 /*rscratch*/);
__ jmp(B1_4);
__ bind(L_2TAG_PACKET_1_0_1);
__ movl(rax, Address(rsp, 4));
__ movl(rdx, Address(rsp));
__ movl(rcx, rax);
__ andl(rcx, 2147483647);
__ cmpl(rcx, 2146435072);
__ jcc(Assembler::above, L_2TAG_PACKET_5_0_1); // Branch only if |x| is NaN
__ cmpl(rdx, 0);
__ jcc(Assembler::notEqual, L_2TAG_PACKET_5_0_1); // Branch only if |x| is NaN
__ cmpl(rax, 2146435072);
__ jcc(Assembler::notEqual, L_2TAG_PACKET_6_0_1); // Branch only if x is negative INF
__ movsd(xmm0, ExternalAddress(INF), r11 /*rscratch*/);
__ jmp(B1_4);
__ bind(L_2TAG_PACKET_6_0_1);
__ movsd(xmm0, ExternalAddress(NEG_INF), r11 /*rscratch*/);
__ jmp(B1_4);
__ bind(L_2TAG_PACKET_5_0_1);
__ movsd(xmm0, Address(rsp));
__ addsd(xmm0, xmm0);
__ movq(Address(rsp, 8), xmm0);
__ bind(B1_4);
__ addq(rsp, 24);
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
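For the special inputs the entry sequence above dispatches on early, a plain-libm reference (a sketch, not the stub itself) of the results the stub must preserve: cbrt passes +/-0 and +/-Inf through with their sign and returns NaN for NaN.
#include <cmath>
#include <cstdio>

int main() {
  printf("%g %g\n", std::cbrt(0.0), std::cbrt(-0.0));              // 0 -0
  printf("%g %g\n", std::cbrt(INFINITY), std::cbrt(-INFINITY));    // inf -inf
  printf("%g\n",    std::cbrt(NAN));                               // nan
  return 0;
}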

View File

@@ -465,19 +465,13 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dtan));
}
} else if (kind == Interpreter::java_lang_math_tanh) {
if (StubRoutines::dtanh() != nullptr) {
assert(StubRoutines::dtanh() != nullptr, "not initialized");
__ movdbl(xmm0, Address(rsp, wordSize));
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dtanh())));
} else {
return nullptr; // Fallback to default implementation
}
} else if (kind == Interpreter::java_lang_math_cbrt) {
if (StubRoutines::dcbrt() != nullptr) {
__ movdbl(xmm0, Address(rsp, wordSize));
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dcbrt())));
} else {
return nullptr; // Fallback to default implementation
}
assert(StubRoutines::dcbrt() != nullptr, "not initialized");
__ movdbl(xmm0, Address(rsp, wordSize));
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dcbrt())));
} else if (kind == Interpreter::java_lang_math_abs) {
assert(StubRoutines::x86::double_sign_mask() != nullptr, "not initialized");
__ movdbl(xmm0, Address(rsp, wordSize));

View File

@@ -440,7 +440,6 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits apx_f
__ jcc(Assembler::equal, vector_save_restore);
#ifndef PRODUCT
bool save_apx = UseAPX;
VM_Version::set_apx_cpuFeatures();
UseAPX = true;
@@ -457,7 +456,6 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ movq(Address(rsi, 8), r31);
UseAPX = save_apx;
#endif
__ bind(vector_save_restore);
//
// Check if OS has enabled XGETBV instruction to access XCR0
@@ -1022,8 +1020,6 @@ void VM_Version::get_processor_features() {
if (UseAPX && !apx_supported) {
warning("UseAPX is not supported on this CPU, setting it to false");
FLAG_SET_DEFAULT(UseAPX, false);
} else if (FLAG_IS_DEFAULT(UseAPX)) {
FLAG_SET_DEFAULT(UseAPX, apx_supported ? true : false);
}
if (!UseAPX) {
@@ -2111,7 +2107,7 @@ bool VM_Version::is_intel_cascade_lake() {
// has improved implementation of 64-byte load/stores and so the default
// threshold is set to 0 for these platforms.
int VM_Version::avx3_threshold() {
return (is_intel_family_core() &&
return (is_intel_server_family() &&
supports_serialize() &&
FLAG_IS_DEFAULT(AVX3Threshold)) ? 0 : AVX3Threshold;
}
@@ -3151,17 +3147,11 @@ bool VM_Version::os_supports_apx_egprs() {
if (!supports_apx_f()) {
return false;
}
// Enable APX support for product builds after
// completion of planned features listed in JDK-8329030.
#if !defined(PRODUCT)
if (_cpuid_info.apx_save[0] != egpr_test_value() ||
_cpuid_info.apx_save[1] != egpr_test_value()) {
return false;
}
return true;
#else
return false;
#endif
}
uint VM_Version::cores_per_cpu() {

View File

@@ -2055,10 +2055,6 @@ ins_attrib ins_alignment(1); // Required alignment attribute (must
// compute_padding() function must be
// provided for the instruction
// Whether this node is expanded during code emission into a sequence of
// instructions and the first instruction can perform an implicit null check.
ins_attrib ins_is_late_expanded_null_check_candidate(false);
//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in

View File

@@ -1261,6 +1261,69 @@ void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
// Nothing to do beyond of what os::print_cpu_info() does.
}
static char saved_jvm_path[MAXPATHLEN] = {0};
// Find the full path to the current module, libjvm.so.
void os::jvm_path(char *buf, jint buflen) {
// Error checking.
if (buflen < MAXPATHLEN) {
assert(false, "must use a large-enough buffer");
buf[0] = '\0';
return;
}
// Lazy resolve the path to current module.
if (saved_jvm_path[0] != 0) {
strcpy(buf, saved_jvm_path);
return;
}
Dl_info dlinfo;
int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
assert(ret != 0, "cannot locate libjvm");
char* rp = os::realpath((char *)dlinfo.dli_fname, buf, buflen);
assert(rp != nullptr, "error in realpath(): maybe the 'path' argument is too long?");
// If executing unit tests we require JAVA_HOME to point to the real JDK.
if (Arguments::executing_unit_tests()) {
// Look for JAVA_HOME in the environment.
char* java_home_var = ::getenv("JAVA_HOME");
if (java_home_var != nullptr && java_home_var[0] != 0) {
// Check the current module name "libjvm.so".
const char* p = strrchr(buf, '/');
if (p == nullptr) {
return;
}
assert(strstr(p, "/libjvm") == p, "invalid library name");
stringStream ss(buf, buflen);
rp = os::realpath(java_home_var, buf, buflen);
if (rp == nullptr) {
return;
}
assert((int)strlen(buf) < buflen, "Ran out of buffer room");
ss.print("%s/lib", buf);
if (0 == access(buf, F_OK)) {
// Use current module name "libjvm.so"
ss.print("/%s/libjvm%s", Abstract_VM_Version::vm_variant(), JNI_LIB_SUFFIX);
assert(strcmp(buf + strlen(buf) - strlen(JNI_LIB_SUFFIX), JNI_LIB_SUFFIX) == 0,
"buf has been truncated");
} else {
// Go back to path of .so
rp = os::realpath((char *)dlinfo.dli_fname, buf, buflen);
if (rp == nullptr) {
return;
}
}
}
}
strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
}
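A hedged sketch of the JAVA_HOME override in the jvm_path logic above (the "server" variant and the paths are illustrative assumptions): when executing unit tests, the resolved libjvm path is rebuilt under $JAVA_HOME/lib/<variant>/ if that directory exists, otherwise the realpath of the library actually loaded is kept.
#include <string>
#include <cstdio>

// java_home_lib_exists stands in for the access($JAVA_HOME/lib, F_OK) probe.
static std::string jvm_path_for_tests(const std::string& java_home,
                                      const std::string& variant,
                                      bool java_home_lib_exists,
                                      const std::string& loaded_libjvm) {
  if (!java_home.empty() && java_home_lib_exists) {
    return java_home + "/lib/" + variant + "/libjvm.so";
  }
  return loaded_libjvm;   // fall back to the library that is actually loaded
}

int main() {
  printf("%s\n", jvm_path_for_tests("/opt/jdk", "server", true,
                                    "/build/jdk/lib/server/libjvm.so").c_str());
  printf("%s\n", jvm_path_for_tests("", "server", false,
                                    "/build/jdk/lib/server/libjvm.so").c_str());
  return 0;
}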
////////////////////////////////////////////////////////////////////////////////
// Virtual Memory

View File

@@ -154,8 +154,7 @@ julong os::Bsd::available_memory() {
assert(kerr == KERN_SUCCESS,
"host_statistics64 failed - check mach_host_self() and count");
if (kerr == KERN_SUCCESS) {
// free_count is just a lower bound, other page categories can be freed too and make memory available
available = (vmstat.free_count + vmstat.inactive_count + vmstat.purgeable_count) * os::vm_page_size();
available = vmstat.free_count * os::vm_page_size();
}
#endif
return available;
@@ -1483,6 +1482,83 @@ void os::print_memory_info(outputStream* st) {
st->cr();
}
static char saved_jvm_path[MAXPATHLEN] = {0};
// Find the full path to the current module, libjvm
void os::jvm_path(char *buf, jint buflen) {
// Error checking.
if (buflen < MAXPATHLEN) {
assert(false, "must use a large-enough buffer");
buf[0] = '\0';
return;
}
// Lazy resolve the path to current module.
if (saved_jvm_path[0] != 0) {
strcpy(buf, saved_jvm_path);
return;
}
char dli_fname[MAXPATHLEN];
dli_fname[0] = '\0';
bool ret = dll_address_to_library_name(
CAST_FROM_FN_PTR(address, os::jvm_path),
dli_fname, sizeof(dli_fname), nullptr);
assert(ret, "cannot locate libjvm");
char *rp = nullptr;
if (ret && dli_fname[0] != '\0') {
rp = os::realpath(dli_fname, buf, buflen);
}
if (rp == nullptr) {
return;
}
// If executing unit tests we require JAVA_HOME to point to the real JDK.
if (Arguments::executing_unit_tests()) {
// Look for JAVA_HOME in the environment.
char* java_home_var = ::getenv("JAVA_HOME");
if (java_home_var != nullptr && java_home_var[0] != 0) {
// Check the current module name "libjvm"
const char* p = strrchr(buf, '/');
assert(strstr(p, "/libjvm") == p, "invalid library name");
stringStream ss(buf, buflen);
rp = os::realpath(java_home_var, buf, buflen);
if (rp == nullptr) {
return;
}
assert((int)strlen(buf) < buflen, "Ran out of buffer space");
// Add the appropriate library and JVM variant subdirs
ss.print("%s/lib/%s", buf, Abstract_VM_Version::vm_variant());
if (0 != access(buf, F_OK)) {
ss.reset();
ss.print("%s/lib", buf);
}
// If the path exists within JAVA_HOME, add the JVM library name
// to complete the path to JVM being overridden. Otherwise fallback
// to the path to the current library.
if (0 == access(buf, F_OK)) {
// Use current module name "libjvm"
ss.print("/libjvm%s", JNI_LIB_SUFFIX);
assert(strcmp(buf + strlen(buf) - strlen(JNI_LIB_SUFFIX), JNI_LIB_SUFFIX) == 0,
"buf has been truncated");
} else {
// Fall back to path of current library
rp = os::realpath(dli_fname, buf, buflen);
if (rp == nullptr) {
return;
}
}
}
}
strncpy(saved_jvm_path, buf, MAXPATHLEN);
saved_jvm_path[MAXPATHLEN - 1] = '\0';
}
////////////////////////////////////////////////////////////////////////////////
// Virtual Memory

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,6 +35,9 @@
range, \
constraint) \
\
product(bool, UseOprofile, false, \
"(Deprecated) enable support for Oprofile profiler") \
\
product(bool, UseTransparentHugePages, false, \
"Use MADV_HUGEPAGE for large pages") \
\

View File

@@ -2746,9 +2746,118 @@ void os::get_summary_cpu_info(char* cpuinfo, size_t length) {
#endif
}
static char saved_jvm_path[MAXPATHLEN] = {0};
// Find the full path to the current module, libjvm.so
void os::jvm_path(char *buf, jint buflen) {
// Error checking.
if (buflen < MAXPATHLEN) {
assert(false, "must use a large-enough buffer");
buf[0] = '\0';
return;
}
// Lazy resolve the path to current module.
if (saved_jvm_path[0] != 0) {
strcpy(buf, saved_jvm_path);
return;
}
char dli_fname[MAXPATHLEN];
dli_fname[0] = '\0';
bool ret = dll_address_to_library_name(
CAST_FROM_FN_PTR(address, os::jvm_path),
dli_fname, sizeof(dli_fname), nullptr);
assert(ret, "cannot locate libjvm");
char *rp = nullptr;
if (ret && dli_fname[0] != '\0') {
rp = os::realpath(dli_fname, buf, buflen);
}
if (rp == nullptr) {
return;
}
// If executing unit tests we require JAVA_HOME to point to the real JDK.
if (Arguments::executing_unit_tests()) {
// Look for JAVA_HOME in the environment.
char* java_home_var = ::getenv("JAVA_HOME");
if (java_home_var != nullptr && java_home_var[0] != 0) {
// Check the current module name "libjvm.so".
const char* p = strrchr(buf, '/');
if (p == nullptr) {
return;
}
assert(strstr(p, "/libjvm") == p, "invalid library name");
stringStream ss(buf, buflen);
rp = os::realpath(java_home_var, buf, buflen);
if (rp == nullptr) {
return;
}
assert((int)strlen(buf) < buflen, "Ran out of buffer room");
ss.print("%s/lib", buf);
if (0 == access(buf, F_OK)) {
// Use current module name "libjvm.so"
ss.print("/%s/libjvm%s", Abstract_VM_Version::vm_variant(), JNI_LIB_SUFFIX);
assert(strcmp(buf + strlen(buf) - strlen(JNI_LIB_SUFFIX), JNI_LIB_SUFFIX) == 0,
"buf has been truncated");
} else {
// Go back to path of .so
rp = os::realpath(dli_fname, buf, buflen);
if (rp == nullptr) {
return;
}
}
}
}
strncpy(saved_jvm_path, buf, MAXPATHLEN);
saved_jvm_path[MAXPATHLEN - 1] = '\0';
}
////////////////////////////////////////////////////////////////////////////////
// Virtual Memory
// Rationale behind this function:
// current (Mon Apr 25 20:12:18 MSD 2005) oprofile drops samples without executable
// mapping for address (see lookup_dcookie() in the kernel module), thus we cannot get
// samples for JITted code. Here we create private executable mapping over the code cache
// and then we can use standard (well, almost, as mapping can change) way to provide
// info for the reporting script by storing timestamp and location of symbol
void linux_wrap_code(char* base, size_t size) {
static volatile jint cnt = 0;
static_assert(sizeof(off_t) == 8, "Expected Large File Support in this file");
if (!UseOprofile) {
return;
}
char buf[PATH_MAX+1];
int num = Atomic::add(&cnt, 1);
snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d",
os::get_temp_directory(), os::current_process_id(), num);
unlink(buf);
int fd = ::open(buf, O_CREAT | O_RDWR, S_IRWXU);
if (fd != -1) {
off_t rv = ::lseek(fd, size-2, SEEK_SET);
if (rv != (off_t)-1) {
if (::write(fd, "", 1) == 1) {
mmap(base, size,
PROT_READ|PROT_WRITE|PROT_EXEC,
MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, fd, 0);
}
}
::close(fd);
unlink(buf);
}
}
static bool recoverable_mmap_error(int err) {
// See if the error is one we can let the caller handle. This
// list of errno values comes from JBS-6843484. I can't find a

View File

@@ -59,7 +59,6 @@
#ifdef AIX
#include "loadlib_aix.hpp"
#include "os_aix.hpp"
#include "porting_aix.hpp"
#endif
#ifdef LINUX
#include "os_linux.hpp"
@@ -1061,95 +1060,6 @@ bool os::same_files(const char* file1, const char* file2) {
return is_same;
}
static char saved_jvm_path[MAXPATHLEN] = {0};
// Find the full path to the current module, libjvm.so
void os::jvm_path(char *buf, jint buflen) {
// Error checking.
if (buflen < MAXPATHLEN) {
assert(false, "must use a large-enough buffer");
buf[0] = '\0';
return;
}
// Lazy resolve the path to current module.
if (saved_jvm_path[0] != 0) {
strcpy(buf, saved_jvm_path);
return;
}
const char* fname;
#ifdef AIX
Dl_info dlinfo;
int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
assert(ret != 0, "cannot locate libjvm");
if (ret == 0) {
return;
}
fname = dlinfo.dli_fname;
#else
char dli_fname[MAXPATHLEN];
dli_fname[0] = '\0';
bool ret = dll_address_to_library_name(
CAST_FROM_FN_PTR(address, os::jvm_path),
dli_fname, sizeof(dli_fname), nullptr);
assert(ret, "cannot locate libjvm");
if (!ret) {
return;
}
fname = dli_fname;
#endif // AIX
char* rp = nullptr;
if (fname[0] != '\0') {
rp = os::realpath(fname, buf, buflen);
}
if (rp == nullptr) {
return;
}
// If executing unit tests we require JAVA_HOME to point to the real JDK.
if (Arguments::executing_unit_tests()) {
// Look for JAVA_HOME in the environment.
char* java_home_var = ::getenv("JAVA_HOME");
if (java_home_var != nullptr && java_home_var[0] != 0) {
// Check the current module name "libjvm.so".
const char* p = strrchr(buf, '/');
if (p == nullptr) {
return;
}
assert(strstr(p, "/libjvm") == p, "invalid library name");
stringStream ss(buf, buflen);
rp = os::realpath(java_home_var, buf, buflen);
if (rp == nullptr) {
return;
}
assert((int)strlen(buf) < buflen, "Ran out of buffer room");
ss.print("%s/lib", buf);
// If the path exists within JAVA_HOME, add the VM variant directory and JVM
// library name to complete the path to JVM being overridden. Otherwise fallback
// to the path to the current library.
if (0 == access(buf, F_OK)) {
// Use current module name "libjvm.so"
ss.print("/%s/libjvm%s", Abstract_VM_Version::vm_variant(), JNI_LIB_SUFFIX);
assert(strcmp(buf + strlen(buf) - strlen(JNI_LIB_SUFFIX), JNI_LIB_SUFFIX) == 0,
"buf has been truncated");
} else {
// Go back to path of .so
rp = os::realpath(fname, buf, buflen);
if (rp == nullptr) {
return;
}
}
}
}
strncpy(saved_jvm_path, buf, MAXPATHLEN);
saved_jvm_path[MAXPATHLEN - 1] = '\0';
}
// Called when creating the thread. The minimum stack sizes have already been calculated
size_t os::Posix::get_initial_stack_size(ThreadType thr_type, size_t req_stack_size) {
size_t stack_size;

View File

@@ -2623,7 +2623,6 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr());
}
#if !defined(PRODUCT)
if ((exception_code == EXCEPTION_ACCESS_VIOLATION) &&
VM_Version::is_cpuinfo_segv_addr_apx(pc)) {
// Verify that the OS saves/restores APX registers.
@@ -2631,7 +2630,6 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
return Handle_Exception(exceptionInfo, VM_Version::cpuinfo_cont_addr_apx());
}
#endif
#endif
#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
if (VMError::was_assert_poison_crash(exception_record)) {

View File

@@ -81,12 +81,14 @@
#endif
#define SPELL_REG_SP "sp"
#define SPELL_REG_FP "fp"
#ifdef __APPLE__
// see darwin-xnu/osfmk/mach/arm/_structs.h
// 10.5 UNIX03 member name prefixes
#define DU3_PREFIX(s, m) __ ## s.__ ## m
#endif
#define context_x uc_mcontext->DU3_PREFIX(ss,x)
#define context_fp uc_mcontext->DU3_PREFIX(ss,fp)
@@ -95,31 +97,6 @@
#define context_pc uc_mcontext->DU3_PREFIX(ss,pc)
#define context_cpsr uc_mcontext->DU3_PREFIX(ss,cpsr)
#define context_esr uc_mcontext->DU3_PREFIX(es,esr)
#endif
#ifdef __FreeBSD__
# define context_x uc_mcontext.mc_gpregs.gp_x
# define context_fp context_x[REG_FP]
# define context_lr uc_mcontext.mc_gpregs.gp_lr
# define context_sp uc_mcontext.mc_gpregs.gp_sp
# define context_pc uc_mcontext.mc_gpregs.gp_elr
#endif
#ifdef __NetBSD__
# define context_x uc_mcontext.__gregs
# define context_fp uc_mcontext.__gregs[_REG_FP]
# define context_lr uc_mcontext.__gregs[_REG_LR]
# define context_sp uc_mcontext.__gregs[_REG_SP]
# define context_pc uc_mcontext.__gregs[_REG_ELR]
#endif
#ifdef __OpenBSD__
# define context_x sc_x
# define context_fp sc_x[REG_FP]
# define context_lr sc_lr
# define context_sp sc_sp
# define context_pc sc_elr
#endif
#define REG_BCP context_x[22]
@@ -520,11 +497,9 @@ int os::extra_bang_size_in_bytes() {
return 0;
}
#ifdef __APPLE__
void os::current_thread_enable_wx(WXMode mode) {
pthread_jit_write_protect_np(mode == WXExec);
}
#endif
static inline void atomic_copy64(const volatile void *src, volatile void *dst) {
*(jlong *) dst = *(const jlong *) src;

View File

@@ -429,13 +429,11 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
stub = VM_Version::cpuinfo_cont_addr();
}
#if !defined(PRODUCT) && defined(_LP64)
if ((sig == SIGSEGV || sig == SIGBUS) && VM_Version::is_cpuinfo_segv_addr_apx(pc)) {
// Verify that the OS saves/restores APX registers.
stub = VM_Version::cpuinfo_cont_addr_apx();
VM_Version::clear_apx_test_state();
}
#endif
// We test if stub is already set (by the stack overflow code
// above) so it is not overwritten by the code that follows. This

View File

@@ -255,13 +255,11 @@ bool PosixSignals::pd_hotspot_signal_handler(int sig, siginfo_t* info,
stub = VM_Version::cpuinfo_cont_addr();
}
#if !defined(PRODUCT) && defined(_LP64)
if ((sig == SIGSEGV) && VM_Version::is_cpuinfo_segv_addr_apx(pc)) {
// Verify that the OS saves/restores APX registers.
stub = VM_Version::cpuinfo_cont_addr_apx();
VM_Version::clear_apx_test_state();
}
#endif
if (thread->thread_state() == _thread_in_Java) {
// Java thread running in Java code => find exception handler if any

View File

@@ -481,3 +481,7 @@ int get_legal_text(FileBuff &fbuf, char **legal_text)
*legal_text = legal_start;
return (int) (legal_end - legal_start);
}
void *operator new( size_t size, int, const char *, int ) throw() {
return ::operator new( size );
}

View File

@@ -1626,8 +1626,6 @@ void ArchDesc::declareClasses(FILE *fp) {
while (attr != nullptr) {
if (strcmp (attr->_ident, "ins_is_TrapBasedCheckNode") == 0) {
fprintf(fp, " virtual bool is_TrapBasedCheckNode() const { return %s; }\n", attr->_val);
} else if (strcmp (attr->_ident, "ins_is_late_expanded_null_check_candidate") == 0) {
fprintf(fp, " virtual bool is_late_expanded_null_check_candidate() const { return %s; }\n", attr->_val);
} else if (strcmp (attr->_ident, "ins_cost") != 0 &&
strncmp(attr->_ident, "ins_field_", 10) != 0 &&
// Must match function in node.hpp: return type bool, no prefix "ins_".

View File

@@ -818,7 +818,7 @@ JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* current, jint trap_request))
Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
if (action == Deoptimization::Action_make_not_entrant) {
if (nm->make_not_entrant(nmethod::ChangeReason::C1_deoptimize)) {
if (nm->make_not_entrant("C1 deoptimize")) {
if (reason == Deoptimization::Reason_tenured) {
MethodData* trap_mdo = Deoptimization::get_method_data(current, method, true /*create_if_missing*/);
if (trap_mdo != nullptr) {
@@ -1110,7 +1110,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, C1StubId stub_id ))
// safepoint, but if it's still alive then make it not_entrant.
nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
if (nm != nullptr) {
nm->make_not_entrant(nmethod::ChangeReason::C1_codepatch);
nm->make_not_entrant("C1 code patch");
}
Deoptimization::deoptimize_frame(current, caller_frame.id());
@@ -1358,7 +1358,7 @@ void Runtime1::patch_code(JavaThread* current, C1StubId stub_id) {
// Make sure the nmethod is invalidated, i.e. made not entrant.
nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
if (nm != nullptr) {
nm->make_not_entrant(nmethod::ChangeReason::C1_deoptimize_for_patching);
nm->make_not_entrant("C1 deoptimize for patching");
}
}
@@ -1486,7 +1486,7 @@ JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* current))
nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
assert (nm != nullptr, "no more nmethod?");
nm->make_not_entrant(nmethod::ChangeReason::C1_predicate_failed_trap);
nm->make_not_entrant("C1 predicate failed trap");
methodHandle m(current, nm->method());
MethodData* mdo = m->method_data();

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -187,7 +187,13 @@ class ValueNumberingVisitor: public InstructionVisitor {
void do_Convert (Convert* x) { /* nothing to do */ }
void do_NullCheck (NullCheck* x) { /* nothing to do */ }
void do_TypeCast (TypeCast* x) { /* nothing to do */ }
void do_NewInstance (NewInstance* x) { /* nothing to do */ }
void do_NewInstance (NewInstance* x) {
ciInstanceKlass* c = x->klass();
if (c != nullptr && !c->is_initialized() &&
(!c->is_loaded() || c->has_class_initializer())) {
kill_memory();
}
}
void do_NewTypeArray (NewTypeArray* x) { /* nothing to do */ }
void do_NewObjectArray (NewObjectArray* x) { /* nothing to do */ }
void do_NewMultiArray (NewMultiArray* x) { /* nothing to do */ }

View File

@@ -110,24 +110,12 @@ const char* CDSConfig::default_archive_path() {
// before CDSConfig::ergo_initialize() is called.
assert(_cds_ergo_initialize_started, "sanity");
if (_default_archive_path == nullptr) {
char jvm_path[JVM_MAXPATHLEN];
os::jvm_path(jvm_path, sizeof(jvm_path));
char *end = strrchr(jvm_path, *os::file_separator());
if (end != nullptr) *end = '\0';
stringStream tmp;
if (is_vm_statically_linked()) {
// It's easier to form the path using JAVA_HOME as os::jvm_path
// gives the path to the launcher executable on static JDK.
const char* subdir = WINDOWS_ONLY("bin") NOT_WINDOWS("lib");
tmp.print("%s%s%s%s%s%sclasses",
Arguments::get_java_home(), os::file_separator(),
subdir, os::file_separator(),
Abstract_VM_Version::vm_variant(), os::file_separator());
} else {
// Assume .jsa is in the same directory where libjvm resides on
// non-static JDK.
char jvm_path[JVM_MAXPATHLEN];
os::jvm_path(jvm_path, sizeof(jvm_path));
char *end = strrchr(jvm_path, *os::file_separator());
if (end != nullptr) *end = '\0';
tmp.print("%s%sclasses", jvm_path, os::file_separator());
}
tmp.print("%s%sclasses", jvm_path, os::file_separator());
#ifdef _LP64
if (!UseCompressedOops) {
tmp.print_raw("_nocoops");

View File

@@ -147,7 +147,7 @@
product(bool, AOTVerifyTrainingData, trueInDebug, DIAGNOSTIC, \
"Verify archived training data") \
\
product(bool, AOTCompileEagerly, false, DIAGNOSTIC, \
product(bool, AOTCompileEagerly, false, EXPERIMENTAL, \
"Compile methods as soon as possible") \
\
/* AOT Code flags */ \

View File

@@ -837,11 +837,10 @@ void MetaspaceShared::preload_and_dump(TRAPS) {
struct stat st;
if (os::stat(AOTCache, &st) != 0) {
tty->print_cr("AOTCache creation failed: %s", AOTCache);
vm_exit(0);
} else {
tty->print_cr("AOTCache creation is complete: %s " INT64_FORMAT " bytes", AOTCache, (int64_t)(st.st_size));
vm_exit(0);
}
vm_direct_exit(0);
}
}
}

View File

@@ -549,6 +549,11 @@ bool ciInstanceKlass::compute_has_trusted_loader() {
return java_lang_ClassLoader::is_trusted_loader(loader_oop);
}
bool ciInstanceKlass::has_class_initializer() {
VM_ENTRY_MARK;
return get_instanceKlass()->class_initializer() != nullptr;
}
// ------------------------------------------------------------------
// ciInstanceKlass::find_method
//

View File

@@ -231,6 +231,8 @@ public:
ciInstanceKlass* unique_concrete_subklass();
bool has_finalizable_subclass();
bool has_class_initializer();
bool contains_field_offset(int offset);
// Get the instance of java.lang.Class corresponding to

View File

@@ -802,7 +802,7 @@ class CompileReplay : public StackObj {
// Make sure the existence of a prior compile doesn't stop this one
nmethod* nm = (entry_bci != InvocationEntryBci) ? method->lookup_osr_nmethod_for(entry_bci, comp_level, true) : method->code();
if (nm != nullptr) {
nm->make_not_entrant(nmethod::ChangeReason::CI_replay);
nm->make_not_entrant("CI replay");
}
replay_state = this;
CompileBroker::compile_method(methodHandle(THREAD, method), entry_bci, comp_level,

View File

@@ -154,8 +154,6 @@
#define JAVA_25_VERSION 69
#define JAVA_26_VERSION 70
void ClassFileParser::set_class_bad_constant_seen(short bad_constant) {
assert((bad_constant == JVM_CONSTANT_Module ||
bad_constant == JVM_CONSTANT_Package) && _major_version >= JAVA_9_VERSION,
@@ -3740,6 +3738,7 @@ void ClassFileParser::apply_parsed_class_metadata(
_cp->set_pool_holder(this_klass);
this_klass->set_constants(_cp);
this_klass->set_fieldinfo_stream(_fieldinfo_stream);
this_klass->set_fieldinfo_search_table(_fieldinfo_search_table);
this_klass->set_fields_status(_fields_status);
this_klass->set_methods(_methods);
this_klass->set_inner_classes(_inner_classes);
@@ -3749,6 +3748,8 @@ void ClassFileParser::apply_parsed_class_metadata(
this_klass->set_permitted_subclasses(_permitted_subclasses);
this_klass->set_record_components(_record_components);
DEBUG_ONLY(FieldInfoStream::validate_search_table(_cp, _fieldinfo_stream, _fieldinfo_search_table));
// Delay the setting of _local_interfaces and _transitive_interfaces until after
// initialize_supers() in fill_instance_klass(). It is because the _local_interfaces could
// be shared with _transitive_interfaces and _transitive_interfaces may be shared with
@@ -5056,6 +5057,7 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik,
// note that is not safe to use the fields in the parser from this point on
assert(nullptr == _cp, "invariant");
assert(nullptr == _fieldinfo_stream, "invariant");
assert(nullptr == _fieldinfo_search_table, "invariant");
assert(nullptr == _fields_status, "invariant");
assert(nullptr == _methods, "invariant");
assert(nullptr == _inner_classes, "invariant");
@@ -5276,6 +5278,7 @@ ClassFileParser::ClassFileParser(ClassFileStream* stream,
_super_klass(),
_cp(nullptr),
_fieldinfo_stream(nullptr),
_fieldinfo_search_table(nullptr),
_fields_status(nullptr),
_methods(nullptr),
_inner_classes(nullptr),
@@ -5352,6 +5355,7 @@ void ClassFileParser::clear_class_metadata() {
// deallocated if classfile parsing returns an error.
_cp = nullptr;
_fieldinfo_stream = nullptr;
_fieldinfo_search_table = nullptr;
_fields_status = nullptr;
_methods = nullptr;
_inner_classes = nullptr;
@@ -5374,6 +5378,7 @@ ClassFileParser::~ClassFileParser() {
if (_fieldinfo_stream != nullptr) {
MetadataFactory::free_array<u1>(_loader_data, _fieldinfo_stream);
}
MetadataFactory::free_array<u1>(_loader_data, _fieldinfo_search_table);
if (_fields_status != nullptr) {
MetadataFactory::free_array<FieldStatus>(_loader_data, _fields_status);
@@ -5774,6 +5779,7 @@ void ClassFileParser::post_process_parsed_stream(const ClassFileStream* const st
_fieldinfo_stream =
FieldInfoStream::create_FieldInfoStream(_temp_field_info, _java_fields_count,
injected_fields_count, loader_data(), CHECK);
_fieldinfo_search_table = FieldInfoStream::create_search_table(_cp, _fieldinfo_stream, _loader_data, CHECK);
_fields_status =
MetadataFactory::new_array<FieldStatus>(_loader_data, _temp_field_info->length(),
FieldStatus(0), CHECK);

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -123,6 +123,7 @@ class ClassFileParser {
const InstanceKlass* _super_klass;
ConstantPool* _cp;
Array<u1>* _fieldinfo_stream;
Array<u1>* _fieldinfo_search_table;
Array<FieldStatus>* _fields_status;
Array<Method*>* _methods;
Array<u2>* _inner_classes;

View File

@@ -301,7 +301,7 @@ void FieldLayout::reconstruct_layout(const InstanceKlass* ik, bool& has_instance
BasicType last_type;
int last_offset = -1;
while (ik != nullptr) {
for (AllFieldStream fs(ik->fieldinfo_stream(), ik->constants()); !fs.done(); fs.next()) {
for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
BasicType type = Signature::basic_type(fs.signature());
// distinction between static and non-static fields is missing
if (fs.access_flags().is_static()) continue;
@@ -461,7 +461,7 @@ void FieldLayout::print(outputStream* output, bool is_static, const InstanceKlas
bool found = false;
const InstanceKlass* ik = super;
while (!found && ik != nullptr) {
for (AllFieldStream fs(ik->fieldinfo_stream(), ik->constants()); !fs.done(); fs.next()) {
for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
if (fs.offset() == b->offset()) {
output->print_cr(" @%d \"%s\" %s %d/%d %s",
b->offset(),

View File

@@ -967,6 +967,13 @@ void java_lang_Class::fixup_mirror(Klass* k, TRAPS) {
Array<u1>* new_fis = FieldInfoStream::create_FieldInfoStream(fields, java_fields, injected_fields, k->class_loader_data(), CHECK);
ik->set_fieldinfo_stream(new_fis);
MetadataFactory::free_array<u1>(k->class_loader_data(), old_stream);
Array<u1>* old_table = ik->fieldinfo_search_table();
Array<u1>* search_table = FieldInfoStream::create_search_table(ik->constants(), new_fis, k->class_loader_data(), CHECK);
ik->set_fieldinfo_search_table(search_table);
MetadataFactory::free_array<u1>(k->class_loader_data(), old_table);
DEBUG_ONLY(FieldInfoStream::validate_search_table(ik->constants(), new_fis, search_table));
}
}

View File

@@ -132,8 +132,16 @@ bool StackMapTable::match_stackmap(
}
void StackMapTable::check_jump_target(
StackMapFrame* frame, int32_t target, TRAPS) const {
StackMapFrame* frame, int bci, int offset, TRAPS) const {
ErrorContext ctx;
// Jump targets must be within the method and the method size is limited. See JVMS 4.11
int min_offset = -1 * max_method_code_size;
if (offset < min_offset || offset > max_method_code_size) {
frame->verifier()->verify_error(ErrorContext::bad_stackmap(bci, frame),
"Illegal target of jump or branch (bci %d + offset %d)", bci, offset);
return;
}
int target = bci + offset;
bool match = match_stackmap(
frame, target, true, false, &ctx, CHECK_VERIFY(frame->verifier()));
if (!match || (target < 0 || target >= _code_length)) {
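A condensed sketch of the range check introduced above, combining it with the later target bounds test (65535 is an illustrative stand-in for max_method_code_size): a raw 32-bit goto_w offset could make bci + offset overflow a signed int, so the offset is bounded before the target is ever computed. The same pattern applies to the switch offsets handled further below.
#include <cstdint>
#include <cstdio>

static const int max_method_code_size = 65535;   // illustrative stand-in

// Returns true iff bci + offset is a legal jump target in a method whose
// bytecode array has code_length bytes; rejects the offset before adding so
// the addition can never overflow.
static bool legal_jump_target(int bci, int32_t offset, int code_length, int* target) {
  if (offset < -max_method_code_size || offset > max_method_code_size) {
    return false;
  }
  *target = bci + offset;
  return *target >= 0 && *target < code_length;
}

int main() {
  int t;
  printf("%d\n", legal_jump_target(10, 20, 100, &t));            // 1, target = 30
  printf("%d\n", legal_jump_target(10, INT32_MAX, 100, &t));     // 0, rejected early
  return 0;
}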

View File

@@ -67,7 +67,7 @@ class StackMapTable : public StackObj {
// Check jump instructions. Make sure there are no uninitialized
// instances on backward branch.
void check_jump_target(StackMapFrame* frame, int32_t target, TRAPS) const;
void check_jump_target(StackMapFrame* frame, int bci, int offset, TRAPS) const;
// The following methods are only used inside this class.

View File

@@ -32,6 +32,7 @@
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/vmClasses.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.hpp"
@@ -115,6 +116,7 @@ OopStorage* StringTable::_oop_storage;
static size_t _current_size = 0;
static volatile size_t _items_count = 0;
DEBUG_ONLY(static bool _disable_interning_during_cds_dump = false);
volatile bool _alt_hash = false;
@@ -346,6 +348,10 @@ bool StringTable::has_work() {
return Atomic::load_acquire(&_has_work);
}
size_t StringTable::items_count_acquire() {
return Atomic::load_acquire(&_items_count);
}
void StringTable::trigger_concurrent_work() {
// Avoid churn on ServiceThread
if (!has_work()) {
@@ -504,6 +510,9 @@ oop StringTable::intern(const char* utf8_string, TRAPS) {
}
oop StringTable::intern(const StringWrapper& name, TRAPS) {
assert(!Atomic::load_acquire(&_disable_interning_during_cds_dump),
"All threads that may intern strings should have been stopped before CDS starts copying the interned string table");
// shared table always uses java_lang_String::hash_code
unsigned int hash = hash_wrapped_string(name);
oop found_string = lookup_shared(name, hash);
@@ -793,7 +802,7 @@ void StringTable::verify() {
}
// Verification and comp
class VerifyCompStrings : StackObj {
class StringTable::VerifyCompStrings : StackObj {
static unsigned string_hash(oop const& str) {
return java_lang_String::hash_code_noupdate(str);
}
@@ -805,7 +814,7 @@ class VerifyCompStrings : StackObj {
string_hash, string_equals> _table;
public:
size_t _errors;
VerifyCompStrings() : _table(unsigned(_items_count / 8) + 1, 0 /* do not resize */), _errors(0) {}
VerifyCompStrings() : _table(unsigned(items_count_acquire() / 8) + 1, 0 /* do not resize */), _errors(0) {}
bool operator()(WeakHandle* val) {
oop s = val->resolve();
if (s == nullptr) {
@@ -939,20 +948,31 @@ oop StringTable::lookup_shared(const jchar* name, int len) {
return _shared_table.lookup(wrapped_name, java_lang_String::hash_code(name, len), 0);
}
// This is called BEFORE we enter the CDS safepoint. We can allocate heap objects.
// This should be called when we know no more strings will be added (which will be easy
// to guarantee because CDS runs with a single Java thread. See JDK-8253495.)
// This is called BEFORE we enter the CDS safepoint. We can still allocate Java object arrays to
// be used by the shared strings table.
void StringTable::allocate_shared_strings_array(TRAPS) {
if (!CDSConfig::is_dumping_heap()) {
return;
}
assert(CDSConfig::allow_only_single_java_thread(), "No more interned strings can be added");
if (_items_count > (size_t)max_jint) {
fatal("Too many strings to be archived: %zu", _items_count);
CompileBroker::wait_for_no_active_tasks();
precond(CDSConfig::allow_only_single_java_thread());
// At this point, no more strings will be added:
// - There's only a single Java thread (this thread). It no longer executes Java bytecodes
// so JIT compilation will eventually stop.
// - CompileBroker has no more active tasks, so all JIT requests have been processed.
// This flag will be cleared after intern table dumping has completed, so we can run the
// compiler again (for future AOT method compilation, etc).
DEBUG_ONLY(Atomic::release_store(&_disable_interning_during_cds_dump, true));
if (items_count_acquire() > (size_t)max_jint) {
fatal("Too many strings to be archived: %zu", items_count_acquire());
}
int total = (int)_items_count;
int total = (int)items_count_acquire();
size_t single_array_size = objArrayOopDesc::object_size(total);
log_info(aot)("allocated string table for %d strings", total);
@@ -972,7 +992,7 @@ void StringTable::allocate_shared_strings_array(TRAPS) {
// This can only happen if you have an extremely large number of classes that
// refer to more than 16384 * 16384 = 26M interned strings! Not a practical concern
// but bail out for safety.
log_error(aot)("Too many strings to be archived: %zu", _items_count);
log_error(aot)("Too many strings to be archived: %zu", items_count_acquire());
MetaspaceShared::unrecoverable_writing_error();
}
@@ -1070,7 +1090,7 @@ oop StringTable::init_shared_strings_array() {
void StringTable::write_shared_table() {
_shared_table.reset();
CompactHashtableWriter writer((int)_items_count, ArchiveBuilder::string_stats());
CompactHashtableWriter writer((int)items_count_acquire(), ArchiveBuilder::string_stats());
int index = 0;
auto copy_into_shared_table = [&] (WeakHandle* val) {
@@ -1084,6 +1104,8 @@ void StringTable::write_shared_table() {
};
_local_table->do_safepoint_scan(copy_into_shared_table);
writer.dump(&_shared_table, "string");
DEBUG_ONLY(Atomic::release_store(&_disable_interning_during_cds_dump, false));
}
void StringTable::set_shared_strings_array_index(int root_index) {
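The StringTable hunks above publish a debug-only "interning disabled" flag with a release store and read the item count back through an acquire accessor (items_count_acquire). A minimal sketch of that publish/observe pattern in portable C++ follows; it uses std::atomic in place of HotSpot's Atomic class, and the names (g_disable_interning, try_intern_one_string) are illustrative only.

#include <atomic>
#include <cstddef>

// Illustrative stand-ins for the StringTable state.
static std::atomic<bool>        g_disable_interning{false};
static std::atomic<std::size_t> g_items_count{0};

// Dump side: stop further interning before copying the table.
void begin_dump() {
  // release: everything written before this store is visible to any
  // thread that later observes the flag with an acquire load.
  g_disable_interning.store(true, std::memory_order_release);
}

// Interning side: check the flag before adding an entry.
bool try_intern_one_string() {
  if (g_disable_interning.load(std::memory_order_acquire)) {
    return false;                       // dump in progress, refuse to add
  }
  g_items_count.fetch_add(1, std::memory_order_relaxed);
  return true;
}

// Dump side: a stable count once no more writers can add items.
std::size_t items_count_acquire() {
  return g_items_count.load(std::memory_order_acquire);
}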

View File

@@ -40,7 +40,7 @@ class StringTableConfig;
class StringTable : AllStatic {
friend class StringTableConfig;
class VerifyCompStrings;
static volatile bool _has_work;
// Set if one bucket is out of balance due to hash algorithm deficiency
@@ -74,6 +74,7 @@ private:
static void item_added();
static void item_removed();
static size_t items_count_acquire();
static oop intern(const StringWrapper& name, TRAPS);
static oop do_intern(const StringWrapper& name, uintx hash, TRAPS);

View File

@@ -781,7 +781,6 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
// Merge with the next instruction
{
int target;
VerificationType type, type2;
VerificationType atype;
@@ -1606,9 +1605,8 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
case Bytecodes::_ifle:
current_frame.pop_stack(
VerificationType::integer_type(), CHECK_VERIFY(this));
target = bcs.dest();
stackmap_table.check_jump_target(
&current_frame, target, CHECK_VERIFY(this));
&current_frame, bcs.bci(), bcs.get_offset_s2(), CHECK_VERIFY(this));
no_control_flow = false; break;
case Bytecodes::_if_acmpeq :
case Bytecodes::_if_acmpne :
@@ -1619,19 +1617,16 @@ void ClassVerifier::verify_method(const methodHandle& m, TRAPS) {
case Bytecodes::_ifnonnull :
current_frame.pop_stack(
VerificationType::reference_check(), CHECK_VERIFY(this));
target = bcs.dest();
stackmap_table.check_jump_target
(&current_frame, target, CHECK_VERIFY(this));
(&current_frame, bcs.bci(), bcs.get_offset_s2(), CHECK_VERIFY(this));
no_control_flow = false; break;
case Bytecodes::_goto :
target = bcs.dest();
stackmap_table.check_jump_target(
&current_frame, target, CHECK_VERIFY(this));
&current_frame, bcs.bci(), bcs.get_offset_s2(), CHECK_VERIFY(this));
no_control_flow = true; break;
case Bytecodes::_goto_w :
target = bcs.dest_w();
stackmap_table.check_jump_target(
&current_frame, target, CHECK_VERIFY(this));
&current_frame, bcs.bci(), bcs.get_offset_s4(), CHECK_VERIFY(this));
no_control_flow = true; break;
case Bytecodes::_tableswitch :
case Bytecodes::_lookupswitch :
@@ -2280,15 +2275,14 @@ void ClassVerifier::verify_switch(
}
}
}
int target = bci + default_offset;
stackmap_table->check_jump_target(current_frame, target, CHECK_VERIFY(this));
stackmap_table->check_jump_target(current_frame, bci, default_offset, CHECK_VERIFY(this));
for (int i = 0; i < keys; i++) {
// Because check_jump_target() may safepoint, the bytecode could have
// moved, which means 'aligned_bcp' is no good and needs to be recalculated.
aligned_bcp = align_up(bcs->bcp() + 1, jintSize);
target = bci + (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
int offset = (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
stackmap_table->check_jump_target(
current_frame, target, CHECK_VERIFY(this));
current_frame, bci, offset, CHECK_VERIFY(this));
}
NOT_PRODUCT(aligned_bcp = nullptr); // no longer valid at this point
}
@@ -2549,7 +2543,12 @@ bool ClassVerifier::ends_in_athrow(u4 start_bc_offset) {
case Bytecodes::_goto:
case Bytecodes::_goto_w: {
int target = (opcode == Bytecodes::_goto ? bcs.dest() : bcs.dest_w());
int offset = (opcode == Bytecodes::_goto ? bcs.get_offset_s2() : bcs.get_offset_s4());
int min_offset = -1 * max_method_code_size;
// Check offset for overflow
if (offset < min_offset || offset > max_method_code_size) return false;
int target = bci + offset;
if (visited_branches->contains(bci)) {
if (bci_stack->is_empty()) {
if (handler_stack->is_empty()) {
@@ -2607,7 +2606,10 @@ bool ClassVerifier::ends_in_athrow(u4 start_bc_offset) {
// Push the switch alternatives onto the stack.
for (int i = 0; i < keys; i++) {
int target = bci + (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
int min_offset = -1 * max_method_code_size;
int offset = (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
if (offset < min_offset || offset > max_method_code_size) return false;
int target = bci + offset;
if (target > code_length) return false;
bci_stack->push(target);
}
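The verifier change above passes the raw branch offset to check_jump_target and, in ends_in_athrow, range-checks it against max_method_code_size before ever computing bci + offset. A small standalone sketch of that guard follows; the parameter names and the use of std::optional are illustrative, not the JDK API.

#include <cstdint>
#include <optional>

// Returns the branch target if the signed offset is plausible for a method
// of 'code_length' bytes, or std::nullopt if the branch must be rejected.
std::optional<int32_t> checked_branch_target(int32_t bci,
                                             int32_t offset,
                                             int32_t code_length,
                                             int32_t max_code_size) {
  // Reject offsets that could not occur in a well-formed method,
  // before doing any arithmetic with them.
  if (offset < -max_code_size || offset > max_code_size) {
    return std::nullopt;
  }
  int32_t target = bci + offset;   // safe: |offset| is bounded and bci is a valid index
  if (target < 0 || target > code_length) {
    return std::nullopt;           // the jump would leave the method
  }
  return target;
}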

View File

@@ -289,6 +289,8 @@ bool vmIntrinsics::disabled_by_jvm_flags(vmIntrinsics::ID id) {
case vmIntrinsics::_dsin:
case vmIntrinsics::_dcos:
case vmIntrinsics::_dtan:
case vmIntrinsics::_dtanh:
case vmIntrinsics::_dcbrt:
case vmIntrinsics::_dlog:
case vmIntrinsics::_dexp:
case vmIntrinsics::_dpow:
@@ -314,13 +316,6 @@ bool vmIntrinsics::disabled_by_jvm_flags(vmIntrinsics::ID id) {
case vmIntrinsics::_fmaF:
if (!InlineMathNatives || !UseFMA) return true;
break;
case vmIntrinsics::_dtanh:
case vmIntrinsics::_dcbrt:
if (!InlineMathNatives || !InlineIntrinsics) return true;
#if defined(AMD64) && (defined(COMPILER1) || defined(COMPILER2))
if (!UseLibmIntrinsic) return true;
#endif
break;
case vmIntrinsics::_floatToFloat16:
case vmIntrinsics::_float16ToFloat:
if (!InlineIntrinsics) return true;

View File

@@ -344,6 +344,7 @@ AOTCodeCache::~AOTCodeCache() {
_store_buffer = nullptr;
}
if (_table != nullptr) {
MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
delete _table;
_table = nullptr;
}
@@ -774,6 +775,9 @@ bool AOTCodeCache::store_code_blob(CodeBlob& blob, AOTCodeEntry::Kind entry_kind
// we need to take a lock to prevent race between compiler threads generating AOT code
// and the main thread generating adapter
MutexLocker ml(Compile_lock);
if (!is_on()) {
return false; // AOT code cache was already dumped and closed.
}
if (!cache->align_write()) {
return false;
}
@@ -1434,6 +1438,9 @@ AOTCodeAddressTable::~AOTCodeAddressTable() {
if (_extrs_addr != nullptr) {
FREE_C_HEAP_ARRAY(address, _extrs_addr);
}
if (_stubs_addr != nullptr) {
FREE_C_HEAP_ARRAY(address, _stubs_addr);
}
if (_shared_blobs_addr != nullptr) {
FREE_C_HEAP_ARRAY(address, _shared_blobs_addr);
}
@@ -1485,6 +1492,7 @@ void AOTCodeCache::load_strings() {
int AOTCodeCache::store_strings() {
if (_C_strings_used > 0) {
MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
uint offset = _write_position;
uint length = 0;
uint* lengths = (uint *)reserve_bytes(sizeof(uint) * _C_strings_used);
@@ -1510,15 +1518,17 @@ int AOTCodeCache::store_strings() {
const char* AOTCodeCache::add_C_string(const char* str) {
if (is_on_for_dump() && str != nullptr) {
return _cache->_table->add_C_string(str);
MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
AOTCodeAddressTable* table = addr_table();
if (table != nullptr) {
return table->add_C_string(str);
}
}
return str;
}
const char* AOTCodeAddressTable::add_C_string(const char* str) {
if (_extrs_complete) {
LogStreamHandle(Trace, aot, codecache, stringtable) log; // ctor outside lock
MutexLocker ml(AOTCodeCStrings_lock, Mutex::_no_safepoint_check_flag);
// Check previous strings address
for (int i = 0; i < _C_strings_count; i++) {
if (_C_strings_in[i] == str) {
@@ -1535,9 +1545,7 @@ const char* AOTCodeAddressTable::add_C_string(const char* str) {
_C_strings_in[_C_strings_count] = str;
const char* dup = os::strdup(str);
_C_strings[_C_strings_count++] = dup;
if (log.is_enabled()) {
log.print_cr("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup);
}
log_trace(aot, codecache, stringtable)("add_C_string: [%d] " INTPTR_FORMAT " '%s'", _C_strings_count, p2i(dup), dup);
return dup;
} else {
assert(false, "Number of C strings >= MAX_STR_COUNT");
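add_C_string above now deduplicates under AOTCodeCStrings_lock and hands back a stable copy of the input string. A rough, mutex-guarded equivalent in standard C++ is sketched below; CStringTable and its members are invented names, and std::unordered_set stands in for the fixed-size arrays the real table uses.

#include <mutex>
#include <string>
#include <unordered_set>

// A tiny mutex-guarded string table: add() returns a stable, deduplicated
// copy of the input, similar in spirit to an interning table for C strings.
class CStringTable {
 public:
  const char* add(const char* str) {
    std::lock_guard<std::mutex> guard(_lock);   // serialize all access
    // unordered_set never invalidates references to its elements, so the
    // returned buffer stays valid for the lifetime of the table.
    auto result = _entries.emplace(str);
    return result.first->c_str();
  }

 private:
  std::mutex _lock;
  std::unordered_set<std::string> _entries;
};

As in the hunk above, anything expensive (such as constructing the log stream) is best done before the lock is taken, so the critical section stays short.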

View File

@@ -136,6 +136,7 @@ private:
public:
AOTCodeAddressTable() :
_extrs_addr(nullptr),
_stubs_addr(nullptr),
_shared_blobs_addr(nullptr),
_C1_blobs_addr(nullptr),
_extrs_length(0),

View File

@@ -160,7 +160,7 @@ CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size
}
} else {
// We need unique and valid not null address
assert(_mutable_data = blob_end(), "sanity");
assert(_mutable_data == blob_end(), "sanity");
}
set_oop_maps(oop_maps);
@@ -177,6 +177,7 @@ CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t heade
_code_offset(_content_offset),
_data_offset(size),
_frame_size(0),
_mutable_data_size(0),
S390_ONLY(_ctable_offset(0) COMMA)
_header_size(header_size),
_frame_complete_offset(CodeOffsets::frame_never_safe),
@@ -185,7 +186,7 @@ CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t heade
{
assert(is_aligned(size, oopSize), "unaligned size");
assert(is_aligned(header_size, oopSize), "unaligned size");
assert(_mutable_data = blob_end(), "sanity");
assert(_mutable_data == blob_end(), "sanity");
}
void CodeBlob::restore_mutable_data(address reloc_data) {
@@ -195,8 +196,11 @@ void CodeBlob::restore_mutable_data(address reloc_data) {
if (_mutable_data == nullptr) {
vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
}
} else {
_mutable_data = blob_end(); // default value
}
if (_relocation_size > 0) {
assert(_mutable_data_size > 0, "relocation is part of mutable data section");
memcpy((address)relocation_begin(), reloc_data, relocation_size());
}
}
@@ -206,6 +210,8 @@ void CodeBlob::purge() {
if (_mutable_data != blob_end()) {
os::free(_mutable_data);
_mutable_data = blob_end(); // Valid not null address
_mutable_data_size = 0;
_relocation_size = 0;
}
if (_oop_maps != nullptr) {
delete _oop_maps;
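Two of the codeBlob.cpp fixes above replace an accidental assignment inside an assert with the intended comparison. The snippet below illustrates why the original compiled and always passed; the helper names are invented for the example.

#include <cassert>

int end_marker() { return 42; }

void check(int value) {
  // Bug pattern fixed above: a single '=' assigns and then asserts the
  // truth of the assigned (non-zero) value, silently clobbering 'value'.
  // assert(value = end_marker());   // always passes, never checks anything
  assert(value == end_marker());     // the intended comparison
  (void)value;                       // keep 'value' used when NDEBUG strips the assert
}

Compilers typically flag the first form with -Wparentheses, which is one reason assignments inside assert conditions rarely survive review.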

View File

@@ -247,7 +247,7 @@ public:
// Sizes
int size() const { return _size; }
int header_size() const { return _header_size; }
int relocation_size() const { return pointer_delta_as_int((address) relocation_end(), (address) relocation_begin()); }
int relocation_size() const { return _relocation_size; }
int content_size() const { return pointer_delta_as_int(content_end(), content_begin()); }
int code_size() const { return pointer_delta_as_int(code_end(), code_begin()); }

View File

@@ -1361,7 +1361,7 @@ void CodeCache::make_marked_nmethods_deoptimized() {
while(iter.next()) {
nmethod* nm = iter.method();
if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) {
nm->make_not_entrant(nmethod::ChangeReason::marked_for_deoptimization);
nm->make_not_entrant("marked for deoptimization");
nm->make_deoptimized();
}
}

View File

@@ -28,7 +28,6 @@
#include "code/dependencies.hpp"
#include "code/nativeInst.hpp"
#include "code/nmethod.inline.hpp"
#include "code/relocInfo.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compilationLog.hpp"
@@ -1653,10 +1652,6 @@ void nmethod::maybe_print_nmethod(const DirectiveSet* directive) {
}
void nmethod::print_nmethod(bool printmethod) {
// Enter a critical section to prevent a race with deopts that patch code and updates the relocation info.
// Unfortunately, we have to lock the NMethodState_lock before the tty lock due to the deadlock rules and
// cannot lock in a more finely grained manner.
ConditionalMutexLocker ml(NMethodState_lock, !NMethodState_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
ttyLocker ttyl; // keep the following output all in one block
if (xtty != nullptr) {
xtty->begin_head("print_nmethod");
@@ -1975,12 +1970,14 @@ void nmethod::invalidate_osr_method() {
}
}
void nmethod::log_state_change(ChangeReason change_reason) const {
void nmethod::log_state_change(const char* reason) const {
assert(reason != nullptr, "Must provide a reason");
if (LogCompilation) {
if (xtty != nullptr) {
ttyLocker ttyl; // keep the following output all in one block
xtty->begin_elem("make_not_entrant thread='%zu' reason='%s'",
os::current_thread_id(), change_reason_to_string(change_reason));
os::current_thread_id(), reason);
log_identity(xtty);
xtty->stamp();
xtty->end_elem();
@@ -1989,7 +1986,7 @@ void nmethod::log_state_change(ChangeReason change_reason) const {
ResourceMark rm;
stringStream ss(NEW_RESOURCE_ARRAY(char, 256), 256);
ss.print("made not entrant: %s", change_reason_to_string(change_reason));
ss.print("made not entrant: %s", reason);
CompileTask::print_ul(this, ss.freeze());
if (PrintCompilation) {
@@ -2004,7 +2001,9 @@ void nmethod::unlink_from_method() {
}
// Invalidate code
bool nmethod::make_not_entrant(ChangeReason change_reason) {
bool nmethod::make_not_entrant(const char* reason) {
assert(reason != nullptr, "Must provide a reason");
// This can be called while the system is already at a safepoint which is ok
NoSafepointVerifier nsv;
@@ -2042,17 +2041,6 @@ bool nmethod::make_not_entrant(ChangeReason change_reason) {
// cache call.
NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
SharedRuntime::get_handle_wrong_method_stub());
// Update the relocation info for the patched entry.
// First, get the old relocation info...
RelocIterator iter(this, verified_entry_point(), verified_entry_point() + 8);
if (iter.next() && iter.addr() == verified_entry_point()) {
Relocation* old_reloc = iter.reloc();
// ...then reset the iterator to update it.
RelocIterator iter(this, verified_entry_point(), verified_entry_point() + 8);
relocInfo::change_reloc_info_for_address(&iter, verified_entry_point(), old_reloc->type(),
relocInfo::relocType::runtime_call_type);
}
}
if (update_recompile_counts()) {
@@ -2073,7 +2061,7 @@ bool nmethod::make_not_entrant(ChangeReason change_reason) {
assert(success, "Transition can't fail");
// Log the transition once
log_state_change(change_reason);
log_state_change(reason);
// Remove nmethod from method.
unlink_from_method();
@@ -2178,6 +2166,7 @@ void nmethod::purge(bool unregister_nmethod) {
}
CodeCache::unregister_old_nmethod(this);
JVMCI_ONLY( _metadata_size = 0; )
CodeBlob::purge();
}
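The deleted print_nmethod() comment above describes the deadlock rule that forced taking NMethodState_lock before the tty lock. As a generic illustration (the mutexes below are stand-ins, not HotSpot locks), these are the two usual ways to hold a pair of locks without risking deadlock.

#include <mutex>

std::mutex state_lock;   // stand-in for the coarser state lock
std::mutex tty_lock;     // stand-in for the output lock

void print_with_manual_ordering() {
  std::lock_guard<std::mutex> s(state_lock);   // every path takes this first...
  std::lock_guard<std::mutex> t(tty_lock);     // ...and the output lock second
  // ... print while both are held ...
}

void print_with_scoped_lock() {
  std::scoped_lock both(state_lock, tty_lock); // acquisition order handled internally
  // ... print while both are held ...
}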

View File

@@ -471,85 +471,6 @@ class nmethod : public CodeBlob {
void oops_do_set_strong_done(nmethod* old_head);
public:
enum class ChangeReason : u1 {
C1_codepatch,
C1_deoptimize,
C1_deoptimize_for_patching,
C1_predicate_failed_trap,
CI_replay,
JVMCI_invalidate_nmethod,
JVMCI_invalidate_nmethod_mirror,
JVMCI_materialize_virtual_object,
JVMCI_new_installation,
JVMCI_register_method,
JVMCI_replacing_with_new_code,
JVMCI_reprofile,
marked_for_deoptimization,
missing_exception_handler,
not_used,
OSR_invalidation_back_branch,
OSR_invalidation_for_compiling_with_C1,
OSR_invalidation_of_lower_level,
set_native_function,
uncommon_trap,
whitebox_deoptimization,
zombie,
};
static const char* change_reason_to_string(ChangeReason change_reason) {
switch (change_reason) {
case ChangeReason::C1_codepatch:
return "C1 code patch";
case ChangeReason::C1_deoptimize:
return "C1 deoptimized";
case ChangeReason::C1_deoptimize_for_patching:
return "C1 deoptimize for patching";
case ChangeReason::C1_predicate_failed_trap:
return "C1 predicate failed trap";
case ChangeReason::CI_replay:
return "CI replay";
case ChangeReason::JVMCI_invalidate_nmethod:
return "JVMCI invalidate nmethod";
case ChangeReason::JVMCI_invalidate_nmethod_mirror:
return "JVMCI invalidate nmethod mirror";
case ChangeReason::JVMCI_materialize_virtual_object:
return "JVMCI materialize virtual object";
case ChangeReason::JVMCI_new_installation:
return "JVMCI new installation";
case ChangeReason::JVMCI_register_method:
return "JVMCI register method";
case ChangeReason::JVMCI_replacing_with_new_code:
return "JVMCI replacing with new code";
case ChangeReason::JVMCI_reprofile:
return "JVMCI reprofile";
case ChangeReason::marked_for_deoptimization:
return "marked for deoptimization";
case ChangeReason::missing_exception_handler:
return "missing exception handler";
case ChangeReason::not_used:
return "not used";
case ChangeReason::OSR_invalidation_back_branch:
return "OSR invalidation back branch";
case ChangeReason::OSR_invalidation_for_compiling_with_C1:
return "OSR invalidation for compiling with C1";
case ChangeReason::OSR_invalidation_of_lower_level:
return "OSR invalidation of lower level";
case ChangeReason::set_native_function:
return "set native function";
case ChangeReason::uncommon_trap:
return "uncommon trap";
case ChangeReason::whitebox_deoptimization:
return "whitebox deoptimization";
case ChangeReason::zombie:
return "zombie";
default: {
assert(false, "Unhandled reason");
return "Unknown";
}
}
}
// create nmethod with entry_bci
static nmethod* new_nmethod(const methodHandle& method,
int compile_id,
@@ -712,8 +633,8 @@ public:
// alive. It is used when an uncommon trap happens. Returns true
// if this thread changed the state of the nmethod or false if
// another thread performed the transition.
bool make_not_entrant(ChangeReason change_reason);
bool make_not_used() { return make_not_entrant(ChangeReason::not_used); }
bool make_not_entrant(const char* reason);
bool make_not_used() { return make_not_entrant("not used"); }
bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
@@ -1026,7 +947,7 @@ public:
// Logging
void log_identity(xmlStream* log) const;
void log_new_nmethod() const;
void log_state_change(ChangeReason change_reason) const;
void log_state_change(const char* reason) const;
// Prints block-level comments, including nmethod specific block labels:
void print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels=true) const;
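The nmethod.hpp hunk above drops the ChangeReason enum and its change_reason_to_string() switch in favour of passing the reason text directly. A toy sketch of the simplified shape follows (CompiledCode and its methods are placeholders, not HotSpot types); the trade-off is losing the compiler-checked enumeration in exchange for letting callers supply arbitrary reason strings.

#include <cstdio>

class CompiledCode {
 public:
  bool make_not_entrant(const char* reason) {
    // ... state transition elided ...
    log_state_change(reason);   // the reason is logged verbatim, no switch needed
    return true;
  }
  bool make_not_used() { return make_not_entrant("not used"); }

 private:
  void log_state_change(const char* reason) const {
    std::printf("made not entrant: %s\n", reason);
  }
};

int main() {
  CompiledCode code;
  code.make_not_entrant("marked for deoptimization");
  code.make_not_used();
  return 0;
}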

View File

@@ -924,7 +924,7 @@ void CompilationPolicy::compile(const methodHandle& mh, int bci, CompLevel level
nmethod* osr_nm = mh->lookup_osr_nmethod_for(bci, CompLevel_simple, false);
if (osr_nm != nullptr && osr_nm->comp_level() > CompLevel_simple) {
// Invalidate the existing OSR nmethod so that a compile at CompLevel_simple is permitted.
osr_nm->make_not_entrant(nmethod::ChangeReason::OSR_invalidation_for_compiling_with_C1);
osr_nm->make_not_entrant("OSR invalidation for compiling with C1");
}
compile(mh, bci, CompLevel_simple, THREAD);
}
@@ -1516,7 +1516,7 @@ void CompilationPolicy::method_back_branch_event(const methodHandle& mh, const m
int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
}
nm->make_not_entrant(nmethod::ChangeReason::OSR_invalidation_back_branch);
nm->make_not_entrant("OSR invalidation, back branch");
}
}
// Fix up next_level if necessary to avoid deopts

View File

@@ -1750,6 +1750,10 @@ void CompileBroker::wait_for_completion(CompileTask* task) {
}
}
void CompileBroker::wait_for_no_active_tasks() {
CompileTask::wait_for_no_active_tasks();
}
/**
* Initialize compiler thread(s) + compiler object(s). The postcondition
* of this function is that the compiler runtimes are initialized and that

View File

@@ -383,6 +383,9 @@ public:
static bool is_compilation_disabled_forever() {
return _should_compile_new_jobs == shutdown_compilation;
}
static void wait_for_no_active_tasks();
static void handle_full_code_cache(CodeBlobType code_blob_type);
// Ensures that warning is only printed once.
static bool should_print_compiler_warning() {

View File

@@ -37,12 +37,13 @@
#include "runtime/mutexLocker.hpp"
CompileTask* CompileTask::_task_free_list = nullptr;
int CompileTask::_active_tasks = 0;
/**
* Allocate a CompileTask, from the free list if possible.
*/
CompileTask* CompileTask::allocate() {
MutexLocker locker(CompileTaskAlloc_lock);
MonitorLocker locker(CompileTaskAlloc_lock);
CompileTask* task = nullptr;
if (_task_free_list != nullptr) {
@@ -56,6 +57,7 @@ CompileTask* CompileTask::allocate() {
}
assert(task->is_free(), "Task must be free.");
task->set_is_free(false);
_active_tasks++;
return task;
}
@@ -63,7 +65,7 @@ CompileTask* CompileTask::allocate() {
* Add a task to the free list.
*/
void CompileTask::free(CompileTask* task) {
MutexLocker locker(CompileTaskAlloc_lock);
MonitorLocker locker(CompileTaskAlloc_lock);
if (!task->is_free()) {
if ((task->_method_holder != nullptr && JNIHandles::is_weak_global_handle(task->_method_holder))) {
JNIHandles::destroy_weak_global(task->_method_holder);
@@ -79,6 +81,17 @@ void CompileTask::free(CompileTask* task) {
task->set_is_free(true);
task->set_next(_task_free_list);
_task_free_list = task;
_active_tasks--;
if (_active_tasks == 0) {
locker.notify_all();
}
}
}
void CompileTask::wait_for_no_active_tasks() {
MonitorLocker locker(CompileTaskAlloc_lock);
while (_active_tasks > 0) {
locker.wait();
}
}
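compileTask.cpp above turns CompileTaskAlloc_lock into a monitor so free() can notify a waiter once _active_tasks drops to zero; switching MutexLocker to MonitorLocker in allocate()/free() is what allows wait()/notify_all() on the same lock. The same wait-for-quiescence protocol is sketched below with a standard condition variable instead of HotSpot's MonitorLocker (TaskTracker is an invented name).

#include <condition_variable>
#include <mutex>

class TaskTracker {
 public:
  void task_started() {
    std::lock_guard<std::mutex> guard(_lock);
    ++_active_tasks;
  }

  void task_finished() {
    std::lock_guard<std::mutex> guard(_lock);
    if (--_active_tasks == 0) {
      _idle.notify_all();               // wake anyone waiting for quiescence
    }
  }

  // Blocks until every task that was started has finished.
  void wait_for_no_active_tasks() {
    std::unique_lock<std::mutex> guard(_lock);
    _idle.wait(guard, [this] { return _active_tasks == 0; });
  }

 private:
  std::mutex _lock;
  std::condition_variable _idle;
  int _active_tasks = 0;
};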

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -83,6 +83,7 @@ class CompileTask : public CHeapObj<mtCompiler> {
private:
static CompileTask* _task_free_list;
static int _active_tasks;
int _compile_id;
Method* _method;
jobject _method_holder;
@@ -123,6 +124,7 @@ class CompileTask : public CHeapObj<mtCompiler> {
static CompileTask* allocate();
static void free(CompileTask* task);
static void wait_for_no_active_tasks();
int compile_id() const { return _compile_id; }
Method* method() const { return _method; }

View File

@@ -98,15 +98,15 @@ void ParallelArguments::initialize() {
FullGCForwarding::initialize_flags(heap_reserved_size_bytes());
}
// The alignment used for spaces in young gen and old gen
static size_t default_space_alignment() {
// The alignment used for boundary between young gen and old gen
static size_t default_gen_alignment() {
return 64 * K * HeapWordSize;
}
void ParallelArguments::initialize_alignments() {
// Initialize card size before initializing alignments
CardTable::initialize_card_size();
SpaceAlignment = default_space_alignment();
SpaceAlignment = GenAlignment = default_gen_alignment();
HeapAlignment = compute_heap_alignment();
}
@@ -123,8 +123,9 @@ void ParallelArguments::initialize_heap_flags_and_sizes() {
// Can a page size be something else than a power of two?
assert(is_power_of_2((intptr_t)page_sz), "must be a power of 2");
size_t new_alignment = align_up(page_sz, SpaceAlignment);
if (new_alignment != SpaceAlignment) {
size_t new_alignment = align_up(page_sz, GenAlignment);
if (new_alignment != GenAlignment) {
GenAlignment = new_alignment;
SpaceAlignment = new_alignment;
// Redo everything from the start
initialize_heap_flags_and_sizes_one_pass();

View File

@@ -29,8 +29,10 @@
void ParallelInitLogger::print_heap() {
log_info_p(gc, init)("Alignments:"
" Space " EXACTFMT ","
" Generation " EXACTFMT ","
" Heap " EXACTFMT,
EXACTFMTARGS(SpaceAlignment),
EXACTFMTARGS(GenAlignment),
EXACTFMTARGS(HeapAlignment));
GCInitLogger::print_heap();
}

View File

@@ -69,8 +69,8 @@ jint ParallelScavengeHeap::initialize() {
initialize_reserved_region(heap_rs);
// Layout the reserved space for the generations.
ReservedSpace old_rs = heap_rs.first_part(MaxOldSize, SpaceAlignment);
ReservedSpace young_rs = heap_rs.last_part(MaxOldSize, SpaceAlignment);
ReservedSpace old_rs = heap_rs.first_part(MaxOldSize, GenAlignment);
ReservedSpace young_rs = heap_rs.last_part(MaxOldSize, GenAlignment);
assert(young_rs.size() == MaxNewSize, "Didn't reserve all of the heap");
PSCardTable* card_table = new PSCardTable(_reserved);
@@ -107,7 +107,7 @@ jint ParallelScavengeHeap::initialize() {
new PSAdaptiveSizePolicy(eden_capacity,
initial_promo_size,
young_gen()->to_space()->capacity_in_bytes(),
SpaceAlignment,
GenAlignment,
max_gc_pause_sec,
GCTimeRatio
);

View File

@@ -41,7 +41,7 @@ PSOldGen::PSOldGen(ReservedSpace rs, size_t initial_size, size_t min_size,
_min_gen_size(min_size),
_max_gen_size(max_size)
{
initialize(rs, initial_size, SpaceAlignment);
initialize(rs, initial_size, GenAlignment);
}
void PSOldGen::initialize(ReservedSpace rs, size_t initial_size, size_t alignment) {

View File

@@ -47,7 +47,7 @@ PSYoungGen::PSYoungGen(ReservedSpace rs, size_t initial_size, size_t min_size, s
_from_counters(nullptr),
_to_counters(nullptr)
{
initialize(rs, initial_size, SpaceAlignment);
initialize(rs, initial_size, GenAlignment);
}
void PSYoungGen::initialize_virtual_space(ReservedSpace rs,
@@ -746,7 +746,7 @@ size_t PSYoungGen::available_to_live() {
}
size_t delta_in_bytes = unused_committed + delta_in_survivor;
delta_in_bytes = align_down(delta_in_bytes, SpaceAlignment);
delta_in_bytes = align_down(delta_in_bytes, GenAlignment);
return delta_in_bytes;
}

View File

@@ -188,8 +188,8 @@ jint SerialHeap::initialize() {
initialize_reserved_region(heap_rs);
ReservedSpace young_rs = heap_rs.first_part(MaxNewSize, SpaceAlignment);
ReservedSpace old_rs = heap_rs.last_part(MaxNewSize, SpaceAlignment);
ReservedSpace young_rs = heap_rs.first_part(MaxNewSize, GenAlignment);
ReservedSpace old_rs = heap_rs.last_part(MaxNewSize, GenAlignment);
_rem_set = new CardTableRS(_reserved);
_rem_set->initialize(young_rs.base(), old_rs.base());

View File

@@ -35,7 +35,7 @@ extern size_t SpaceAlignment;
class GCArguments {
protected:
// Initialize HeapAlignment, SpaceAlignment
// Initialize HeapAlignment, SpaceAlignment, and extra alignments (E.g. GenAlignment)
virtual void initialize_alignments() = 0;
virtual void initialize_heap_flags_and_sizes();
virtual void initialize_size_info();

View File

@@ -42,15 +42,17 @@ size_t MaxOldSize = 0;
// See more in JDK-8346005
size_t OldSize = ScaleForWordSize(4*M);
size_t GenAlignment = 0;
size_t GenArguments::conservative_max_heap_alignment() { return (size_t)Generation::GenGrain; }
static size_t young_gen_size_lower_bound() {
// The young generation must be aligned and have room for eden + two survivors
return 3 * SpaceAlignment;
return align_up(3 * SpaceAlignment, GenAlignment);
}
static size_t old_gen_size_lower_bound() {
return SpaceAlignment;
return align_up(SpaceAlignment, GenAlignment);
}
size_t GenArguments::scale_by_NewRatio_aligned(size_t base_size, size_t alignment) {
@@ -67,20 +69,23 @@ static size_t bound_minus_alignment(size_t desired_size,
void GenArguments::initialize_alignments() {
// Initialize card size before initializing alignments
CardTable::initialize_card_size();
SpaceAlignment = (size_t)Generation::GenGrain;
SpaceAlignment = GenAlignment = (size_t)Generation::GenGrain;
HeapAlignment = compute_heap_alignment();
}
void GenArguments::initialize_heap_flags_and_sizes() {
GCArguments::initialize_heap_flags_and_sizes();
assert(SpaceAlignment != 0, "Generation alignment not set up properly");
assert(HeapAlignment >= SpaceAlignment,
"HeapAlignment: %zu less than SpaceAlignment: %zu",
HeapAlignment, SpaceAlignment);
assert(HeapAlignment % SpaceAlignment == 0,
"HeapAlignment: %zu not aligned by SpaceAlignment: %zu",
HeapAlignment, SpaceAlignment);
assert(GenAlignment != 0, "Generation alignment not set up properly");
assert(HeapAlignment >= GenAlignment,
"HeapAlignment: %zu less than GenAlignment: %zu",
HeapAlignment, GenAlignment);
assert(GenAlignment % SpaceAlignment == 0,
"GenAlignment: %zu not aligned by SpaceAlignment: %zu",
GenAlignment, SpaceAlignment);
assert(HeapAlignment % GenAlignment == 0,
"HeapAlignment: %zu not aligned by GenAlignment: %zu",
HeapAlignment, GenAlignment);
// All generational heaps have a young gen; handle those flags here
@@ -101,7 +106,7 @@ void GenArguments::initialize_heap_flags_and_sizes() {
// Make sure NewSize allows an old generation to fit even if set on the command line
if (FLAG_IS_CMDLINE(NewSize) && NewSize >= InitialHeapSize) {
size_t revised_new_size = bound_minus_alignment(NewSize, InitialHeapSize, SpaceAlignment);
size_t revised_new_size = bound_minus_alignment(NewSize, InitialHeapSize, GenAlignment);
log_warning(gc, ergo)("NewSize (%zuk) is equal to or greater than initial heap size (%zuk). A new "
"NewSize of %zuk will be used to accomodate an old generation.",
NewSize/K, InitialHeapSize/K, revised_new_size/K);
@@ -110,8 +115,8 @@ void GenArguments::initialize_heap_flags_and_sizes() {
// Now take the actual NewSize into account. We will silently increase NewSize
// if the user specified a smaller or unaligned value.
size_t bounded_new_size = bound_minus_alignment(NewSize, MaxHeapSize, SpaceAlignment);
bounded_new_size = MAX2(smallest_new_size, align_down(bounded_new_size, SpaceAlignment));
size_t bounded_new_size = bound_minus_alignment(NewSize, MaxHeapSize, GenAlignment);
bounded_new_size = MAX2(smallest_new_size, align_down(bounded_new_size, GenAlignment));
if (bounded_new_size != NewSize) {
FLAG_SET_ERGO(NewSize, bounded_new_size);
}
@@ -120,7 +125,7 @@ void GenArguments::initialize_heap_flags_and_sizes() {
if (!FLAG_IS_DEFAULT(MaxNewSize)) {
if (MaxNewSize >= MaxHeapSize) {
// Make sure there is room for an old generation
size_t smaller_max_new_size = MaxHeapSize - SpaceAlignment;
size_t smaller_max_new_size = MaxHeapSize - GenAlignment;
if (FLAG_IS_CMDLINE(MaxNewSize)) {
log_warning(gc, ergo)("MaxNewSize (%zuk) is equal to or greater than the entire "
"heap (%zuk). A new max generation size of %zuk will be used.",
@@ -132,8 +137,8 @@ void GenArguments::initialize_heap_flags_and_sizes() {
}
} else if (MaxNewSize < NewSize) {
FLAG_SET_ERGO(MaxNewSize, NewSize);
} else if (!is_aligned(MaxNewSize, SpaceAlignment)) {
FLAG_SET_ERGO(MaxNewSize, align_down(MaxNewSize, SpaceAlignment));
} else if (!is_aligned(MaxNewSize, GenAlignment)) {
FLAG_SET_ERGO(MaxNewSize, align_down(MaxNewSize, GenAlignment));
}
}
@@ -161,13 +166,13 @@ void GenArguments::initialize_heap_flags_and_sizes() {
// exceed it. Adjust New/OldSize as necessary.
size_t calculated_size = NewSize + OldSize;
double shrink_factor = (double) MaxHeapSize / calculated_size;
size_t smaller_new_size = align_down((size_t)(NewSize * shrink_factor), SpaceAlignment);
size_t smaller_new_size = align_down((size_t)(NewSize * shrink_factor), GenAlignment);
FLAG_SET_ERGO(NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size));
// OldSize is already aligned because above we aligned MaxHeapSize to
// HeapAlignment, and we just made sure that NewSize is aligned to
// SpaceAlignment. In initialize_flags() we verified that HeapAlignment
// is a multiple of SpaceAlignment.
// GenAlignment. In initialize_flags() we verified that HeapAlignment
// is a multiple of GenAlignment.
OldSize = MaxHeapSize - NewSize;
} else {
FLAG_SET_ERGO(MaxHeapSize, align_up(NewSize + OldSize, HeapAlignment));
@@ -195,7 +200,7 @@ void GenArguments::initialize_size_info() {
// Determine maximum size of the young generation.
if (FLAG_IS_DEFAULT(MaxNewSize)) {
max_young_size = scale_by_NewRatio_aligned(MaxHeapSize, SpaceAlignment);
max_young_size = scale_by_NewRatio_aligned(MaxHeapSize, GenAlignment);
// Bound the maximum size by NewSize below (since it historically
// would have been NewSize and because the NewRatio calculation could
// yield a size that is too small) and bound it by MaxNewSize above.
@@ -224,18 +229,18 @@ void GenArguments::initialize_size_info() {
// If NewSize is set on the command line, we should use it as
// the initial size, but make sure it is within the heap bounds.
initial_young_size =
MIN2(max_young_size, bound_minus_alignment(NewSize, InitialHeapSize, SpaceAlignment));
MinNewSize = bound_minus_alignment(initial_young_size, MinHeapSize, SpaceAlignment);
MIN2(max_young_size, bound_minus_alignment(NewSize, InitialHeapSize, GenAlignment));
MinNewSize = bound_minus_alignment(initial_young_size, MinHeapSize, GenAlignment);
} else {
// For the case where NewSize is not set on the command line, use
// NewRatio to size the initial generation size. Use the current
// NewSize as the floor, because if NewRatio is overly large, the resulting
// size can be too small.
initial_young_size =
clamp(scale_by_NewRatio_aligned(InitialHeapSize, SpaceAlignment), NewSize, max_young_size);
clamp(scale_by_NewRatio_aligned(InitialHeapSize, GenAlignment), NewSize, max_young_size);
// Derive MinNewSize from MinHeapSize
MinNewSize = MIN2(scale_by_NewRatio_aligned(MinHeapSize, SpaceAlignment), initial_young_size);
MinNewSize = MIN2(scale_by_NewRatio_aligned(MinHeapSize, GenAlignment), initial_young_size);
}
}
@@ -247,7 +252,7 @@ void GenArguments::initialize_size_info() {
// The maximum old size can be determined from the maximum young
// and maximum heap size since no explicit flags exist
// for setting the old generation maximum.
MaxOldSize = MAX2(MaxHeapSize - max_young_size, SpaceAlignment);
MaxOldSize = MAX2(MaxHeapSize - max_young_size, GenAlignment);
MinOldSize = MIN3(MaxOldSize,
InitialHeapSize - initial_young_size,
MinHeapSize - MinNewSize);
@@ -310,10 +315,10 @@ void GenArguments::assert_flags() {
assert(NewSize >= MinNewSize, "Ergonomics decided on a too small young gen size");
assert(NewSize <= MaxNewSize, "Ergonomics decided on incompatible initial and maximum young gen sizes");
assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young gen and heap sizes");
assert(NewSize % SpaceAlignment == 0, "NewSize alignment");
assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize % SpaceAlignment == 0, "MaxNewSize alignment");
assert(NewSize % GenAlignment == 0, "NewSize alignment");
assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize % GenAlignment == 0, "MaxNewSize alignment");
assert(OldSize + NewSize <= MaxHeapSize, "Ergonomics decided on incompatible generation and heap sizes");
assert(OldSize % SpaceAlignment == 0, "OldSize alignment");
assert(OldSize % GenAlignment == 0, "OldSize alignment");
}
void GenArguments::assert_size_info() {
@@ -322,19 +327,19 @@ void GenArguments::assert_size_info() {
assert(MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young and heap sizes");
assert(MinNewSize <= NewSize, "Ergonomics decided on incompatible minimum and initial young gen sizes");
assert(NewSize <= MaxNewSize, "Ergonomics decided on incompatible initial and maximum young gen sizes");
assert(MinNewSize % SpaceAlignment == 0, "_min_young_size alignment");
assert(NewSize % SpaceAlignment == 0, "_initial_young_size alignment");
assert(MaxNewSize % SpaceAlignment == 0, "MaxNewSize alignment");
assert(MinNewSize <= bound_minus_alignment(MinNewSize, MinHeapSize, SpaceAlignment),
assert(MinNewSize % GenAlignment == 0, "_min_young_size alignment");
assert(NewSize % GenAlignment == 0, "_initial_young_size alignment");
assert(MaxNewSize % GenAlignment == 0, "MaxNewSize alignment");
assert(MinNewSize <= bound_minus_alignment(MinNewSize, MinHeapSize, GenAlignment),
"Ergonomics made minimum young generation larger than minimum heap");
assert(NewSize <= bound_minus_alignment(NewSize, InitialHeapSize, SpaceAlignment),
assert(NewSize <= bound_minus_alignment(NewSize, InitialHeapSize, GenAlignment),
"Ergonomics made initial young generation larger than initial heap");
assert(MaxNewSize <= bound_minus_alignment(MaxNewSize, MaxHeapSize, SpaceAlignment),
assert(MaxNewSize <= bound_minus_alignment(MaxNewSize, MaxHeapSize, GenAlignment),
"Ergonomics made maximum young generation lager than maximum heap");
assert(MinOldSize <= OldSize, "Ergonomics decided on incompatible minimum and initial old gen sizes");
assert(OldSize <= MaxOldSize, "Ergonomics decided on incompatible initial and maximum old gen sizes");
assert(MaxOldSize % SpaceAlignment == 0, "MaxOldSize alignment");
assert(OldSize % SpaceAlignment == 0, "OldSize alignment");
assert(MaxOldSize % GenAlignment == 0, "MaxOldSize alignment");
assert(OldSize % GenAlignment == 0, "OldSize alignment");
assert(MaxHeapSize <= (MaxNewSize + MaxOldSize), "Total maximum heap sizes must be sum of generation maximum sizes");
assert(MinNewSize + MinOldSize <= MinHeapSize, "Minimum generation sizes exceed minimum heap size");
assert(NewSize + OldSize == InitialHeapSize, "Initial generation sizes should match initial heap size");
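The GenArguments changes reintroduce GenAlignment as a knob separate from SpaceAlignment, and the young/old lower bounds become align_up(..., GenAlignment). The sketch below shows the power-of-two align_up/align_down arithmetic with made-up example alignments (512 KiB spaces, 2 MiB generation boundary); the constants are illustrative, not JDK defaults.

#include <cassert>
#include <cstddef>
#include <cstdio>

// Power-of-two alignment helpers in the spirit of HotSpot's align_up/align_down.
static std::size_t align_up(std::size_t value, std::size_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of two");
  return (value + alignment - 1) & ~(alignment - 1);
}
static std::size_t align_down(std::size_t value, std::size_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of two");
  return value & ~(alignment - 1);
}

int main() {
  // Hypothetical values: spaces aligned to 512 KiB, generation boundary to 2 MiB.
  const std::size_t space_alignment = 512 * 1024;
  const std::size_t gen_alignment   = 2 * 1024 * 1024;

  // Young gen needs eden + two survivors, then rounds up to the gen boundary.
  std::size_t young_lower_bound = align_up(3 * space_alignment, gen_alignment);
  std::size_t old_lower_bound   = align_up(space_alignment, gen_alignment);

  std::printf("young >= %zu bytes, old >= %zu bytes\n",
              young_lower_bound, old_lower_bound);
  return 0;
}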

View File

@@ -35,6 +35,8 @@ extern size_t MaxOldSize;
extern size_t OldSize;
extern size_t GenAlignment;
class GenArguments : public GCArguments {
friend class TestGenCollectorPolicy; // Testing
private:

View File

@@ -625,6 +625,34 @@ void ShenandoahBarrierC2Support::verify(RootNode* root) {
}
#endif
bool ShenandoahBarrierC2Support::is_anti_dependent_load_at_control(PhaseIdealLoop* phase, Node* maybe_load, Node* store,
Node* control) {
return maybe_load->is_Load() && phase->C->can_alias(store->adr_type(), phase->C->get_alias_index(maybe_load->adr_type())) &&
phase->ctrl_or_self(maybe_load) == control;
}
void ShenandoahBarrierC2Support::maybe_push_anti_dependent_loads(PhaseIdealLoop* phase, Node* maybe_store, Node* control, Unique_Node_List &wq) {
if (!maybe_store->is_Store() && !maybe_store->is_LoadStore()) {
return;
}
Node* mem = maybe_store->in(MemNode::Memory);
for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
Node* u = mem->fast_out(i);
if (is_anti_dependent_load_at_control(phase, u, maybe_store, control)) {
wq.push(u);
}
}
}
void ShenandoahBarrierC2Support::push_data_inputs_at_control(PhaseIdealLoop* phase, Node* n, Node* ctrl, Unique_Node_List &wq) {
for (uint i = 0; i < n->req(); i++) {
Node* in = n->in(i);
if (in != nullptr && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
wq.push(in);
}
}
}
bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
// That both nodes have the same control is not sufficient to prove
// domination, verify that there's no path from d to n
@@ -639,22 +667,9 @@ bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node*
if (m->is_Phi() && m->in(0)->is_Loop()) {
assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
} else {
if (m->is_Store() || m->is_LoadStore()) {
// Take anti-dependencies into account
Node* mem = m->in(MemNode::Memory);
for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
Node* u = mem->fast_out(i);
if (u->is_Load() && phase->C->can_alias(m->adr_type(), phase->C->get_alias_index(u->adr_type())) &&
phase->ctrl_or_self(u) == c) {
wq.push(u);
}
}
}
for (uint i = 0; i < m->req(); i++) {
if (m->in(i) != nullptr && phase->ctrl_or_self(m->in(i)) == c) {
wq.push(m->in(i));
}
}
// Take anti-dependencies into account
maybe_push_anti_dependent_loads(phase, m, c, wq);
push_data_inputs_at_control(phase, m, c, wq);
}
}
return true;
@@ -1006,7 +1021,20 @@ void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* lo
phase->register_new_node(val, ctrl);
}
void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
void ShenandoahBarrierC2Support::collect_nodes_above_barrier(Unique_Node_List &nodes_above_barrier, PhaseIdealLoop* phase, Node* ctrl, Node* init_raw_mem) {
nodes_above_barrier.clear();
if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
nodes_above_barrier.push(init_raw_mem);
}
for (uint next = 0; next < nodes_above_barrier.size(); next++) {
Node* n = nodes_above_barrier.at(next);
// Take anti-dependencies into account
maybe_push_anti_dependent_loads(phase, n, ctrl, nodes_above_barrier);
push_data_inputs_at_control(phase, n, ctrl, nodes_above_barrier);
}
}
void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& nodes_above_barrier, uint last, PhaseIdealLoop* phase) {
Node* ctrl = phase->get_ctrl(barrier);
Node* init_raw_mem = fixer.find_mem(ctrl, barrier);
@@ -1017,30 +1045,17 @@ void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const Mem
// control will be after the expanded barrier. The raw memory (if
// its memory is control dependent on the barrier's input control)
// must stay above the barrier.
uses_to_ignore.clear();
if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
uses_to_ignore.push(init_raw_mem);
}
for (uint next = 0; next < uses_to_ignore.size(); next++) {
Node *n = uses_to_ignore.at(next);
for (uint i = 0; i < n->req(); i++) {
Node* in = n->in(i);
if (in != nullptr && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
uses_to_ignore.push(in);
}
}
}
collect_nodes_above_barrier(nodes_above_barrier, phase, ctrl, init_raw_mem);
for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
Node* u = ctrl->fast_out(i);
if (u->_idx < last &&
u != barrier &&
!u->depends_only_on_test() && // preserve dependency on test
!uses_to_ignore.member(u) &&
!nodes_above_barrier.member(u) &&
(u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
(ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
Node* old_c = phase->ctrl_or_self(u);
Node* c = old_c;
if (c != ctrl ||
if (old_c != ctrl ||
is_dominator_same_ctrl(old_c, barrier, u, phase) ||
ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
phase->igvn().rehash_node_delayed(u);
@@ -1315,7 +1330,7 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
// Expand load-reference-barriers
MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
Unique_Node_List uses_to_ignore;
Unique_Node_List nodes_above_barriers;
for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
uint last = phase->C->unique();
@@ -1410,7 +1425,7 @@ void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
Node* out_val = val_phi;
phase->register_new_node(val_phi, region);
fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);
fix_ctrl(lrb, region, fixer, uses, nodes_above_barriers, last, phase);
ctrl = orig_ctrl;
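The Shenandoah refactor above extracts collect_nodes_above_barrier() and the anti-dependence helpers, all built on a worklist that ignores duplicates (Unique_Node_List). A generic version of that worklist walk is sketched below; Node, UniqueWorklist and collect_reachable_inputs are simplified stand-ins and omit the control and aliasing filters the real helpers apply.

#include <cstddef>
#include <unordered_set>
#include <vector>

// Minimal analog of a Unique_Node_List: a worklist that ignores duplicates.
struct Node {
  std::vector<Node*> inputs;   // data inputs, by analogy with Node::in(i)
};

class UniqueWorklist {
 public:
  void push(Node* n) {
    if (n != nullptr && _seen.insert(n).second) {
      _items.push_back(n);
    }
  }
  bool member(Node* n) const { return _seen.count(n) != 0; }
  std::size_t size() const { return _items.size(); }
  Node* at(std::size_t i) const { return _items[i]; }

 private:
  std::vector<Node*> _items;
  std::unordered_set<Node*> _seen;
};

// Transitively collects 'seed' and the inputs of everything collected so far.
// The list grows while we iterate over it, which is the same pattern the
// collect_nodes_above_barrier() loop above relies on.
void collect_reachable_inputs(Node* seed, UniqueWorklist& out) {
  out.push(seed);
  for (std::size_t next = 0; next < out.size(); next++) {
    Node* n = out.at(next);
    for (Node* in : n->inputs) {
      out.push(in);
    }
  }
}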

View File

@@ -62,8 +62,12 @@ private:
PhaseIdealLoop* phase, int flags);
static void call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr,
DecoratorSet decorators, PhaseIdealLoop* phase);
static void collect_nodes_above_barrier(Unique_Node_List &nodes_above_barrier, PhaseIdealLoop* phase, Node* ctrl,
Node* init_raw_mem);
static void test_in_cset(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase);
static void fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase);
static void fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& nodes_above_barrier, uint last, PhaseIdealLoop* phase);
static Node* get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* lrb);
public:
@@ -76,6 +80,11 @@ public:
static bool expand(Compile* C, PhaseIterGVN& igvn);
static void pin_and_expand(PhaseIdealLoop* phase);
static void push_data_inputs_at_control(PhaseIdealLoop* phase, Node* n, Node* ctrl,
Unique_Node_List &wq);
static bool is_anti_dependent_load_at_control(PhaseIdealLoop* phase, Node* maybe_load, Node* store, Node* control);
static void maybe_push_anti_dependent_loads(PhaseIdealLoop* phase, Node* maybe_store, Node* control, Unique_Node_List &wq);
#ifdef ASSERT
static void verify(RootNode* root);
#endif

View File

@@ -415,10 +415,6 @@ void ShenandoahConcurrentGC::entry_reset() {
msg);
op_reset();
}
if (heap->mode()->is_generational()) {
heap->old_generation()->card_scan()->mark_read_table_as_clean();
}
}
void ShenandoahConcurrentGC::entry_scan_remembered_set() {
@@ -644,6 +640,10 @@ void ShenandoahConcurrentGC::op_reset() {
} else {
_generation->prepare_gc();
}
if (heap->mode()->is_generational()) {
heap->old_generation()->card_scan()->mark_read_table_as_clean();
}
}
class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {

View File

@@ -136,9 +136,15 @@ void ShenandoahDegenGC::op_degenerated() {
heap->set_unload_classes(_generation->heuristics()->can_unload_classes() &&
(!heap->mode()->is_generational() || _generation->is_global()));
if (heap->mode()->is_generational() && _generation->is_young()) {
// Swap remembered sets for young
_generation->swap_card_tables();
if (heap->mode()->is_generational()) {
// Clean the read table before swapping it. The end goal here is to have a clean
// write table, and to have the read table updated with the previous write table.
heap->old_generation()->card_scan()->mark_read_table_as_clean();
if (_generation->is_young()) {
// Swap remembered sets for young
_generation->swap_card_tables();
}
}
case _degenerated_roots:
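The degenerated-GC hunk cleans the read card table before swapping, so that after the swap the write table is clean and the read table holds the cards dirtied in the previous cycle. A double-buffered sketch of that ordering follows; RememberedSet and its methods are invented for the example.

#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

// The collector scans the read table while mutators keep dirtying the write
// table. Before the old read table can become the next write table it must
// be cleared, mirroring mark_read_table_as_clean() followed by swap_card_tables().
class RememberedSet {
 public:
  explicit RememberedSet(std::size_t cards)
      : _read(cards, 0), _write(cards, 0) {}

  void mark_read_table_as_clean() {
    std::fill(_read.begin(), _read.end(), 0);
  }

  void swap_card_tables() {
    // After the swap, the cleaned buffer collects new dirty cards and the
    // previous write table (with last cycle's dirty cards) is what gets scanned.
    std::swap(_read, _write);
  }

  void dirty_card(std::size_t index)        { _write[index] = 1; }
  bool is_dirty(std::size_t index) const    { return _read[index] != 0; }

 private:
  std::vector<unsigned char> _read;
  std::vector<unsigned char> _write;
};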

View File

@@ -183,6 +183,29 @@ void ShenandoahGenerationalHeap::stop() {
regulator_thread()->stop();
}
bool ShenandoahGenerationalHeap::requires_barriers(stackChunkOop obj) const {
if (is_idle()) {
return false;
}
if (is_concurrent_young_mark_in_progress() && is_in_young(obj) && !marking_context()->allocated_after_mark_start(obj)) {
// We are marking young, this object is in young, and it is below the TAMS
return true;
}
if (is_in_old(obj)) {
// Card marking barriers are required for objects in the old generation
return true;
}
if (has_forwarded_objects()) {
// Object may have pointers that need to be updated
return true;
}
return false;
}
void ShenandoahGenerationalHeap::evacuate_collection_set(bool concurrent) {
ShenandoahRegionIterator regions;
ShenandoahGenerationalEvacuationTask task(this, &regions, concurrent, false /* only promote regions */);

View File

@@ -128,6 +128,8 @@ public:
void stop() override;
bool requires_barriers(stackChunkOop obj) const override;
// Used for logging the result of a region transfer outside the heap lock
struct TransferResult {
bool success;
