Mirror of https://github.com/JetBrains/JetBrainsRuntime.git (synced 2025-12-11 11:59:38 +01:00)
Compare commits
279 Commits
SHA1:
b5fbdb2166, 2086b0f070, d1510505c1, ae49182985, 37ebecec88, 08c7c38342, fa7521b29e, fbcf6d9c4f, a124e6e5c7, 0779f0d668,
bd66b6b6f9, 3edf379b67, 10d81a337d, 1dbad8058b, 215149310c, 23f2c97f4c, e84e0cdf62, 2243974d29, 79dd575113, 12a61bce8d,
d9dd2d19b0, b21d7b23c1, 867312a7e5, a4b49253e3, 86fcbe09f8, 48997f54c9, 4e3bfc926e, d0b4f9baab, cb3c45a698, 10f71f7dd4,
a7964453cf, 5230786a0d, 378cd12f6b, d96476d8bd, b17a1c092f, 9e22b6dec3, fdbc2b24d3, 31696a445c, 054362abe0, 9b436d048e,
487c477181, d02cb742f7, 02f240415c, 606df44141, 33fd6ae986, 8de5d2014a, f15d423fb5, 1a50bd09ef, 880c6b42ba, c7d2841fb4,
7ef2831293, 8f0785325d, 6f690a5b01, b351b5f60e, b3f540d354, 30894126a4, ca9390755b, 60ea17e848, 7564949a56, 4c09d9f828,
f7dbb98fe6, 9b3694c4fc, f73922b27d, 5dcb7a627e, 438121be6b, d7d1afb0a8, 7acfba288f, c5c0867881, d85b0ca5cd, 326dbb1b13,
9a8096feb8, 6882b381e8, cbb6747e6b, 789f704d9a, 2c1b311f81, 765ad0e40b, 8e903eeb1f, 3cbdf8d4d4, 3944e67366, d0052c032c,
4369856c6d, 1f09467230, 7dbd03388e, 9db7c95019, d826127970, 67b9a08139, 244f6ac222, a706e35b12, 612b6896d2, dce9703155,
b101dcb609, e0bab78640, 31f70391e5, 4a1cdd5ba9, 664c993c41, 8d3de45f4d, 9ee741d1e5, 0f4154a9e9, 64bbae7512, c7495fb35d,
454660d361, 67d6f3ca9e, ca30726352, 29e10e4582, c7d2a5c1c4, d230b30353, 1512011eaf, 6dac8d6452, 9686e804a2, 4de620732f,
1f9e62904c, 27af19d921, 1c514b34c0, d07e530d33, f0bffbce35, b10158624b, 5ed0d52c84, 91101f0d4f, e0ac8249f5, d2d78ad18e,
7c83d7ab53, 75220da26f, a4c7be862c, cfe91ed39c, 769b3e48ea, 8338946a6d, 4785461f61, 24530022d0, 51b2f80627, d29d5bfb7f,
c0ce7d871f, 10eb1cb639, 2cae9a0397, 9fd0e7349e, 8aeada105a, e99f6a65a8, e650bdf465, e4fbb15c6a, 681137cad2, 914423e3b7,
5abc02927b, d9e7b7e7da, 1e04ee6d57, 32ee252c45, e930bc1fbb, 79a78f032e, 2f2dc2289b, d481215126, 7ab74c5f26, a7864af08a,
778ad00af8, bb4aed944e, 95c8a69b0e, ba323b515d, 22ef827e2c, 2ab8ab5613, 1b7d59f171, e304a8ae63, 3634a91050, ffb0867e2c,
79f49983d3, ec88c6a872, b3e29db144, 11e926cf50, 44c1845ae7, 922e312b0a, 1d889e54fc, 32636dcc3d, 707154235b, 4acafb809c,
921860d41d, 4a20691e9b, f608918df3, 1b04f6487c, 3cff588a31, 2b4a4b7bd8, 286cc163fd, 579cf705ff, 57bfd0e393, 9a72068ef0,
66d9bfce29, 2ea365c945, 789ac8b276, c8eea59f50, bc7d9e3d0b, 03b7a8586a, 43a2f17342, fed2b56017, 6cda4c5985, c003c1207f,
6d718ae51a, 9b64ece514, 3d4eb159e6, 2cca83bc82, b8ae11e99b, 9a83dfee14, 01060ad4ab, 673f767dad, 91ab088d5e, 9ac8d05a25,
91caec07cb, da6aa2a86c, b8f2ec9091, 0f3e2cc334, 51ae08f72b, 4754f059f9, 87a06b6ce4, e708d135e3, 7b52d0acfc, aa4c83a5bf,
cabe337400, 2edb6d9813, 1850914380, 2f2cf38bb5, b5e1615c00, 86eb5d9f3b, be1d374bc5, ed81a478e1, 08face8c4c, 793fd72fa6,
891d5aedf1, f3d6fbf52e, 1b8dea4a92, 0e7ea390bb, 72fbfe18cb, 08891553bb, 7f0ad513c3, 617edf3f0d, ffa4badb78, a3a367ef5d,
61db2f5b90, a083364520, 16dba04e8d, 4e8deb396e, 08d51003d1, 97ee2ffb89, 985b9ce79a, 05f13e75ee, 7bf1989f59, 253508b03a,
ebc520e83f, 236432dbdb, b3b33667ad, f66a586614, cd3e4c0366, c2cca2ab44, 6d2aeb82bc, cfdc64fcb4, 0c934ff4e2, c099f14f07,
6a35311468, f16265d69b, 5a2ba952b1, 239c1b33b4, 9b61a7608e, a71b404785, af056c1676, da3001daf7, 424eb60ded, 9b1d6d66b8,
f8a3e4e428, ddd73b4583, 0a9d1f8c89, c9a7b9772d, 7fd9d6c760, 417d174aa1, 303ac9f270, 90758f6735, e19a421c30, 2581935b47,
b890336e11, 4e6d851f3f, 612ae9289a, 1ea76d338b, 94af3c23ea, 1e5a2780d9, c2180d141c, 2a11e0da02, 6829d9ac67
@@ -9,7 +9,7 @@ warning=issuestitle

[repository]
tags=(?:jdk-(?:[1-9]([0-9]*)(?:\.(?:0|[1-9][0-9]*)){0,4})(?:\+(?:(?:[0-9]+))|(?:-ga)))|(?:jdk[4-9](?:u\d{1,3})?-(?:(?:b\d{2,3})|(?:ga)))|(?:hs\d\d(?:\.\d{1,2})?-b\d\d)
branches=
branches=.*

[census]
version=0
@@ -177,10 +177,10 @@ is equivalent to <code>make test TEST="tier1"</code>, but the latter is
more tab-completion friendly. For more complex test runs, the
<code>test TEST="x"</code> solution needs to be used.</p>
<p>The test specifications given in <code>TEST</code> is parsed into
fully qualified test descriptors, which clearly and unambigously show
fully qualified test descriptors, which clearly and unambiguously show
which tests will be run. As an example, <code>:tier1</code> will expand
to include all subcomponent test directories that define `tier1`,
for example:
to include all subcomponent test directories that define
<code>tier1</code>, for example:
<code>jtreg:$(TOPDIR)/test/hotspot/jtreg:tier1 jtreg:$(TOPDIR)/test/jdk:tier1 jtreg:$(TOPDIR)/test/langtools:tier1 ...</code>.
You can always submit a list of fully qualified test descriptors in the
<code>TEST</code> variable if you want to shortcut the parser.</p>

@@ -228,7 +228,7 @@ tests.</p></li>
These contain, among other things, tests that either run for too long to
be at <code>tier1</code>, or may require special configuration, or tests
that are less stable, or cover the broader range of non-core JVM and JDK
features/components(for example, XML).</p></li>
features/components (for example, XML).</p></li>
<li><p><code>tier3</code>: This test group includes more stressful
tests, the tests for corner cases not covered by previous tiers, plus
the tests that require GUIs. As such, this suite should either be run

@@ -368,7 +368,7 @@ would give an error, while <code>JTREG_TMIEOUT_FACTOR=8</code> would
just pass unnoticed.</p>
<p>To separate multiple keyword=value pairs, use <code>;</code>
(semicolon). Since the shell normally eats <code>;</code>, the
recommended usage is to write the assignment inside qoutes, e.g.
recommended usage is to write the assignment inside quotes, e.g.
<code>JTREG="...;..."</code>. This will also make sure spaces are
preserved, as in
<code>JTREG="JAVA_OPTIONS=-XshowSettings -Xlog:gc+ref=debug"</code>.</p>

@@ -397,10 +397,8 @@ TEST_OPTS keywords.</p>
<p>Applies to JTReg, GTest and Micro.</p>
<h4 id="vm_options">VM_OPTIONS</h4>
<p>Applies to JTReg, GTest and Micro.</p>
<h4 id="aot_modules">AOT_MODULES</h4>
<p>Applies to JTReg and GTest.</p>
<h4 id="jcov">JCOV</h4>
<p>This keywords applies globally to the test runner system. If set to
<p>This keyword applies globally to the test runner system. If set to
<code>true</code>, it enables JCov coverage reporting for all tests run.
To be useful, the JDK under test must be run with a JDK built with JCov
instrumentation

@@ -500,11 +498,6 @@ options to your test classes, use <code>JAVA_OPTIONS</code>.</p>
<h4 id="launcher_options">LAUNCHER_OPTIONS</h4>
<p>Additional Java options that are sent to the java launcher that
starts the JTReg harness.</p>
<h4 id="aot_modules-1">AOT_MODULES</h4>
<p>Generate AOT modules before testing for the specified module, or set
of modules. If multiple modules are specified, they should be separated
by space (or, to help avoid quoting issues, the special value
<code>%20</code>).</p>
<h4 id="retry_count">RETRY_COUNT</h4>
<p>Retry failed tests up to a set number of times, until they pass. This
allows to pass the tests with intermittent failures. Defaults to 0.</p>

@@ -527,11 +520,6 @@ intermittent problem.</p>
<p>Additional options to the Gtest test framework.</p>
<p>Use <code>GTEST="OPTIONS=--help"</code> to see all available Gtest
options.</p>
<h4 id="aot_modules-2">AOT_MODULES</h4>
<p>Generate AOT modules before testing for the specified module, or set
of modules. If multiple modules are specified, they should be separated
by space (or, to help avoid quoting issues, the special value
<code>%20</code>).</p>
<h3 id="microbenchmark-keywords">Microbenchmark keywords</h3>
<h4 id="fork">FORK</h4>
<p>Override the number of benchmark forks to spawn. Same as specifying

@@ -575,7 +563,7 @@ docker image are required on Ubuntu 18.04 by using
<p>If your locale is non-US, some tests are likely to fail. To work
around this you can set the locale to US. On Unix platforms simply
setting <code>LANG="en_US"</code> in the environment before running
tests should work. On Windows or MacOS, setting
tests should work. On Windows or macOS, setting
<code>JTREG="VM_OPTIONS=-Duser.language=en -Duser.country=US"</code>
helps for most, but not all test cases.</p>
<p>For example:</p>

@@ -610,7 +598,7 @@ provided below.</p>
Shortcuts; select or deselect desired shortcut.</p>
<p>For example,
test/jdk/javax/swing/TooltipManager/JMenuItemToolTipKeyBindingsTest/JMenuItemToolTipKeyBindingsTest.java
fails on MacOS because it uses <code>CTRL + F1</code> key sequence to
fails on macOS because it uses <code>CTRL + F1</code> key sequence to
show or hide tooltip message but the key combination is reserved by the
operating system. To run the test correctly the default global key
shortcut should be disabled using the steps described above, and then
@@ -102,8 +102,8 @@ TEST="tier1"`, but the latter is more tab-completion friendly. For more complex
test runs, the `test TEST="x"` solution needs to be used.

The test specifications given in `TEST` is parsed into fully qualified test
descriptors, which clearly and unambigously show which tests will be run. As an
example, `:tier1` will expand to include all subcomponent test directories
descriptors, which clearly and unambiguously show which tests will be run. As
an example, `:tier1` will expand to include all subcomponent test directories
that define `tier1`, for example: `jtreg:$(TOPDIR)/test/hotspot/jtreg:tier1
jtreg:$(TOPDIR)/test/jdk:tier1 jtreg:$(TOPDIR)/test/langtools:tier1 ...`. You
can always submit a list of fully qualified test descriptors in the `TEST`
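As a hedged illustration of the selection syntax this hunk documents (the test group and test directories below are placeholders, not part of the change), an invocation could look like:

$ make test TEST=":tier1"
$ make test TEST="jtreg:test/jdk:tier1 jtreg:test/langtools:tier1"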
@@ -151,7 +151,7 @@ A brief description of the tiered test groups:

- `tier2`: This test group covers even more ground. These contain, among other
things, tests that either run for too long to be at `tier1`, or may require
special configuration, or tests that are less stable, or cover the broader
range of non-core JVM and JDK features/components(for example, XML).
range of non-core JVM and JDK features/components (for example, XML).

- `tier3`: This test group includes more stressful tests, the tests for corner
cases not covered by previous tiers, plus the tests that require GUIs. As
@@ -294,7 +294,7 @@ would just pass unnoticed.

To separate multiple keyword=value pairs, use `;` (semicolon). Since the shell
normally eats `;`, the recommended usage is to write the assignment inside
qoutes, e.g. `JTREG="...;..."`. This will also make sure spaces are preserved,
quotes, e.g. `JTREG="...;..."`. This will also make sure spaces are preserved,
as in `JTREG="JAVA_OPTIONS=-XshowSettings -Xlog:gc+ref=debug"`.

(Other ways are possible, e.g. using backslash:
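A minimal sketch of the quoting rule described above, with illustrative values (the JVM options are placeholders):

$ make test TEST="tier1" JTREG="VM_OPTIONS=-Xmx768m;JAVA_OPTIONS=-XshowSettings"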
@@ -334,13 +334,9 @@ Applies to JTReg, GTest and Micro.

Applies to JTReg, GTest and Micro.

#### AOT_MODULES

Applies to JTReg and GTest.

#### JCOV

This keywords applies globally to the test runner system. If set to `true`, it
This keyword applies globally to the test runner system. If set to `true`, it
enables JCov coverage reporting for all tests run. To be useful, the JDK under
test must be run with a JDK built with JCov instrumentation (`configure
--with-jcov=<path to directory containing lib/jcov.jar>`, `make jcov-image`).
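A hedged example of a JCov-enabled run based on the text above; the JCov install path is a placeholder and the configure step assumes a local JCov distribution:

$ bash configure --with-jcov=/opt/jcov && make jcov-image
$ make test TEST="tier1" JCOV=true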
@@ -480,12 +476,6 @@ your test classes, use `JAVA_OPTIONS`.

Additional Java options that are sent to the java launcher that starts the
JTReg harness.

#### AOT_MODULES

Generate AOT modules before testing for the specified module, or set of
modules. If multiple modules are specified, they should be separated by space
(or, to help avoid quoting issues, the special value `%20`).

#### RETRY_COUNT

Retry failed tests up to a set number of times, until they pass. This allows to
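As an illustrative sketch of the RETRY_COUNT keyword described above (the test selection and retry count are placeholders):

$ make test TEST="jtreg:test/jdk/java/net" JTREG="RETRY_COUNT=2"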
@@ -517,12 +507,6 @@ Additional options to the Gtest test framework.

Use `GTEST="OPTIONS=--help"` to see all available Gtest options.

#### AOT_MODULES

Generate AOT modules before testing for the specified module, or set of
modules. If multiple modules are specified, they should be separated by space
(or, to help avoid quoting issues, the special value `%20`).

### Microbenchmark keywords

#### FORK
@@ -587,7 +571,7 @@ $ make test TEST="jtreg:test/hotspot/jtreg/containers/docker" \

If your locale is non-US, some tests are likely to fail. To work around this
you can set the locale to US. On Unix platforms simply setting `LANG="en_US"`
in the environment before running tests should work. On Windows or MacOS,
in the environment before running tests should work. On Windows or macOS,
setting `JTREG="VM_OPTIONS=-Duser.language=en -Duser.country=US"` helps for
most, but not all test cases.
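A minimal sketch of the two locale workarounds mentioned above (test selections are illustrative):

$ LANG="en_US" make test TEST="tier1"
$ make test TEST="tier1" JTREG="VM_OPTIONS=-Duser.language=en -Duser.country=US"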
@@ -635,7 +619,7 @@ select or deselect desired shortcut.

For example,
test/jdk/javax/swing/TooltipManager/JMenuItemToolTipKeyBindingsTest/JMenuItemToolTipKeyBindingsTest.java
fails on MacOS because it uses `CTRL + F1` key sequence to show or hide tooltip
fails on macOS because it uses `CTRL + F1` key sequence to show or hide tooltip
message but the key combination is reserved by the operating system. To run the
test correctly the default global key shortcut should be disabled using the
steps described above, and then deselect "Turn keyboard access on or off"
@@ -1,54 +0,0 @@
#
# Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.  Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#

default: all

include $(SPEC)
include MakeBase.gmk

################################################################################
#
# Concatenate exported.symbols files for modules into a single global file.
#

GLOBAL_SYMBOLS_FILE := $(SUPPORT_OUTPUTDIR)/build-static/exported.symbols

EXPORTED_SYMBOLS_MODULES := java.base jdk.jdwp.agent

MODULES_SYMBOLS_FILES := $(foreach module, $(EXPORTED_SYMBOLS_MODULES), \
$(SUPPORT_OUTPUTDIR)/modules_libs/$(module)/$(module).symbols)

$(GLOBAL_SYMBOLS_FILE): $(MODULES_SYMBOLS_FILES)
$(call LogInfo, Generating global exported.symbols file)
$(call MakeTargetDir)
$(CAT) $^ > $@

TARGETS += $(GLOBAL_SYMBOLS_FILE)

################################################################################

all: $(TARGETS)

.PHONY: default all
@@ -501,9 +501,12 @@ else # $(HAS_SPEC)=true
# Failure logs are only supported for "parallel" main targets, not the
# (trivial) sequential make targets (such as clean and reconfigure),
# since the failure-logs directory creation will conflict with clean.
# We also make sure the javatmp directory exists, which is needed if a java
# process (like javac) is using java.io.tmpdir.
define PrepareFailureLogs
$(RM) -r $(MAKESUPPORT_OUTPUTDIR)/failure-logs 2> /dev/null && \
$(MKDIR) -p $(MAKESUPPORT_OUTPUTDIR)/failure-logs
$(MKDIR) -p $(JAVA_TMP_DIR)
$(RM) $(MAKESUPPORT_OUTPUTDIR)/exit-with-error 2> /dev/null
endef
@@ -96,14 +96,6 @@ $(eval $(call SetupTarget, buildtools-hotspot, \
MAKEFILE := CompileToolsHotspot, \
))

################################################################################
# Special targets for certain modules

$(eval $(call SetupTarget, generate-exported-symbols, \
MAKEFILE := BuildStatic, \
DEPS := java.base-libs jdk.jdwp.agent-libs, \
))

################################################################################
# Gensrc targets, generating source before java compilation can be done
#

@@ -905,10 +897,6 @@ else

$(LAUNCHER_TARGETS): java.base-libs

ifeq ($(STATIC_BUILD), true)
$(LAUNCHER_TARGETS): generate-exported-symbols
endif

# Declare dependency from <module>-java to <module>-gensrc
$(foreach m, $(GENSRC_MODULES), $(eval $m-java: $m-gensrc))
@@ -641,11 +641,8 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_HELPER],
JDK_PICFLAG="$PICFLAG"

if test "x$OPENJDK_TARGET_OS" = xmacosx; then
# Linking is different on MacOSX
JDK_PICFLAG=''
if test "x$STATIC_BUILD" = xtrue; then
JVM_PICFLAG=""
fi
# Linking is different on macOS
JVM_PICFLAG=""
fi

# Extra flags needed when building optional static versions of certain
@@ -71,7 +71,7 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
LDFLAGS_CXX_PARTIAL_LINKING="$MACHINE_FLAG -r"

if test "x$OPENJDK_TARGET_OS" = xlinux; then
BASIC_LDFLAGS="-Wl,--exclude-libs,ALL"
BASIC_LDFLAGS="-fuse-ld=lld -Wl,--exclude-libs,ALL"
fi
if test "x$OPENJDK_TARGET_OS" = xaix; then
BASIC_LDFLAGS="-Wl,-b64 -Wl,-brtl -Wl,-bnorwexec -Wl,-bnolibpath -Wl,-bnoexpall \
@@ -267,11 +267,7 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_DEBUG_SYMBOLS],
fi
],
[
if test "x$STATIC_BUILD" = xtrue; then
with_native_debug_symbols="none"
else
with_native_debug_symbols="external"
fi
with_native_debug_symbols="external"
])
AC_MSG_RESULT([$with_native_debug_symbols])
@@ -543,24 +539,7 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_UNDEFINED_BEHAVIOR_SANITIZER],
#
AC_DEFUN_ONCE([JDKOPT_SETUP_STATIC_BUILD],
[
UTIL_ARG_ENABLE(NAME: static-build, DEFAULT: false, RESULT: STATIC_BUILD,
DESC: [enable static library build],
CHECKING_MSG: [if static build is enabled],
CHECK_AVAILABLE: [
AC_MSG_CHECKING([if static build is available])
if test "x$OPENJDK_TARGET_OS" = "xmacosx"; then
AC_MSG_RESULT([yes])
else
AC_MSG_RESULT([no])
AVAILABLE=false
fi
],
IF_ENABLED: [
STATIC_BUILD_CFLAGS="-DSTATIC_BUILD=1"
CFLAGS_JDKLIB_EXTRA="$CFLAGS_JDKLIB_EXTRA $STATIC_BUILD_CFLAGS"
CXXFLAGS_JDKLIB_EXTRA="$CXXFLAGS_JDKLIB_EXTRA $STATIC_BUILD_CFLAGS"
])
AC_SUBST(STATIC_BUILD)
UTIL_DEPRECATED_ARG_ENABLE(static-build)
])

################################################################################
@@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

@@ -46,7 +46,7 @@ m4_define(jvm_features_valid, m4_normalize( \
\
cds compiler1 compiler2 dtrace epsilongc g1gc jfr jni-check \
jvmci jvmti link-time-opt management minimal opt-size parallelgc \
serialgc services shenandoahgc static-build vm-structs zero zgc \
serialgc services shenandoahgc vm-structs zero zgc \
))

# Deprecated JVM features (these are ignored, but with a warning)

@@ -309,22 +309,6 @@ AC_DEFUN_ONCE([JVM_FEATURES_CHECK_SHENANDOAHGC],
])
])

###############################################################################
# Check if the feature 'static-build' is available on this platform.
#
AC_DEFUN_ONCE([JVM_FEATURES_CHECK_STATIC_BUILD],
[
JVM_FEATURES_CHECK_AVAILABILITY(static-build, [
AC_MSG_CHECKING([if static-build is enabled in configure])
if test "x$STATIC_BUILD" = "xtrue"; then
AC_MSG_RESULT([yes])
else
AC_MSG_RESULT([no, use --enable-static-build to enable static build.])
AVAILABLE=false
fi
])
])

###############################################################################
# Check if the feature 'zgc' is available on this platform.
#

@@ -395,7 +379,6 @@ AC_DEFUN_ONCE([JVM_FEATURES_PREPARE_PLATFORM],
JVM_FEATURES_CHECK_DTRACE
JVM_FEATURES_CHECK_JVMCI
JVM_FEATURES_CHECK_SHENANDOAHGC
JVM_FEATURES_CHECK_STATIC_BUILD
JVM_FEATURES_CHECK_ZGC

])
@@ -1,5 +1,5 @@
#
# Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

@@ -268,14 +268,16 @@ AC_DEFUN([LIB_SETUP_HSDIS_BINUTILS],
disasm_header="\"$BINUTILS_INSTALL_DIR/include/dis-asm.h\""
if test -e $BINUTILS_INSTALL_DIR/lib/libbfd.a && \
test -e $BINUTILS_INSTALL_DIR/lib/libopcodes.a && \
(test -e $BINUTILS_INSTALL_DIR/lib/libiberty.a || test -e $BINUTILS_INSTALL_DIR/lib64/libiberty.a); then
(test -e $BINUTILS_INSTALL_DIR/lib/libiberty.a || test -e $BINUTILS_INSTALL_DIR/lib64/libiberty.a || test -e $BINUTILS_INSTALL_DIR/lib32/libiberty.a); then
HSDIS_CFLAGS="-DLIBARCH_$OPENJDK_TARGET_CPU_LEGACY_LIB -I$BINUTILS_INSTALL_DIR/include"

# libiberty ignores --libdir and may be installed in $BINUTILS_INSTALL_DIR/lib or $BINUTILS_INSTALL_DIR/lib64
# depending on system setup
# libiberty ignores --libdir and may be installed in $BINUTILS_INSTALL_DIR/lib, $BINUTILS_INSTALL_DIR/lib32
# or $BINUTILS_INSTALL_DIR/lib64, depending on system setup
LIBIBERTY_LIB=""
if test -e $BINUTILS_INSTALL_DIR/lib/libiberty.a; then
LIBIBERTY_LIB="$BINUTILS_INSTALL_DIR/lib/libiberty.a"
elif test -e $BINUTILS_INSTALL_DIR/lib32/libiberty.a; then
LIBIBERTY_LIB="$BINUTILS_INSTALL_DIR/lib32/libiberty.a"
else
LIBIBERTY_LIB="$BINUTILS_INSTALL_DIR/lib64/libiberty.a"
fi
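As a hedged sketch of how a local binutils install is typically pointed at for hsdis, the configure options and paths below are assumptions for illustration, not part of this hunk:

$ bash configure --with-hsdis=binutils --with-binutils=/opt/binutils-install
$ make build-hsdis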
@@ -353,6 +353,8 @@ BUNDLES_OUTPUTDIR = $(OUTPUTDIR)/bundles
TESTMAKE_OUTPUTDIR = $(OUTPUTDIR)/test-make
MAKESUPPORT_OUTPUTDIR = $(OUTPUTDIR)/make-support

JAVA_TMP_DIR = $(SUPPORT_OUTPUTDIR)/javatmp

# This does not get overridden in a bootcycle build
CONFIGURESUPPORT_OUTPUTDIR := @CONFIGURESUPPORT_OUTPUTDIR@
BUILDJDK_OUTPUTDIR = $(OUTPUTDIR)/buildjdk

@@ -630,11 +632,11 @@ SHARED_LIBRARY_SUFFIX := @SHARED_LIBRARY_SUFFIX@
STATIC_LIBRARY_SUFFIX := @STATIC_LIBRARY_SUFFIX@
EXECUTABLE_SUFFIX := @EXECUTABLE_SUFFIX@
OBJ_SUFFIX := @OBJ_SUFFIX@
STATIC_BUILD := @STATIC_BUILD@

STRIPFLAGS := @STRIPFLAGS@

JAVA_FLAGS := @JAVA_FLAGS@
JAVA_FLAGS_TMPDIR := -Djava.io.tmpdir=$(JAVA_TMP_DIR)
JAVA_FLAGS := @JAVA_FLAGS@ $(JAVA_FLAGS_TMPDIR)
JAVA_FLAGS_BIG := @JAVA_FLAGS_BIG@
JAVA_FLAGS_SMALL := @JAVA_FLAGS_SMALL@
BUILD_JAVA_FLAGS_SMALL := @BUILD_JAVA_FLAGS_SMALL@
@@ -178,23 +178,14 @@ AC_DEFUN([TOOLCHAIN_SETUP_FILENAME_PATTERNS],
EXECUTABLE_SUFFIX='.exe'
else
LIBRARY_PREFIX=lib
SHARED_LIBRARY_SUFFIX='.so'
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
SHARED_LIBRARY_SUFFIX='.dylib'
else
SHARED_LIBRARY_SUFFIX='.so'
fi
STATIC_LIBRARY_SUFFIX='.a'
OBJ_SUFFIX='.o'
EXECUTABLE_SUFFIX=''
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
# For full static builds, we're overloading the shared library suffix
# in order to limit the amount of changes required.
# It would be better to remove SHARED and just use LIBRARY and
# LIBRARY_SUFFIX for libraries that can be built either
# shared or static and use STATIC_* for libraries that are
# always built statically.
if test "x$STATIC_BUILD" = xtrue; then
SHARED_LIBRARY_SUFFIX='.a'
else
SHARED_LIBRARY_SUFFIX='.dylib'
fi
fi
fi

AC_SUBST(LIBRARY_PREFIX)
@@ -197,7 +197,7 @@ define SetupJavaCompilationBody

ifeq ($$($1_COMPILER), bootjdk)
# Javac server is not available when using the bootjdk compiler.
$1_JAVAC_CMD := $$(JAVAC)
$1_JAVAC_CMD := $$(JAVAC) -J$$(JAVA_FLAGS_TMPDIR)

ifeq ($$($1_SMALL_JAVA), true)
$1_FLAGS += $$(addprefix -J, $$(JAVA_FLAGS_SMALL))

@@ -211,7 +211,7 @@ define SetupJavaCompilationBody
$1_TARGET_RELEASE := $$(TARGET_RELEASE_BOOTJDK)
endif
else ifeq ($$($1_COMPILER), buildjdk)
$1_JAVAC_CMD := $$(BUILD_JAVAC)
$1_JAVAC_CMD := $$(BUILD_JAVAC) -J$$(JAVA_FLAGS_TMPDIR)

ifeq ($$($1_TARGET_RELEASE), )
# If unspecified, default to the new jdk we're building
@@ -194,9 +194,6 @@ define AddJdkLibrary
endif

# Determine if the library in question is static.
ifeq ($(STATIC_BUILD), true)
$1_$2_STATIC_LIBRARY := true
endif
# Ideally, we should not hardcode these
ifeq ($(call isTargetOs, aix)+$$($1_$2_MODULE):$$($1_$2_NAME), true+java.base:jli)
$1_$2_STATIC_LIBRARY := true

@@ -259,14 +259,6 @@ define SetupBasicVariables
$1_TYPE := LIBRARY
endif

# If we're doing a static build and producing a library
# force it to be a static library and remove the -l libraries
ifeq ($(STATIC_BUILD), true)
ifeq ($$($1_TYPE), LIBRARY)
$1_TYPE := STATIC_LIBRARY
endif
endif

# STATIC_LIBS is set from Main.gmk when building static versions of certain
# native libraries.
ifeq ($(STATIC_LIBS), true)
@@ -108,19 +108,6 @@ define SetupBuildLauncherBody
))

$1_LDFLAGS += -sectcreate __TEXT __info_plist $$($1_PLIST_FILE)

ifeq ($(STATIC_BUILD), true)
$1_LDFLAGS += -exported_symbols_list \
$(SUPPORT_OUTPUTDIR)/build-static/exported.symbols
$1_LIBS += \
$$(shell $(FIND) $(SUPPORT_OUTPUTDIR)/modules_libs/java.base -name "*.a") \
$(SUPPORT_OUTPUTDIR)/modules_libs/jdk.jdwp.agent/libdt_socket.a \
$(SUPPORT_OUTPUTDIR)/modules_libs/jdk.jdwp.agent/libjdwp.a \
-framework CoreFoundation \
-framework Foundation \
-framework SystemConfiguration \
-lstdc++ -liconv
endif
endif

$1_EXTRA_FILES := $(LAUNCHER_SRC)/main.c
@@ -114,13 +114,6 @@ define CreateStaticLibrary
$(if $$($1_LINK_OBJS_RELATIVE), $$(CD) $$(OUTPUTDIR) ; ) \
$$($1_AR) $$(ARFLAGS) -r -cs $$($1_TARGET) \
$$($1_AR_OBJ_ARG) $$($1_RES))
ifeq ($(STATIC_BUILD), true)
$(RM) $$(@D)/$$(basename $$(@F)).symbols; \
$(ECHO) "Getting symbols from nm"; \
$(NM) $(NMFLAGS) -m $$($1_TARGET) | $(GREP) "__TEXT" | \
$(EGREP) -v "non-external|private extern|__TEXT,__eh_frame" | \
$(SED) -e 's/.* //' > $$(@D)/$$(basename $$(@F)).symbols
endif
endef

################################################################################

make/data/hotspot-symbols/version-script-clang.txt (new file, 8 lines)
@@ -0,0 +1,8 @@
SUNWprivate_1.1 {
global:
*;

local:
_fini;
_init;
};
|
||||
|
||||
JVM_ASFLAGS += $(EXTRA_ASFLAGS)
|
||||
|
||||
JVM_ASFLAGS += \
|
||||
-I$(TOPDIR)/src/hotspot/os_cpu/$(HOTSPOT_TARGET_OS)_$(HOTSPOT_TARGET_CPU_ARCH) \
|
||||
-I$(TOPDIR)/src/hotspot/os/$(HOTSPOT_TARGET_OS) \
|
||||
-I$(TOPDIR)/src/hotspot/os/$(HOTSPOT_TARGET_OS_TYPE) \
|
||||
#
|
||||
|
||||
JVM_LIBS += \
|
||||
$(JVM_LIBS_FEATURES) \
|
||||
#
|
||||
@@ -148,7 +154,7 @@ ifeq ($(call isTargetOs, windows), true)
|
||||
endif
|
||||
|
||||
ifeq ($(call isTargetOs, linux), true)
|
||||
HOTSPOT_VERSION_SCRIPT := $(TOPDIR)/make/data/hotspot-symbols/version-script.txt
|
||||
HOTSPOT_VERSION_SCRIPT := $(TOPDIR)/make/data/hotspot-symbols/version-script-$(TOOLCHAIN_TYPE).txt
|
||||
|
||||
JVM_LDFLAGS += -Wl,-version-script=$(HOTSPOT_VERSION_SCRIPT)
|
||||
endif
|
||||
|
||||
@@ -78,10 +78,6 @@ ifeq ($(call check-jvm-feature, dtrace), true)
|
||||
JVM_CFLAGS_FEATURES += -DDTRACE_ENABLED
|
||||
endif
|
||||
|
||||
ifeq ($(call check-jvm-feature, static-build), true)
|
||||
JVM_CFLAGS_FEATURES += -DSTATIC_BUILD=1
|
||||
endif
|
||||
|
||||
ifneq ($(call check-jvm-feature, jvmti), true)
|
||||
JVM_CFLAGS_FEATURES += -DINCLUDE_JVMTI=0
|
||||
JVM_EXCLUDE_FILES += jvmtiGetLoadedClasses.cpp jvmtiThreadState.cpp jvmtiExtensions.cpp \
|
||||
|
||||
@@ -151,6 +151,17 @@ public class HelloClasslist {
|
||||
|
||||
LOGGER.log(Level.FINE, "New Date: " + newDate + " - old: " + oldDate);
|
||||
|
||||
// Pull SwitchBootstraps and associated classes into the classlist
|
||||
record A(int a) { }
|
||||
record B(int b) { }
|
||||
Object o = new A(4711);
|
||||
int value = switch (o) {
|
||||
case A a -> a.a;
|
||||
case B b -> b.b;
|
||||
default -> 17;
|
||||
};
|
||||
LOGGER.log(Level.FINE, "Value: " + value);
|
||||
|
||||
// The Striped64$Cell is loaded rarely only when there's a contention among
|
||||
// multiple threads performing LongAdder.increment(). This results in
|
||||
// an inconsistency in the classlist between builds (see JDK-8295951).
|
||||
|
||||
@@ -778,7 +778,7 @@ public class FieldGen {
|
||||
result.appendLine("}");
|
||||
|
||||
result.appendLine("@Override");
|
||||
result.appendLine("protected int mult(long[] a, long[] b, long[] r) {");
|
||||
result.appendLine("protected void mult(long[] a, long[] b, long[] r) {");
|
||||
result.incrIndent();
|
||||
for (int i = 0; i < 2 * params.getNumLimbs() - 1; i++) {
|
||||
result.appendIndent();
|
||||
@@ -804,9 +804,6 @@ public class FieldGen {
|
||||
}
|
||||
}
|
||||
result.append(");\n");
|
||||
result.appendIndent();
|
||||
result.append("return 0;");
|
||||
result.appendLine();
|
||||
result.decrIndent();
|
||||
result.appendLine("}");
|
||||
|
||||
@@ -836,7 +833,7 @@ public class FieldGen {
|
||||
// }
|
||||
// }
|
||||
result.appendLine("@Override");
|
||||
result.appendLine("protected int square(long[] a, long[] r) {");
|
||||
result.appendLine("protected void square(long[] a, long[] r) {");
|
||||
result.incrIndent();
|
||||
for (int i = 0; i < 2 * params.getNumLimbs() - 1; i++) {
|
||||
result.appendIndent();
|
||||
@@ -877,9 +874,6 @@ public class FieldGen {
|
||||
}
|
||||
}
|
||||
result.append(");\n");
|
||||
result.appendIndent();
|
||||
result.append("return 0;");
|
||||
result.appendLine();
|
||||
result.decrIndent();
|
||||
result.appendLine("}");
|
||||
|
||||
|
||||
@@ -340,6 +340,10 @@ public class CreateSymbols {
|
||||
"Ljdk/internal/javac/PreviewFeature;";
|
||||
private static final String PREVIEW_FEATURE_ANNOTATION_INTERNAL =
|
||||
"Ljdk/internal/PreviewFeature+Annotation;";
|
||||
private static final String RESTRICTED_ANNOTATION =
|
||||
"Ljdk/internal/javac/Restricted;";
|
||||
private static final String RESTRICTED_ANNOTATION_INTERNAL =
|
||||
"Ljdk/internal/javac/Restricted+Annotation;";
|
||||
private static final String VALUE_BASED_ANNOTATION =
|
||||
"Ljdk/internal/ValueBased;";
|
||||
private static final String VALUE_BASED_ANNOTATION_INTERNAL =
|
||||
@@ -349,7 +353,8 @@ public class CreateSymbols {
|
||||
"Lsun/Proprietary+Annotation;",
|
||||
PREVIEW_FEATURE_ANNOTATION_OLD,
|
||||
PREVIEW_FEATURE_ANNOTATION_NEW,
|
||||
VALUE_BASED_ANNOTATION));
|
||||
VALUE_BASED_ANNOTATION,
|
||||
RESTRICTED_ANNOTATION));
|
||||
|
||||
private void stripNonExistentAnnotations(LoadDescriptions data) {
|
||||
Set<String> allClasses = data.classes.name2Class.keySet();
|
||||
@@ -1247,6 +1252,12 @@ public class CreateSymbols {
|
||||
annotationType = VALUE_BASED_ANNOTATION_INTERNAL;
|
||||
}
|
||||
|
||||
if (RESTRICTED_ANNOTATION.equals(annotationType)) {
|
||||
//the non-public Restricted annotation will not be available in ct.sym,
|
||||
//replace with purely synthetic javac-internal annotation:
|
||||
annotationType = RESTRICTED_ANNOTATION_INTERNAL;
|
||||
}
|
||||
|
||||
return new Annotation(null,
|
||||
addString(constantPool, annotationType),
|
||||
createElementPairs(constantPool, values));
|
||||
|
||||
@@ -97,6 +97,7 @@ public interface MessageType {
|
||||
TOKEN("token", "TokenKind", "com.sun.tools.javac.parser.Tokens"),
|
||||
TREE_TAG("tree tag", "Tag", "com.sun.tools.javac.tree.JCTree"),
|
||||
TYPE("type", "Type", "com.sun.tools.javac.code"),
|
||||
ANNOTATED_TYPE("annotated-type", "AnnotatedType", "com.sun.tools.javac.util.JCDiagnostic"),
|
||||
URL("url", "URL", "java.net"),
|
||||
SET("set", "Set", "java.util"),
|
||||
LIST("list", "List", "java.util"),
|
||||
|
||||
@@ -27,7 +27,7 @@
|
||||
# new warning is added to javac, it can be temporarily added to the
|
||||
# disabled warnings list.
|
||||
#
|
||||
DISABLED_WARNINGS_java += dangling-doc-comments
|
||||
# DISABLED_WARNINGS_java +=
|
||||
|
||||
DOCLINT += -Xdoclint:all/protected \
|
||||
'-Xdoclint/package:java.*,javax.*'
|
||||
|
||||
@@ -107,7 +107,7 @@ ifeq ($(call isTargetOs, macosx), true)
|
||||
TARGETS += $(BUILD_LIBOSXSECURITY)
|
||||
endif
|
||||
|
||||
ifeq ($(call isTargetOsType, unix)+$(STATIC_BUILD), true+false)
|
||||
ifeq ($(call isTargetOsType, unix), true)
|
||||
##############################################################################
|
||||
## Build libjsig
|
||||
##############################################################################
|
||||
@@ -145,35 +145,6 @@ ifeq ($(call isTargetOsType, unix)+$(STATIC_BUILD), true+false)
|
||||
endif
|
||||
endif
|
||||
|
||||
################################################################################
|
||||
# Create the symbols file for static builds.
|
||||
ifeq ($(STATIC_BUILD), true)
|
||||
STATIC_SYMBOLS_DIR := $(SUPPORT_OUTPUTDIR)/modules_libs/java.base
|
||||
JAVA_BASE_EXPORT_SYMBOLS_SRC := \
|
||||
$(STATIC_SYMBOLS_DIR)/$(LIBRARY_PREFIX)jli.symbols \
|
||||
$(STATIC_SYMBOLS_DIR)/$(LIBRARY_PREFIX)java.symbols \
|
||||
$(STATIC_SYMBOLS_DIR)/$(LIBRARY_PREFIX)net.symbols \
|
||||
$(STATIC_SYMBOLS_DIR)/$(LIBRARY_PREFIX)nio.symbols \
|
||||
$(STATIC_SYMBOLS_DIR)/$(LIBRARY_PREFIX)verify.symbols \
|
||||
$(STATIC_SYMBOLS_DIR)/$(LIBRARY_PREFIX)zip.symbols \
|
||||
$(STATIC_SYMBOLS_DIR)/$(LIBRARY_PREFIX)jimage.symbols \
|
||||
$(STATIC_SYMBOLS_DIR)/server/$(LIBRARY_PREFIX)jvm.symbols \
|
||||
#
|
||||
|
||||
JAVA_BASE_EXPORT_SYMBOL_FILE := $(STATIC_SYMBOLS_DIR)/java.base.symbols
|
||||
|
||||
$(JAVA_BASE_EXPORT_SYMBOL_FILE): $(JAVA_BASE_EXPORT_SYMBOLS_SRC)
|
||||
$(call LogInfo, Generating java.base.symbols file)
|
||||
$(CAT) $^ > $@
|
||||
|
||||
# The individual symbol files is generated when the respective lib is built
|
||||
$(JAVA_BASE_EXPORT_SYMBOLS_SRC): $(BUILD_LIBJLI) $(BUILD_LIBJAVA) \
|
||||
$(BUILD_LIBNET) $(BUILD_LIBNIO) $(BUILD_LIBVERIFY) $(BUILD_LIBZIP) \
|
||||
$(BUILD_LIBJIMAGE)
|
||||
|
||||
TARGETS += $(JAVA_BASE_EXPORT_SYMBOL_FILE)
|
||||
endif
|
||||
|
||||
################################################################################
|
||||
## Build libsyslookup
|
||||
################################################################################
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
# Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
@@ -24,14 +24,17 @@
|
||||
#
|
||||
|
||||
include CopyCommon.gmk
|
||||
include Modules.gmk
|
||||
|
||||
################################################################################
|
||||
#
|
||||
# Copy property and template files from share/conf to CONF_DST_DIR
|
||||
#
|
||||
$(eval $(call SetupCopyFiles, COPY_XML_MODULE_CONF, \
|
||||
DEST := $(CONF_DST_DIR), \
|
||||
SRC := $(TOPDIR)/src/java.xml/share/conf, \
|
||||
FILES := jaxp.properties jaxp-strict.properties.template, \
|
||||
))
|
||||
|
||||
XML_LIB_SRC := $(TOPDIR)/src/java.xml/share/conf
|
||||
|
||||
$(CONF_DST_DIR)/jaxp.properties: $(XML_LIB_SRC)/jaxp.properties
|
||||
$(call install-file)
|
||||
|
||||
TARGETS := $(CONF_DST_DIR)/jaxp.properties
|
||||
|
||||
TARGETS += $(COPY_XML_MODULE_CONF)
|
||||
################################################################################
|
||||
|
||||
@@ -68,24 +68,3 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBJDWP, \
))

TARGETS += $(BUILD_LIBJDWP)

################################################################################
# Setup static build symbols

ifeq ($(STATIC_BUILD), true)
STATIC_SYMBOLS_DIR := $(SUPPORT_OUTPUTDIR)/modules_libs/jdk.jdwp.agent
JDK_JDWP_AGENT_EXPORT_SYMBOLS_SRC := \
$(STATIC_SYMBOLS_DIR)/$(LIBRARY_PREFIX)dt_socket.symbols \
$(STATIC_SYMBOLS_DIR)/$(LIBRARY_PREFIX)jdwp.symbols

JDK_JDWP_AGENT_EXPORT_SYMBOL_FILE := $(STATIC_SYMBOLS_DIR)/jdk.jdwp.agent.symbols

$(JDK_JDWP_AGENT_EXPORT_SYMBOL_FILE): $(JDK_JDWP_AGENT_EXPORT_SYMBOLS_SRC)
$(call LogInfo, Generating jdk.jdwp.agent symbols file)
$(CAT) $^ > $@

# The individual symbol files is generated when the respective lib is built
$(JDK_JDWP_AGENT_EXPORT_SYMBOLS_SRC): $(BUILD_LIBDT_SOCKET) $(BUILD_LIBJDWP)

TARGETS += $(JDK_JDWP_AGENT_EXPORT_SYMBOL_FILE)
endif
@@ -81,6 +81,10 @@ IMAGES_TARGETS += $(COPY_FH)
# Use JTREG_TEST_OPTS for test VM options
# Use JTREG_TESTS for jtreg tests parameter
#
# Most likely you want to select a specific test from test/failure_handler/test
# and manually inspect the results. This target does not actually verify
# anything about the failure_handler's output or even if it ran at all.
#
RUN_DIR := $(FH_SUPPORT)/test

test:
@@ -94,7 +94,7 @@ $(eval $(call SetupJavaCompilation, BUILD_JDK_MICROBENCHMARK, \
TARGET_RELEASE := $(TARGET_RELEASE_NEWJDK_UPGRADED), \
SMALL_JAVA := false, \
CLASSPATH := $(JMH_COMPILE_JARS), \
DISABLED_WARNINGS := restricted this-escape processing rawtypes cast \
DISABLED_WARNINGS := restricted this-escape processing rawtypes removal cast \
serial preview dangling-doc-comments, \
SRC := $(MICROBENCHMARK_SRC), \
BIN := $(MICROBENCHMARK_CLASSES), \
@@ -1512,7 +1512,7 @@ else
BUILD_HOTSPOT_JTREG_EXECUTABLES_LIBS_exeGetCreatedJavaVMs := -lpthread
BUILD_HOTSPOT_JTREG_EXECUTABLES_JDK_LIBS_exeGetCreatedJavaVMs := java.base:libjvm

BUILD_HOTSPOT_JTREG_EXCLUDE += libNativeException.c
BUILD_HOTSPOT_JTREG_EXCLUDE += libNativeException.c exeGetProcessorInfo.c
endif

ifeq ($(ASAN_ENABLED), true)
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions

@@ -67,22 +67,20 @@ import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.lang.reflect.InvocationTargetException;
import java.util.StringTokenizer;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import javax.swing.*;
import javax.swing.event.*;
import javax.swing.plaf.nimbus.NimbusLookAndFeel;

import static java.nio.charset.StandardCharsets.UTF_16;

/**
 * Font2DTest.java
 *
 * @author Shinsuke Fukuda
 * @author Ankit Patel [Conversion to Swing - 01/07/30]
 */

/// Main Font2DTest Class

public final class Font2DTest extends JPanel
implements ActionListener, ItemListener, ChangeListener {

@@ -95,6 +93,12 @@ public final class Font2DTest extends JPanel

/// Other menus to set parameters for text drawing
private final ChoiceV2 fontMenu;
private JPanel fontMenuPanel;
private JPanel stylePanel;
private LabelV2 fontMenuLabel = null;
private LabelV2 styleLabel = null;
private ChoiceV2 fontNameMenu;
private ChoiceV2 fontSubFamilyMenu;
private final JTextField sizeField;
private final ChoiceV2 styleMenu;
private final ChoiceV2 textMenu;

@@ -111,6 +115,9 @@ public final class Font2DTest extends JPanel
private CheckboxMenuItemV2 displayGridCBMI;
private CheckboxMenuItemV2 force16ColsCBMI;
private CheckboxMenuItemV2 showFontInfoCBMI;
private JRadioButtonMenuItem familyAndStyleRBMI;
private JRadioButtonMenuItem familyAndSubFamilyRBMI;
private JRadioButtonMenuItem fontNameRBMI;

/// JDialog boxes
private JDialog userTextDialog;

@@ -126,6 +133,7 @@ public final class Font2DTest extends JPanel
/// Status bar
private final LabelV2 statusBar;

private String currentFontName = Font.DIALOG;
private int[] fontStyles = {Font.PLAIN, Font.BOLD, Font.ITALIC, Font.BOLD | Font.ITALIC};

/// Text filename

@@ -133,6 +141,7 @@ public final class Font2DTest extends JPanel

// Enabled or disabled status of canDisplay check
private static boolean canDisplayCheck = true;
private static final Locale l = Locale.getDefault();

/// Initialize GUI variables and its layouts
public Font2DTest( JFrame f) {

@@ -143,6 +152,8 @@ public final class Font2DTest extends JPanel
statusBar = new LabelV2("");

fontMenu = new ChoiceV2( this, canDisplayCheck );
fontNameMenu = new ChoiceV2( this, false );
fontSubFamilyMenu = new ChoiceV2( this, false );
sizeField = new JTextField( "12", 3 );
sizeField.addActionListener( this );
styleMenu = new ChoiceV2( this );

@@ -175,6 +186,46 @@ public final class Font2DTest extends JPanel
}
}

private void addFontMenuToGBL(String labelText,
JComponent menuContainer,
GridBagLayout gbl,
GridBagConstraints gbc,
int leftInset,
Container target) {

fontMenuLabel = new LabelV2(labelText);
fontMenuLabel.setLabelFor(menuContainer);
GridBagConstraints gbcLabel = (GridBagConstraints) gbc.clone();
gbcLabel.insets = new Insets(2, leftInset, 2, 0);
gbcLabel.gridwidth = 1;
gbcLabel.weightx = 0;
gbcLabel.anchor = GridBagConstraints.EAST;
gbl.setConstraints(fontMenuLabel, gbcLabel);
target.add(fontMenuLabel);
gbl.setConstraints(menuContainer, gbc);
target.add( menuContainer );
}

private void addStyleMenuToGBL(String labelText,
JComponent menuContainer,
GridBagLayout gbl,
GridBagConstraints gbc,
int leftInset,
Container target) {

styleLabel = new LabelV2(labelText);
styleLabel.setLabelFor(menuContainer);
GridBagConstraints gbcLabel = (GridBagConstraints) gbc.clone();
gbcLabel.insets = new Insets(2, leftInset, 2, 0);
gbcLabel.gridwidth = 1;
gbcLabel.weightx = 0;
gbcLabel.anchor = GridBagConstraints.EAST;
gbl.setConstraints(styleLabel, gbcLabel);
target.add(styleLabel);
gbl.setConstraints(menuContainer, gbc);
target.add(menuContainer);
}

/// Set up the main interface panel
private void setupPanel() {
GridBagLayout gbl = new GridBagLayout();

@@ -184,43 +235,49 @@ public final class Font2DTest extends JPanel
gbc.insets = new Insets( 2, 0, 2, 2 );
this.setLayout( gbl );

addLabeledComponentToGBL( "Font: ", fontMenu, gbl, gbc, this );
addLabeledComponentToGBL( "Size: ", sizeField, gbl, gbc, this );
fontMenuPanel = new JPanel();
fontMenuPanel.setLayout(new GridLayout());
fontMenuPanel.add(fontMenu);
addFontMenuToGBL(FAMILY_LABEL_TEXT, fontMenuPanel, gbl, gbc, 2, this );

stylePanel = new JPanel();
stylePanel.setLayout(new GridLayout());
stylePanel.add(styleMenu);
addStyleMenuToGBL(STYLE_LABEL_TEXT, stylePanel, gbl, gbc, 40, this );

gbc.gridwidth = GridBagConstraints.REMAINDER;
addLabeledComponentToGBL( "Font Transform:",
transformMenu, gbl, gbc, this );
transformMenu, gbl, gbc, 20, this );
gbc.gridwidth = 1;

addLabeledComponentToGBL( "Range: ", rm, gbl, gbc, this );
addLabeledComponentToGBL( "Style: ", styleMenu, gbl, gbc, this );
addLabeledComponentToGBL( "Range: ", rm, gbl, gbc, 2, this );
addLabeledComponentToGBL( "Size: ", sizeField, gbl, gbc, 40, this );
gbc.gridwidth = GridBagConstraints.REMAINDER;
addLabeledComponentToGBL( "Graphics Transform: ",
transformMenuG2, gbl, gbc, this );
transformMenuG2, gbl, gbc, 20, this );
gbc.gridwidth = 1;

gbc.anchor = GridBagConstraints.WEST;
addLabeledComponentToGBL( "Method: ", methodsMenu, gbl, gbc, this );
addLabeledComponentToGBL("", null, gbl, gbc, this);
addLabeledComponentToGBL( "Method: ", methodsMenu, gbl, gbc, 2, this );
addLabeledComponentToGBL("", null, gbl, gbc, 40, this);
gbc.anchor = GridBagConstraints.EAST;
gbc.gridwidth = GridBagConstraints.REMAINDER;
addLabeledComponentToGBL( "Text to use:", textMenu, gbl, gbc, this );

gbc.weightx=1;
gbc.gridwidth = GridBagConstraints.REMAINDER;
addLabeledComponentToGBL( "Text to use:", textMenu, gbl, gbc, 20, this );

gbc.gridwidth = 1;
gbc.fill = GridBagConstraints.HORIZONTAL;
gbc.anchor = GridBagConstraints.WEST;
addLabeledComponentToGBL("LCD contrast: ",
contrastSlider, gbl, gbc, this);
contrastSlider, gbl, gbc, 2, this);

gbc.gridwidth = 1;
gbc.fill = GridBagConstraints.NONE;
addLabeledComponentToGBL("Antialiasing: ",
antiAliasMenu, gbl, gbc, this);
antiAliasMenu, gbl, gbc, 40, this);

gbc.anchor = GridBagConstraints.EAST;
gbc.gridwidth = GridBagConstraints.REMAINDER;
addLabeledComponentToGBL("Fractional metrics: ",
fracMetricsMenu, gbl, gbc, this);
fracMetricsMenu, gbl, gbc, 20, this);

gbc.weightx = 1;
gbc.weighty = 1;

@@ -241,16 +298,23 @@ public final class Font2DTest extends JPanel
JComponent c,
GridBagLayout gbl,
GridBagConstraints gbc,
int leftInset,
Container target ) {
LabelV2 l = new LabelV2( name );
l.setLabelFor(c);
GridBagConstraints gbcLabel = (GridBagConstraints) gbc.clone();
gbcLabel.insets = new Insets( 2, 2, 2, 0 );
if (gbcLabel.gridwidth == GridBagConstraints.REMAINDER) {
gbcLabel.gridwidth = GridBagConstraints.RELATIVE;
}

gbcLabel.insets = new Insets( 2, leftInset, 2, 0 );
gbcLabel.gridwidth = 1;
gbcLabel.weightx = 0;

if ( c == null )
c = new JLabel( "" );

gbcLabel.anchor = GridBagConstraints.EAST;
gbl.setConstraints( l, gbcLabel );
target.add( l );
gbl.setConstraints( c, gbc );

@@ -277,6 +341,21 @@ public final class Font2DTest extends JPanel
optionMenu.add( displayGridCBMI );
optionMenu.add( force16ColsCBMI );
optionMenu.add( showFontInfoCBMI );
optionMenu.addSeparator();
familyAndStyleRBMI = new JRadioButtonMenuItem("Select font using Family Name and Style");
familyAndStyleRBMI.addActionListener(this);
familyAndSubFamilyRBMI = new JRadioButtonMenuItem("Select font using Family Name and SubFamily");
familyAndSubFamilyRBMI.addActionListener(this);
fontNameRBMI = new JRadioButtonMenuItem("Select font using Full Name");
fontNameRBMI.addActionListener(this);
ButtonGroup bg = new ButtonGroup();
bg.add(familyAndStyleRBMI);
bg.add(familyAndSubFamilyRBMI);
bg.add(fontNameRBMI);
familyAndStyleRBMI.setSelected(true);
optionMenu.add(familyAndStyleRBMI);
optionMenu.add(familyAndSubFamilyRBMI);
optionMenu.add(fontNameRBMI);

JMenuBar mb = parent.getJMenuBar();
if ( mb == null )

@@ -286,12 +365,17 @@ public final class Font2DTest extends JPanel

parent.setJMenuBar( mb );

String[] fontList =
GraphicsEnvironment.getLocalGraphicsEnvironment().getAvailableFontFamilyNames();
String[] fontList = getAllFamilyNames();
for (int i = 0; i < fontList.length; i++ ) {
fontMenu.addItem( fontList[i] );
}
fontMenu.setSelectedItem("Dialog");

for ( int i = 0; i < fontList.length; i++ )
fontMenu.addItem( fontList[i] );
fontMenu.setSelectedItem( "Dialog" );
fontList = getAllFontNames();
for (int i = 0; i < fontList.length; i++ ) {
fontNameMenu.addItem( fontList[i] );
}
fontNameMenu.setSelectedItem("Dialog");

styleMenu.addItem( "Plain" );
styleMenu.addItem( "Bold" );

@@ -647,6 +731,10 @@ public final class Font2DTest extends JPanel
displayGridCBMI.getState() + "\n" +
force16ColsCBMI.getState() + "\n" +
showFontInfoCBMI.getState() + "\n" +
fontSelectionType + "\n" +
(String)fontMenu.getSelectedItem() + "\n" +
(String)fontNameMenu.getSelectedItem() + "\n" +
(String)fontSubFamilyMenu.getSelectedItem() + "\n" +
rm.getSelectedItem() + "\n" +
range[0] + "\n" + range[1] + "\n" + curOptions + tFileName);
byte[] toBeWritten = completeOptions.getBytes(UTF_16);

@@ -724,6 +812,10 @@ public final class Font2DTest extends JPanel
boolean displayGridOpt = Boolean.parseBoolean( perLine.nextToken() );
boolean force16ColsOpt = Boolean.parseBoolean( perLine.nextToken() );
boolean showFontInfoOpt = Boolean.parseBoolean( perLine.nextToken() );
int fontSelType = Integer.parseInt( perLine.nextToken() );
String fmItem = perLine.nextToken();
String fnmItem = perLine.nextToken();
String fsmItem = perLine.nextToken();
String rangeNameOpt = perLine.nextToken();
int rangeStartOpt = Integer.parseInt( perLine.nextToken() );
int rangeEndOpt = Integer.parseInt( perLine.nextToken() );

@@ -756,7 +848,11 @@ public final class Font2DTest extends JPanel
force16ColsCBMI.setState( force16ColsOpt );
showFontInfoCBMI.setState( showFontInfoOpt );
rm.setSelectedRange( rangeNameOpt, rangeStartOpt, rangeEndOpt );
fontMenu.setSelectedItem( fontNameOpt );
currentFontName = fontNameOpt;
setFontSelectionType(fontSelType);
fontMenu.setSelectedItem( fmItem );
fontNameMenu.setSelectedItem( fnmItem );
fontSubFamilyMenu.setSelectedItem( fsmItem );
sizeField.setText( String.valueOf( fontSizeOpt ));
styleMenu.setSelectedIndex( fontStyleOpt );
transformMenu.setSelectedIndex( fontTransformOpt );
|
||||
}
|
||||
}
|
||||
|
||||
static final int FAMILY_AND_STYLE = 1;
|
||||
static final int FONT_NAME = 2;
|
||||
static final int FAMILY_AND_SUBFAMILY = 3;
|
||||
static int fontSelectionType = FAMILY_AND_STYLE;
|
||||
|
||||
static final String FAMILY_LABEL_TEXT = "Font Family:";
|
||||
static final String NAME_LABEL_TEXT = "Font Name:";
|
||||
static final String STYLE_LABEL_TEXT = "Style:";
|
||||
static final String SUBFAMILY_LABEL_TEXT = "Subfamily:";
|
||||
|
||||
void setUseFamilyAndStyle() {
|
||||
if (fontSelectionType == FAMILY_AND_STYLE) {
|
||||
return;
|
||||
}
|
||||
fontMenuLabel.setText(FAMILY_LABEL_TEXT);
|
||||
fontMenuPanel.removeAll();
|
||||
fontMenuPanel.add(fontMenu);
|
||||
if (fontSelectionType == FAMILY_AND_SUBFAMILY) {
|
||||
styleLabel.setText(STYLE_LABEL_TEXT);
|
||||
stylePanel.removeAll();
|
||||
stylePanel.add(styleMenu);
|
||||
}
|
||||
fontSelectionType = FAMILY_AND_STYLE;
|
||||
if (!familyAndStyleRBMI.isSelected()) {
|
||||
familyAndStyleRBMI.setSelected(true);
|
||||
}
|
||||
styleMenu.setSelectedIndex(0);
|
||||
currentFontName = (String)fontMenu.getSelectedItem();
|
||||
fp.setFontParams(currentFontName,
|
||||
Float.parseFloat(sizeField.getText()),
|
||||
0, // want to reset style to PLAIN
|
||||
transformMenu.getSelectedIndex());
|
||||
revalidate();
|
||||
repaint();
|
||||
}
|
||||
|
||||
void setUseFontName() {
|
||||
if (fontSelectionType == FONT_NAME) {
|
||||
return;
|
||||
}
|
||||
fontMenuLabel.setText(NAME_LABEL_TEXT);
|
||||
fontMenuPanel.removeAll();
|
||||
fontMenuPanel.add(fontNameMenu);
|
||||
if (fontSelectionType == FAMILY_AND_SUBFAMILY) {
|
||||
styleLabel.setText(STYLE_LABEL_TEXT);
|
||||
stylePanel.removeAll();
|
||||
stylePanel.add(styleMenu);
|
||||
}
|
||||
fontSelectionType = FONT_NAME;
|
||||
if (!fontNameRBMI.isSelected()) {
|
||||
fontNameRBMI.setSelected(true);
|
||||
}
|
||||
styleMenu.setSelectedIndex(0);
|
||||
currentFontName = (String)fontNameMenu.getSelectedItem();
|
||||
fp.setFontParams(currentFontName,
|
||||
Float.parseFloat(sizeField.getText()),
|
||||
0, // want to reset style to PLAIN
|
||||
transformMenu.getSelectedIndex());
|
||||
revalidate();
|
||||
repaint();
|
||||
}
|
||||
|
||||
void setUseFamilyAndSubFamily() {
|
||||
if (fontSelectionType == FAMILY_AND_SUBFAMILY) {
|
||||
return;
|
||||
}
|
||||
fontMenuLabel.setText(FAMILY_LABEL_TEXT);
|
||||
fontMenuPanel.removeAll();
|
||||
fontMenuPanel.add(fontMenu);
|
||||
styleLabel.setText(SUBFAMILY_LABEL_TEXT);
|
||||
stylePanel.removeAll();
|
||||
styleMenu.setSelectedIndex(0);
|
||||
String family = (String)fontMenu.getSelectedItem();
|
||||
updateSubFamilyMenu(family);
|
||||
stylePanel.add(fontSubFamilyMenu);
|
||||
fontSelectionType = FAMILY_AND_SUBFAMILY;
|
||||
if (!familyAndSubFamilyRBMI.isSelected()) {
|
||||
familyAndSubFamilyRBMI.setSelected(true);
|
||||
}
|
||||
String subname = (String)fontSubFamilyMenu.getSelectedItem();
|
||||
Font font = FontFamily.getFont(family, subname);
|
||||
currentFontName = (font != null) ? font.getFontName(l) : family;
|
||||
fp.setFontParams(currentFontName,
|
||||
Float.parseFloat(sizeField.getText()),
|
||||
0, // want to reset style to PLAIN
|
||||
transformMenu.getSelectedIndex());
|
||||
revalidate();
|
||||
repaint();
|
||||
}
|
||||
|
||||
void setFontSelectionType(int fsType) {
|
||||
switch (fsType) {
|
||||
case FAMILY_AND_STYLE :
|
||||
setUseFamilyAndStyle();
|
||||
break;
|
||||
case FONT_NAME :
|
||||
setUseFontName();
|
||||
break;
|
||||
case FAMILY_AND_SUBFAMILY :
|
||||
setUseFamilyAndSubFamily();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
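The three setUse* methods above differ mainly in how they derive the name handed to fp.setFontParams. A minimal sketch of that shared rule, assuming the mode constants, menus and FontFamily helper introduced in this patch (the helper method itself is illustrative and not part of the change):

    // Illustrative only: how each selection mode picks the name passed to fp.setFontParams(...).
    static String resolveFontName(int mode, String family, String fontName,
                                  String subFamily, java.util.Locale locale) {
        switch (mode) {
            case FAMILY_AND_STYLE:
                return family;                     // style is applied separately via styleMenu
            case FONT_NAME:
                return fontName;                   // full face name chosen directly
            case FAMILY_AND_SUBFAMILY:
                java.awt.Font f = FontFamily.getFont(family, subFamily);
                return (f != null) ? f.getFontName(locale) : family;   // fall back to the family
            default:
                throw new IllegalArgumentException("unknown selection mode " + mode);
        }
    }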
/// Interface functions...

/// ActionListener interface function
@@ -830,7 +1030,14 @@ public final class Font2DTest extends JPanel
JMenuItem mi = (JMenuItem) source;
String itemName = mi.getText();

if ( itemName.equals( "Save Selected Options..." )) {
if (source == familyAndStyleRBMI) {
setUseFamilyAndStyle();
} else if (source == familyAndSubFamilyRBMI) {
setUseFamilyAndSubFamily();
} else if (source == fontNameRBMI) {
setUseFontName();
}
else if ( itemName.equals( "Save Selected Options..." )) {
String fileName = promptFile( true, "options.txt" );
if ( fileName != null )
writeCurrentOptions( fileName );

@@ -872,11 +1079,12 @@ public final class Font2DTest extends JPanel
} catch (Exception se) {
sizeField.setText("12");
}
if ( tf == sizeField )
fp.setFontParams( fontMenu.getSelectedItem(),
if ( tf == sizeField ) {
fp.setFontParams(currentFontName,
sz,
styleMenu.getSelectedIndex(),
transformMenu.getSelectedIndex() );
}
}

else if ( source instanceof JButton ) {

@@ -901,7 +1109,41 @@ public final class Font2DTest extends JPanel

/// RangeMenu handles actions by itself and then calls fireRangeChanged,
/// so it is not listed or handled here
if ( c == fontMenu || c == styleMenu || c == transformMenu ) {
if ( c == fontMenu || c == fontNameMenu || c == fontSubFamilyMenu ||
c == styleMenu || c == transformMenu )
{
if (c == fontNameMenu) {
currentFontName = (String)fontNameMenu.getSelectedItem();
}
else if ((c == fontMenu) && (fontSelectionType == FAMILY_AND_STYLE)) {
currentFontName = (String)fontMenu.getSelectedItem();
}
else if ((c == fontMenu) && (fontSelectionType == FAMILY_AND_SUBFAMILY)) {
String family = (String)fontMenu.getSelectedItem();
updateSubFamilyMenu(family);
String subname = (String)fontSubFamilyMenu.getSelectedItem();
Font font = FontFamily.getFont(family, subname);
if (font == null) return;
currentFontName = font.getFontName(l);
}
else if (c == fontSubFamilyMenu) {
/*
* When switching families, all items are removed from the sub family list.
* This triggers a synchronous recursive ActionEvent on the EDT, which should
* be ignored here, the code removes them adds the new items and will then
* use the new default selected item.
* If we do not return, we'll not find a match and can get an NPE.
* This feels unsatisfactory, but it works.
*/
if (fontSubFamilyMenu.getItemCount() == 0) {
return;
}
String family = (String)fontMenu.getSelectedItem();
String subname = (String)fontSubFamilyMenu.getSelectedItem();
Font font = FontFamily.getFont(family, subname);
if (font == null) return;
currentFontName = font.getFontName(l);
}
float sz = 12f;
try {
sz = Float.parseFloat(sizeField.getText());
@@ -912,7 +1154,7 @@ public final class Font2DTest extends JPanel
} catch (Exception se) {
sizeField.setText("12");
}
fp.setFontParams(fontMenu.getSelectedItem(),
fp.setFontParams(currentFontName,
sz,
styleMenu.getSelectedIndex(),
transformMenu.getSelectedIndex());

@@ -1008,11 +1250,177 @@ public final class Font2DTest extends JPanel
System.exit(0);
}

static class FontFamily {

static Map<String, FontFamily> familyMap = new HashMap<>();
private static Locale l = Locale.getDefault();
private List<Font> fonts = new ArrayList<>();
private List<String> subFamilyNames = new ArrayList<>();
private Map<String, Font> nameToFontMap = new HashMap<>();
private String familyName;

private FontFamily(String name) {
this.familyName = name;
}

String stripFamily(String family, String fullName) {
if (family.equals(fullName)) {
return "";
}
char[] familyChars = family.toCharArray();
char[] fullChars = fullName.toCharArray();
int familyIndex = 0;
int fullIndex = 0;
// there's probably a clever regexp way to do this
// iterate over the chars in the family , if they are the same
// keep going, if there's a '-' or ' ', skip it. In the font name,
// do the same. If you reach the end of the family without some
// other diff, return what's left of the fullName.
while (familyIndex < familyChars.length && fullIndex < fullChars.length) {
//while (familyIndex < familyChars.length) {
if (fullIndex == fullChars.length) {
System.err.println("WEIRD FONT " + family + " " + fullName);
break;
}
if (familyChars[familyIndex] == fullChars[fullIndex]) {
familyIndex++; fullIndex++;
}
else if (familyChars[familyIndex] == ' ' && fullChars[fullIndex] == '-') {
familyIndex++; fullIndex++;
}
else if (familyChars[familyIndex] == '-' && fullChars[fullIndex] == ' ') {
familyIndex++; fullIndex++;
}
else if (familyChars[familyIndex] == ' ' || familyChars[familyIndex] == '-') {
familyIndex++;
}
else if (fullChars[fullIndex] == ' ' || fullChars[fullIndex] == '-') {
fullIndex++;
} else {
break;
}
}
if (fullIndex == fullChars.length) {
return fullName;
} else {
return fullName.substring(fullIndex);
}
}

/*
* Getting the string to display here can be an art.
* If the family is "Arial Black", then for a regular font, the
* full name may be "Arial Black", or "Arial-Black", as reported on macOS.
* For this case for the specific font might want to display the
* full name, or synthesise "Regular". But to do that we have to
* recognise that ' ' to '-' mapping.
* For "Arial Black Italic" (Arial-Black-Italic) we want to be able to
* trim so we display just "Italic".
* Then we need to be able to map the text selection back to the
* right font.
*/
void add(Font f) {
String fontName = f.getFontName(l);
int flen = familyName.length();
int nlen = fontName.length();
String sfn;

if (fontName.equals(familyName)) {
sfn = "Regular";
}
else {
sfn = stripFamily(familyName, fontName);
sfn = sfn.replace('-', ' ');
}

fonts.add(f);
subFamilyNames.add(sfn);
nameToFontMap.put(sfn, f);
}

String[] getSubFamilyNames() {
return subFamilyNames.stream().sorted().toArray(String[]::new);
}

Font getFontForSubFamilyName(String name) {
return nameToFontMap.get(name);
}

static FontFamily getFontFamily(String name) {
return familyMap.get(name);
}

static FontFamily createFontFamily(String name) {
FontFamily f = familyMap.get(name);
if (f == null) {
f = new FontFamily(name);
familyMap.put(name, f);
}
return f;
}

/*
* familyName must be a name of an existing FontFamily
* name, must be a valid "subFamilyName" within that FontFamily
* as returned by getSubFamilyNames()
*/
static Font getFont(String familyName, String subFamilyName) {
FontFamily family = getFontFamily(familyName);
return family.getFontForSubFamilyName(subFamilyName);
}
}

static String[] familyNames;
static Font[] allFonts;
static List<String> allFontNames;
static Map<String, FontFamily> familyMap = new HashMap<>();

private static void buildFontInfo() {
GraphicsEnvironment ge = GraphicsEnvironment.getLocalGraphicsEnvironment();
familyNames = ge.getAvailableFontFamilyNames();
allFonts = ge.getAllFonts();
allFontNames = new ArrayList<String>();
Locale l = Locale.getDefault();
for (Font f : allFonts) {
allFontNames.add(f.getFontName(l));
String family = f.getFamily(l);
FontFamily ff = FontFamily.getFontFamily(family);
if (ff == null) {
ff = FontFamily.createFontFamily(family);
}
ff.add(f);
}
}
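A hedged usage sketch of the FontFamily registry that buildFontInfo() populates above; the class and method names come from this diff, the family name is only an example:

    // After buildFontInfo() has run, subfamilies of an installed family can be
    // enumerated and mapped back to concrete Font objects.
    FontFamily ff = FontFamily.getFontFamily("DejaVu Sans");    // example family name
    if (ff != null) {
        for (String sub : ff.getSubFamilyNames()) {             // e.g. "Regular", "Bold", "Oblique"
            Font f = ff.getFontForSubFamilyName(sub);
            System.out.println(sub + " -> " + f.getFontName());
        }
    }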
String getFontNameFor(String family, String subFamily) {
return family + " " + subFamily;
}

void updateSubFamilyMenu(String name) {
FontFamily family = FontFamily.getFontFamily(name);
fontSubFamilyMenu.removeAllItems();

String [] sfNames = family.getSubFamilyNames();
for (int i=0; i<sfNames.length; i++) {
fontSubFamilyMenu.addItem(sfNames[i]);
}
fontSubFamilyMenu.setSelectedIndex(0); // better be at least one !
}

static String[] getAllFontNames() {
return allFontNames.stream().sorted().toArray(String[]::new);
}

static String[] getAllFamilyNames() {
return familyNames;
}

/// Main function
public static void main(String[] argv) {

if(argv.length > 0) {
if(argv[0].equalsIgnoreCase("-disablecandisplaycheck") ||
if (argv.length > 0) {
if (argv[0].equalsIgnoreCase("-disablecandisplaycheck") ||
argv[0].equalsIgnoreCase("-dcdc")) {
canDisplayCheck = false;
}
@@ -1021,17 +1429,23 @@ public final class Font2DTest extends JPanel
}
}

UIManager.put("swing.boldMetal", Boolean.FALSE);
final JFrame f = new JFrame( "Font2DTest" );
final Font2DTest f2dt = new Font2DTest( f);
f.addWindowListener( new WindowAdapter() {
public void windowOpening( WindowEvent e ) { f2dt.repaint(); }
public void windowClosing( WindowEvent e ) { System.exit(0); }
});
buildFontInfo();
try {
UIManager.setLookAndFeel(new NimbusLookAndFeel());
SwingUtilities.invokeAndWait(() -> {
final JFrame f = new JFrame( "Font2DTest" );
final Font2DTest f2dt = new Font2DTest( f);
f.addWindowListener( new WindowAdapter() {
public void windowOpening( WindowEvent e ) { f2dt.repaint(); }
public void windowClosing( WindowEvent e ) { System.exit(0); }
});

f.getContentPane().add( f2dt );
f.pack();
f.setVisible(true);
f.getContentPane().add( f2dt );
f.pack();
f.setVisible(true);
});
} catch (UnsupportedLookAndFeelException|InterruptedException|InvocationTargetException e) {
}
}

/// Inner class definitions...

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -740,7 +740,8 @@ public final class FontPanel extends JPanel implements AdjustmentListener {
verticalBar.setValues( oldValue, numCharDown, 0, totalNumRows );
}
if ( totalNumRows <= numCharDown && drawStart == 0) {
verticalBar.setEnabled( false );
// the disabled scroll bar looks odd with Nimbus L&F.
verticalBar.setEnabled( true );
}
else {
verticalBar.setEnabled( true );
@@ -207,7 +207,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
// Does store cross heap regions?

__ eor(tmp1, store_addr, new_val);
__ lsr(tmp1, tmp1, HeapRegion::LogOfHRGrainBytes);
__ lsr(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes);
__ cbz(tmp1, done);

// crosses regions, storing null?
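The eor/lsr/cbz sequence above is the usual G1 cross-region filter; the only change in this hunk is the HeapRegion to G1HeapRegion rename. A scalar sketch of the same predicate, with illustrative names:

    // Two addresses lie in the same G1 heap region iff they agree in every bit
    // above the region-size shift, so the XOR shifted right by that amount is zero.
    static boolean crossesRegions(long storeAddr, long newVal, int logOfHRGrainBytes) {
        return ((storeAddr ^ newVal) >>> logOfHRGrainBytes) != 0;
    }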
@@ -269,21 +269,6 @@ void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj,
// verify_tlab();
}

void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm,
Register var_size_in_bytes,
int con_size_in_bytes,
Register t1) {
assert(t1->is_valid(), "need temp reg");

__ ldr(t1, Address(rthread, in_bytes(JavaThread::allocated_bytes_offset())));
if (var_size_in_bytes->is_valid()) {
__ add(t1, t1, var_size_in_bytes);
} else {
__ add(t1, t1, con_size_in_bytes);
}
__ str(t1, Address(rthread, in_bytes(JavaThread::allocated_bytes_offset())));
}

static volatile uint32_t _patching_epoch = 0;

address BarrierSetAssembler::patching_epoch_addr() {

@@ -44,11 +44,6 @@ enum class NMethodPatchingType {
};

class BarrierSetAssembler: public CHeapObj<mtGC> {
private:
void incr_allocated_bytes(MacroAssembler* masm,
Register var_size_in_bytes, int con_size_in_bytes,
Register t1 = noreg);

public:
virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
Register src, Register dst, Register count, RegSet saved_regs) {}

@@ -109,18 +109,13 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
assert_different_registers(obj, pre_val, tmp1, tmp2);
assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");

Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()));
Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

// Is marking active?
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
__ ldrw(tmp1, in_progress);
} else {
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
__ ldrb(tmp1, in_progress);
}
__ cbzw(tmp1, done);
Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
__ ldrb(tmp1, gc_state);
__ tbz(tmp1, ShenandoahHeap::MARKING_BITPOS, done);

// Do we need to load the previous value?
if (obj != noreg) {

@@ -38,7 +38,7 @@

// Default value if probing is not implemented for a certain platform
// Max address bit is restricted by implicit assumptions in the code, for instance
// the bit layout of XForwardingEntry or Partial array entry (see XMarkStackEntry) in mark stack
// the bit layout of ZForwardingEntry or Partial array entry (see ZMarkStackEntry) in mark stack
static const size_t DEFAULT_MAX_ADDRESS_BIT = 46;
// Minimum value returned, if probing fail
static const size_t MINIMUM_MAX_ADDRESS_BIT = 36;

@@ -1141,6 +1141,7 @@ public:
#define __ masm->

void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const {
Assembler::InlineSkippedInstructionsCounter skipped_counter(masm);
BLOCK_COMMENT("ZLoadBarrierStubC2");

// Stub entry
@@ -1159,6 +1160,7 @@ void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, Z
}

void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm, ZStoreBarrierStubC2* stub) const {
Assembler::InlineSkippedInstructionsCounter skipped_counter(masm);
BLOCK_COMMENT("ZStoreBarrierStubC2");

// Stub entry
@@ -3879,7 +3879,7 @@ void MacroAssembler::multiply_128_x_128_loop(Register y, Register z,
* r2: y
* r3: ylen
* r4: z
* r5: zlen
* r5: tmp0
* r10: tmp1
* r11: tmp2
* r12: tmp3
@@ -3890,11 +3890,11 @@ void MacroAssembler::multiply_128_x_128_loop(Register y, Register z,
*
*/
void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen,
Register z, Register zlen,
Register z, Register tmp0,
Register tmp1, Register tmp2, Register tmp3, Register tmp4,
Register tmp5, Register tmp6, Register product_hi) {

assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6);
assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, product_hi);

const Register idx = tmp1;
const Register kdx = tmp2;
@@ -3903,7 +3903,7 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Regi
const Register y_idx = tmp4;
const Register carry = tmp5;
const Register product = xlen;
const Register x_xstart = zlen; // reuse register
const Register x_xstart = tmp0;

// First Loop.
//
@@ -3919,9 +3919,9 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Regi
// z[xstart] = (int)carry;
//

movw(idx, ylen); // idx = ylen;
movw(kdx, zlen); // kdx = xlen+ylen;
mov(carry, zr); // carry = 0;
movw(idx, ylen); // idx = ylen;
addw(kdx, xlen, ylen); // kdx = xlen+ylen;
mov(carry, zr); // carry = 0;

Label L_done;
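Dropping the zlen argument relies on the invariant that the multiplyToLen result always occupies xlen + ylen 32-bit limbs, which is why kdx can now be formed with addw(kdx, xlen, ylen). A plain-Java sketch of the schoolbook multiplication showing that sizing (illustrative, not the HotSpot stub):

    static int[] multiplyToLenSketch(int[] x, int xlen, int[] y, int ylen) {
        int[] z = new int[xlen + ylen];                 // zlen is always xlen + ylen
        for (int i = xlen - 1; i >= 0; i--) {
            long xi = x[i] & 0xFFFFFFFFL;
            long carry = 0;
            for (int j = ylen - 1, k = i + j + 1; j >= 0; j--, k--) {
                long p = xi * (y[j] & 0xFFFFFFFFL) + (z[k] & 0xFFFFFFFFL) + carry;
                z[k] = (int) p;                         // keep the low 32 bits
                carry = p >>> 32;                       // propagate the high 32 bits
            }
            z[i] = (int) carry;                         // final carry of this row
        }
        return z;
    }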
@@ -1510,7 +1510,7 @@ public:
void ghash_load_wide(int index, Register data, FloatRegister result, FloatRegister state);
public:
void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z,
Register zlen, Register tmp1, Register tmp2, Register tmp3,
Register tmp0, Register tmp1, Register tmp2, Register tmp3,
Register tmp4, Register tmp5, Register tmp6, Register tmp7);
void mul_add(Register out, Register in, Register offs, Register len, Register k);
void ghash_multiply(FloatRegister result_lo, FloatRegister result_hi,
@@ -50,113 +50,6 @@ void NativeInstruction::wrote(int offset) {
|
||||
ICache::invalidate_word(addr_at(offset));
|
||||
}
|
||||
|
||||
void NativeLoadGot::report_and_fail() const {
|
||||
tty->print_cr("Addr: " INTPTR_FORMAT, p2i(instruction_address()));
|
||||
fatal("not a indirect rip mov to rbx");
|
||||
}
|
||||
|
||||
void NativeLoadGot::verify() const {
|
||||
assert(is_adrp_at((address)this), "must be adrp");
|
||||
}
|
||||
|
||||
address NativeLoadGot::got_address() const {
|
||||
return MacroAssembler::target_addr_for_insn((address)this);
|
||||
}
|
||||
|
||||
intptr_t NativeLoadGot::data() const {
|
||||
return *(intptr_t *) got_address();
|
||||
}
|
||||
|
||||
address NativePltCall::destination() const {
|
||||
NativeGotJump* jump = nativeGotJump_at(plt_jump());
|
||||
return *(address*)MacroAssembler::target_addr_for_insn((address)jump);
|
||||
}
|
||||
|
||||
address NativePltCall::plt_entry() const {
|
||||
return MacroAssembler::target_addr_for_insn((address)this);
|
||||
}
|
||||
|
||||
address NativePltCall::plt_jump() const {
|
||||
address entry = plt_entry();
|
||||
// Virtual PLT code has move instruction first
|
||||
if (((NativeGotJump*)entry)->is_GotJump()) {
|
||||
return entry;
|
||||
} else {
|
||||
return nativeLoadGot_at(entry)->next_instruction_address();
|
||||
}
|
||||
}
|
||||
|
||||
address NativePltCall::plt_load_got() const {
|
||||
address entry = plt_entry();
|
||||
if (!((NativeGotJump*)entry)->is_GotJump()) {
|
||||
// Virtual PLT code has move instruction first
|
||||
return entry;
|
||||
} else {
|
||||
// Static PLT code has move instruction second (from c2i stub)
|
||||
return nativeGotJump_at(entry)->next_instruction_address();
|
||||
}
|
||||
}
|
||||
|
||||
address NativePltCall::plt_c2i_stub() const {
|
||||
address entry = plt_load_got();
|
||||
// This method should be called only for static calls which has C2I stub.
|
||||
NativeLoadGot* load = nativeLoadGot_at(entry);
|
||||
return entry;
|
||||
}
|
||||
|
||||
address NativePltCall::plt_resolve_call() const {
|
||||
NativeGotJump* jump = nativeGotJump_at(plt_jump());
|
||||
address entry = jump->next_instruction_address();
|
||||
if (((NativeGotJump*)entry)->is_GotJump()) {
|
||||
return entry;
|
||||
} else {
|
||||
// c2i stub 2 instructions
|
||||
entry = nativeLoadGot_at(entry)->next_instruction_address();
|
||||
return nativeGotJump_at(entry)->next_instruction_address();
|
||||
}
|
||||
}
|
||||
|
||||
void NativePltCall::reset_to_plt_resolve_call() {
|
||||
set_destination_mt_safe(plt_resolve_call());
|
||||
}
|
||||
|
||||
void NativePltCall::set_destination_mt_safe(address dest) {
|
||||
// rewriting the value in the GOT, it should always be aligned
|
||||
NativeGotJump* jump = nativeGotJump_at(plt_jump());
|
||||
address* got = (address *) jump->got_address();
|
||||
*got = dest;
|
||||
}
|
||||
|
||||
void NativePltCall::set_stub_to_clean() {
|
||||
NativeLoadGot* method_loader = nativeLoadGot_at(plt_c2i_stub());
|
||||
NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address());
|
||||
method_loader->set_data(0);
|
||||
jump->set_jump_destination((address)-1);
|
||||
}
|
||||
|
||||
void NativePltCall::verify() const {
|
||||
assert(NativeCall::is_call_at((address)this), "unexpected code at call site");
|
||||
}
|
||||
|
||||
address NativeGotJump::got_address() const {
|
||||
return MacroAssembler::target_addr_for_insn((address)this);
|
||||
}
|
||||
|
||||
address NativeGotJump::destination() const {
|
||||
address *got_entry = (address *) got_address();
|
||||
return *got_entry;
|
||||
}
|
||||
|
||||
bool NativeGotJump::is_GotJump() const {
|
||||
NativeInstruction *insn =
|
||||
nativeInstruction_at(addr_at(3 * NativeInstruction::instruction_size));
|
||||
return insn->encoding() == 0xd61f0200; // br x16
|
||||
}
|
||||
|
||||
void NativeGotJump::verify() const {
|
||||
assert(is_adrp_at((address)this), "must be adrp");
|
||||
}
|
||||
|
||||
address NativeCall::destination() const {
|
||||
address addr = (address)this;
|
||||
address destination = instruction_address() + displacement();
|
||||
|
||||
@@ -39,16 +39,15 @@
|
||||
// - NativeInstruction
|
||||
// - - NativeCall
|
||||
// - - NativeMovConstReg
|
||||
// - - NativeMovConstRegPatching
|
||||
// - - NativeMovRegMem
|
||||
// - - NativeMovRegMemPatching
|
||||
// - - NativeJump
|
||||
// - - NativeIllegalOpCode
|
||||
// - - NativeGeneralJump
|
||||
// - - NativeReturn
|
||||
// - - NativeReturnX (return with argument)
|
||||
// - - NativePushConst
|
||||
// - - NativeTstRegMem
|
||||
// - - - NativeGeneralJump
|
||||
// - - NativeIllegalInstruction
|
||||
// - - NativeCallTrampolineStub
|
||||
// - - NativeMembar
|
||||
// - - NativeLdSt
|
||||
// - - NativePostCallNop
|
||||
// - - NativeDeoptInstruction
|
||||
|
||||
// The base class for different kinds of native instruction abstractions.
|
||||
// Provides the primitive operations to manipulate code relative to this.
|
||||
@@ -155,44 +154,6 @@ inline NativeInstruction* nativeInstruction_at(uint32_t* address) {
|
||||
return (NativeInstruction*)address;
|
||||
}
|
||||
|
||||
class NativePltCall: public NativeInstruction {
|
||||
public:
|
||||
enum Arm_specific_constants {
|
||||
instruction_size = 4,
|
||||
instruction_offset = 0,
|
||||
displacement_offset = 1,
|
||||
return_address_offset = 4
|
||||
};
|
||||
address instruction_address() const { return addr_at(instruction_offset); }
|
||||
address next_instruction_address() const { return addr_at(return_address_offset); }
|
||||
address displacement_address() const { return addr_at(displacement_offset); }
|
||||
int displacement() const { return (jint) int_at(displacement_offset); }
|
||||
address return_address() const { return addr_at(return_address_offset); }
|
||||
address destination() const;
|
||||
address plt_entry() const;
|
||||
address plt_jump() const;
|
||||
address plt_load_got() const;
|
||||
address plt_resolve_call() const;
|
||||
address plt_c2i_stub() const;
|
||||
void set_stub_to_clean();
|
||||
|
||||
void reset_to_plt_resolve_call();
|
||||
void set_destination_mt_safe(address dest);
|
||||
|
||||
void verify() const;
|
||||
};
|
||||
|
||||
inline NativePltCall* nativePltCall_at(address address) {
|
||||
NativePltCall* call = (NativePltCall*)address;
|
||||
DEBUG_ONLY(call->verify());
|
||||
return call;
|
||||
}
|
||||
|
||||
inline NativePltCall* nativePltCall_before(address addr) {
|
||||
address at = addr - NativePltCall::instruction_size;
|
||||
return nativePltCall_at(at);
|
||||
}
|
||||
|
||||
inline NativeCall* nativeCall_at(address address);
|
||||
// The NativeCall is an abstraction for accessing/manipulating native
|
||||
// call instructions (used to manipulate inline caches, primitive &
|
||||
@@ -326,15 +287,6 @@ inline NativeMovConstReg* nativeMovConstReg_before(address address) {
|
||||
return test;
|
||||
}
|
||||
|
||||
class NativeMovConstRegPatching: public NativeMovConstReg {
|
||||
private:
|
||||
friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
|
||||
NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_offset);
|
||||
DEBUG_ONLY(test->verify());
|
||||
return test;
|
||||
}
|
||||
};
|
||||
|
||||
// An interface for accessing/manipulating native moves of the form:
|
||||
// mov[b/w/l/q] [reg + offset], reg (instruction_code_reg2mem)
|
||||
// mov[b/w/l/q] reg, [reg+offset] (instruction_code_mem2reg
|
||||
@@ -387,60 +339,6 @@ inline NativeMovRegMem* nativeMovRegMem_at(address address) {
|
||||
return test;
|
||||
}
|
||||
|
||||
class NativeMovRegMemPatching: public NativeMovRegMem {
|
||||
private:
|
||||
friend NativeMovRegMemPatching* nativeMovRegMemPatching_at(address address) {
|
||||
Unimplemented();
|
||||
return 0;
|
||||
}
|
||||
};
|
||||
|
||||
// An interface for accessing/manipulating native leal instruction of form:
|
||||
// leal reg, [reg + offset]
|
||||
|
||||
class NativeLoadAddress: public NativeInstruction {
|
||||
enum AArch64_specific_constants {
|
||||
instruction_size = 4,
|
||||
instruction_offset = 0,
|
||||
data_offset = 0,
|
||||
next_instruction_offset = 4
|
||||
};
|
||||
|
||||
public:
|
||||
void verify();
|
||||
};
|
||||
|
||||
// adrp x16, #page
|
||||
// add x16, x16, #offset
|
||||
// ldr x16, [x16]
|
||||
class NativeLoadGot: public NativeInstruction {
|
||||
public:
|
||||
enum AArch64_specific_constants {
|
||||
instruction_length = 4 * NativeInstruction::instruction_size,
|
||||
offset_offset = 0,
|
||||
};
|
||||
|
||||
address instruction_address() const { return addr_at(0); }
|
||||
address return_address() const { return addr_at(instruction_length); }
|
||||
address got_address() const;
|
||||
address next_instruction_address() const { return return_address(); }
|
||||
intptr_t data() const;
|
||||
void set_data(intptr_t data) {
|
||||
intptr_t* addr = (intptr_t*)got_address();
|
||||
*addr = data;
|
||||
}
|
||||
|
||||
void verify() const;
|
||||
private:
|
||||
void report_and_fail() const;
|
||||
};
|
||||
|
||||
inline NativeLoadGot* nativeLoadGot_at(address addr) {
|
||||
NativeLoadGot* load = (NativeLoadGot*)addr;
|
||||
DEBUG_ONLY(load->verify());
|
||||
return load;
|
||||
}
|
||||
|
||||
class NativeJump: public NativeInstruction {
|
||||
public:
|
||||
enum AArch64_specific_constants {
|
||||
@@ -496,60 +394,12 @@ inline NativeGeneralJump* nativeGeneralJump_at(address address) {
|
||||
return jump;
|
||||
}
|
||||
|
||||
class NativeGotJump: public NativeInstruction {
|
||||
public:
|
||||
enum AArch64_specific_constants {
|
||||
instruction_size = 4 * NativeInstruction::instruction_size,
|
||||
};
|
||||
|
||||
void verify() const;
|
||||
address instruction_address() const { return addr_at(0); }
|
||||
address destination() const;
|
||||
address return_address() const { return addr_at(instruction_size); }
|
||||
address got_address() const;
|
||||
address next_instruction_address() const { return addr_at(instruction_size); }
|
||||
bool is_GotJump() const;
|
||||
|
||||
void set_jump_destination(address dest) {
|
||||
address* got = (address*)got_address();
|
||||
*got = dest;
|
||||
}
|
||||
};
|
||||
|
||||
inline NativeGotJump* nativeGotJump_at(address addr) {
|
||||
NativeGotJump* jump = (NativeGotJump*)(addr);
|
||||
DEBUG_ONLY(jump->verify());
|
||||
return jump;
|
||||
}
|
||||
|
||||
class NativePopReg : public NativeInstruction {
|
||||
public:
|
||||
// Insert a pop instruction
|
||||
static void insert(address code_pos, Register reg);
|
||||
};
|
||||
|
||||
|
||||
class NativeIllegalInstruction: public NativeInstruction {
|
||||
public:
|
||||
// Insert illegal opcode as specific address
|
||||
static void insert(address code_pos);
|
||||
};
|
||||
|
||||
// return instruction that does not pop values of the stack
|
||||
class NativeReturn: public NativeInstruction {
|
||||
public:
|
||||
};
|
||||
|
||||
// return instruction that does pop values of the stack
|
||||
class NativeReturnX: public NativeInstruction {
|
||||
public:
|
||||
};
|
||||
|
||||
// Simple test vs memory
|
||||
class NativeTstRegMem: public NativeInstruction {
|
||||
public:
|
||||
};
|
||||
|
||||
inline bool NativeInstruction::is_nop() const{
|
||||
uint32_t insn = *(uint32_t*)addr_at(0);
|
||||
return insn == 0xd503201f;
|
||||
|
||||
@@ -389,14 +389,14 @@ typedef AbstractRegSet<PRegister> PRegSet;

template <>
inline Register AbstractRegSet<Register>::first() {
uint32_t first = _bitset & -_bitset;
return first ? as_Register(exact_log2(first)) : noreg;
if (_bitset == 0) { return noreg; }
return as_Register(count_trailing_zeros(_bitset));
}

template <>
inline FloatRegister AbstractRegSet<FloatRegister>::first() {
uint32_t first = _bitset & -_bitset;
return first ? as_FloatRegister(exact_log2(first)) : fnoreg;
if (_bitset == 0) { return fnoreg; }
return as_FloatRegister(count_trailing_zeros(_bitset));
}
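The new version replaces the isolate-lowest-bit-plus-log2 idiom with a direct trailing-zero count; for a non-zero bitset the two are equivalent, as this small illustration (Java, purely for exposition) shows:

    // For any non-zero mask, log2(mask & -mask) equals numberOfTrailingZeros(mask),
    // so the resulting register index is the same either way.
    int bitset = 0b1011000;                               // example register mask
    int lowestBit = bitset & -bitset;                     // 0b0001000
    boolean same = Integer.numberOfTrailingZeros(bitset)
                == Integer.numberOfTrailingZeros(lowestBit);   // true, both are 3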
|
||||
inline Register as_Register(FloatRegister reg) {
|
||||
|
||||
@@ -4645,7 +4645,6 @@ class StubGenerator: public StubCodeGenerator {
* c_rarg2 - y address
* c_rarg3 - y length
* c_rarg4 - z address
* c_rarg5 - z length
*/
address generate_multiplyToLen() {
__ align(CodeEntryAlignment);
@@ -4657,8 +4656,8 @@ class StubGenerator: public StubCodeGenerator {
const Register y = r2;
const Register ylen = r3;
const Register z = r4;
const Register zlen = r5;

const Register tmp0 = r5;
const Register tmp1 = r10;
const Register tmp2 = r11;
const Register tmp3 = r12;
@@ -4669,7 +4668,7 @@ class StubGenerator: public StubCodeGenerator {

BLOCK_COMMENT("Entry:");
__ enter(); // required for proper stackwalking of RuntimeStub frame
__ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
__ multiply_to_len(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(lr);

@@ -4687,10 +4686,10 @@ class StubGenerator: public StubCodeGenerator {
const Register x = r0;
const Register xlen = r1;
const Register z = r2;
const Register zlen = r3;
const Register y = r4; // == x
const Register ylen = r5; // == xlen

const Register tmp0 = r3;
const Register tmp1 = r10;
const Register tmp2 = r11;
const Register tmp3 = r12;
@@ -4705,7 +4704,7 @@ class StubGenerator: public StubCodeGenerator {
__ push(spilled_regs, sp);
__ mov(y, x);
__ mov(ylen, xlen);
__ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
__ multiply_to_len(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
__ pop(spilled_regs, sp);
__ leave();
__ ret(lr);
@@ -8465,6 +8464,7 @@ class StubGenerator: public StubCodeGenerator {

#endif // LINUX

#ifdef COMPILER2
if (UseSecondarySupersTable) {
StubRoutines::_lookup_secondary_supers_table_slow_path_stub = generate_lookup_secondary_supers_table_slow_path_stub();
if (! InlineSecondarySupersTest) {
@@ -8474,6 +8474,7 @@ class StubGenerator: public StubCodeGenerator {
}
}
}
#endif

StubRoutines::_upcall_stub_exception_handler = generate_upcall_stub_exception_handler();
@@ -207,7 +207,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
|
||||
// Does store cross heap regions?
|
||||
|
||||
__ eor(tmp1, store_addr, new_val);
|
||||
__ movs(tmp1, AsmOperand(tmp1, lsr, HeapRegion::LogOfHRGrainBytes));
|
||||
__ movs(tmp1, AsmOperand(tmp1, lsr, G1HeapRegion::LogOfHRGrainBytes));
|
||||
__ b(done, eq);
|
||||
|
||||
// crosses regions, storing null?
|
||||
|
||||
@@ -159,46 +159,6 @@ void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj, Regi
|
||||
__ str(obj_end, Address(Rthread, JavaThread::tlab_top_offset()));
|
||||
}
|
||||
|
||||
void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm, RegisterOrConstant size_in_bytes, Register tmp) {
|
||||
// Bump total bytes allocated by this thread
|
||||
Label done;
|
||||
|
||||
// Borrow the Rthread for alloc counter
|
||||
Register Ralloc = Rthread;
|
||||
__ add(Ralloc, Ralloc, in_bytes(JavaThread::allocated_bytes_offset()));
|
||||
__ ldr(tmp, Address(Ralloc));
|
||||
__ adds(tmp, tmp, size_in_bytes);
|
||||
__ str(tmp, Address(Ralloc), cc);
|
||||
__ b(done, cc);
|
||||
|
||||
// Increment the high word and store single-copy atomically (that is an unlikely scenario on typical embedded systems as it means >4GB has been allocated)
|
||||
// To do so ldrd/strd instructions used which require an even-odd pair of registers. Such a request could be difficult to satisfy by
|
||||
// allocating those registers on a higher level, therefore the routine is ready to allocate a pair itself.
|
||||
Register low, high;
|
||||
// Select ether R0/R1 or R2/R3
|
||||
|
||||
if (size_in_bytes.is_register() && (size_in_bytes.as_register() == R0 || size_in_bytes.as_register() == R1)) {
|
||||
low = R2;
|
||||
high = R3;
|
||||
} else {
|
||||
low = R0;
|
||||
high = R1;
|
||||
}
|
||||
__ push(RegisterSet(low, high));
|
||||
|
||||
__ ldrd(low, Address(Ralloc));
|
||||
__ adds(low, low, size_in_bytes);
|
||||
__ adc(high, high, 0);
|
||||
__ strd(low, Address(Ralloc));
|
||||
|
||||
__ pop(RegisterSet(low, high));
|
||||
|
||||
__ bind(done);
|
||||
|
||||
// Unborrow the Rthread
|
||||
__ sub(Rthread, Ralloc, in_bytes(JavaThread::allocated_bytes_offset()));
|
||||
}
|
||||
|
||||
void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
|
||||
|
||||
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
|
||||
|
||||
@@ -40,12 +40,6 @@ enum class NMethodPatchingType {
|
||||
};
|
||||
|
||||
class BarrierSetAssembler: public CHeapObj<mtGC> {
|
||||
private:
|
||||
void incr_allocated_bytes(MacroAssembler* masm,
|
||||
RegisterOrConstant size_in_bytes,
|
||||
Register tmp
|
||||
);
|
||||
|
||||
public:
|
||||
virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
|
||||
Register addr, Register count, int callee_saved_regs) {}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2023 SAP SE. All rights reserved.
|
||||
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -78,9 +78,9 @@ int Assembler::branch_destination(int inst, int pos) {
|
||||
|
||||
// Low-level andi-one-instruction-macro.
void Assembler::andi(Register a, Register s, const long ui16) {
if (is_power_of_2(((jlong) ui16)+1)) {
if (is_power_of_2(((unsigned long) ui16)+1)) {
// pow2minus1
clrldi(a, s, 64 - log2i_exact((((jlong) ui16)+1)));
clrldi(a, s, 64 - log2i_exact((((unsigned long) ui16)+1)));
} else if (is_power_of_2((jlong) ui16)) {
// pow2
rlwinm(a, s, 0, 31 - log2i_exact((jlong) ui16), 31 - log2i_exact((jlong) ui16));
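The pow2minus1 branch above turns an andi with a mask of the form 2^k - 1 into a single clrldi that keeps only the low k bits; the unsigned cast keeps that power-of-two test well defined at the boundary values. A small illustration of the mask arithmetic, with made-up values:

    long ui16 = 0x0FFF;                                   // 2^12 - 1
    boolean pow2minus1 = ((ui16 + 1) & ui16) == 0;        // true: mask is 2^k - 1
    int k = Long.numberOfTrailingZeros(ui16 + 1);         // k = 12
    long value = 0x1234_5678L;
    long masked = value & ui16;                           // same as clrldi(value, 64 - k) -> 0x678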
|
||||
|
||||
@@ -578,7 +578,7 @@ inline bool can_handle_logic_op_as_uimm(ValueType *type, Bytecodes::Code bc) {
|
||||
is_power_of_2(int_or_long_const) ||
|
||||
is_power_of_2(-int_or_long_const))) return true;
|
||||
if (bc == Bytecodes::_land &&
|
||||
(is_power_of_2(int_or_long_const+1) ||
|
||||
(is_power_of_2((unsigned long)int_or_long_const+1) ||
|
||||
(Assembler::is_uimm(int_or_long_const, 32) && is_power_of_2(int_or_long_const)) ||
|
||||
(int_or_long_const != min_jlong && is_power_of_2(-int_or_long_const)))) return true;
|
||||
|
||||
|
||||
@@ -183,7 +183,7 @@ void C1_MacroAssembler::try_allocate(
|
||||
Register obj, // result: pointer to object after successful allocation
|
||||
Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
|
||||
int con_size_in_bytes, // object size in bytes if known at compile time
|
||||
Register t1, // temp register, must be global register for incr_allocated_bytes
|
||||
Register t1, // temp register
|
||||
Register t2, // temp register
|
||||
Label& slow_case // continuation point if fast allocation fails
|
||||
) {
|
||||
|
||||
@@ -243,7 +243,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
|
||||
|
||||
// Does store cross heap regions?
|
||||
__ xorr(tmp1, store_addr, new_val);
|
||||
__ srdi_(tmp1, tmp1, HeapRegion::LogOfHRGrainBytes);
|
||||
__ srdi_(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes);
|
||||
__ beq(CCR0, filtered);
|
||||
|
||||
// Crosses regions, storing null?
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021, 2023 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@@ -887,6 +887,7 @@ class ZSetupArguments {
|
||||
#define __ masm->
|
||||
|
||||
void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const {
|
||||
Assembler::InlineSkippedInstructionsCounter skipped_counter(masm);
|
||||
__ block_comment("generate_c2_load_barrier_stub (zgc) {");
|
||||
|
||||
__ bind(*stub->entry());
|
||||
@@ -910,6 +911,7 @@ void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, Z
|
||||
}
|
||||
|
||||
void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm, ZStoreBarrierStubC2* stub) const {
|
||||
Assembler::InlineSkippedInstructionsCounter skipped_counter(masm);
|
||||
__ block_comment("ZStoreBarrierStubC2");
|
||||
|
||||
// Stub entry
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2023 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@@ -2209,9 +2209,6 @@ void MacroAssembler::tlab_allocate(
|
||||
std(new_top, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
|
||||
//verify_tlab(); not implemented
|
||||
}
|
||||
void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes, Register t1, Register t2) {
|
||||
unimplemented("incr_allocated_bytes");
|
||||
}
|
||||
|
||||
address MacroAssembler::emit_trampoline_stub(int destination_toc_offset,
|
||||
int insts_call_instruction_offset, Register Rtoc) {
|
||||
@@ -3901,7 +3898,7 @@ void MacroAssembler::muladd(Register out, Register in,
|
||||
|
||||
void MacroAssembler::multiply_to_len(Register x, Register xlen,
|
||||
Register y, Register ylen,
|
||||
Register z, Register zlen,
|
||||
Register z,
|
||||
Register tmp1, Register tmp2,
|
||||
Register tmp3, Register tmp4,
|
||||
Register tmp5, Register tmp6,
|
||||
@@ -3912,11 +3909,11 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen,
|
||||
|
||||
ShortBranchVerifier sbv(this);
|
||||
|
||||
assert_different_registers(x, xlen, y, ylen, z, zlen,
|
||||
assert_different_registers(x, xlen, y, ylen, z,
|
||||
tmp1, tmp2, tmp3, tmp4, tmp5, tmp6);
|
||||
assert_different_registers(x, xlen, y, ylen, z, zlen,
|
||||
assert_different_registers(x, xlen, y, ylen, z,
|
||||
tmp1, tmp2, tmp3, tmp4, tmp5, tmp7);
|
||||
assert_different_registers(x, xlen, y, ylen, z, zlen,
|
||||
assert_different_registers(x, xlen, y, ylen, z,
|
||||
tmp1, tmp2, tmp3, tmp4, tmp5, tmp8);
|
||||
|
||||
const Register idx = tmp1;
|
||||
@@ -3944,7 +3941,7 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen,
|
||||
// z[xstart] = (int)carry;
|
||||
|
||||
mr_if_needed(idx, ylen); // idx = ylen
|
||||
mr_if_needed(kdx, zlen); // kdx = xlen + ylen
|
||||
add(kdx, xlen, ylen); // kdx = xlen + ylen
|
||||
li(carry, 0); // carry = 0
|
||||
|
||||
Label L_done;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2023 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@@ -626,7 +626,6 @@ class MacroAssembler: public Assembler {
|
||||
Register t1, // temp register
|
||||
Label& slow_case // continuation point if fast allocation fails
|
||||
);
|
||||
void incr_allocated_bytes(RegisterOrConstant size_in_bytes, Register t1, Register t2);
|
||||
|
||||
enum { trampoline_stub_size = 6 * 4 };
|
||||
address emit_trampoline_stub(int destination_toc_offset, int insts_call_instruction_offset, Register Rtoc = noreg);
|
||||
@@ -785,7 +784,7 @@ class MacroAssembler: public Assembler {
|
||||
Register tmp1, Register tmp2, Register carry);
|
||||
void multiply_to_len(Register x, Register xlen,
|
||||
Register y, Register ylen,
|
||||
Register z, Register zlen,
|
||||
Register z,
|
||||
Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5,
|
||||
Register tmp6, Register tmp7, Register tmp8, Register tmp9, Register tmp10,
|
||||
Register tmp11, Register tmp12, Register tmp13);
|
||||
|
||||
@@ -3204,7 +3204,6 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// R5 - y address
|
||||
// R6 - y length
|
||||
// R7 - z address
|
||||
// R8 - z length
|
||||
//
|
||||
address generate_multiplyToLen() {
|
||||
|
||||
@@ -3217,7 +3216,6 @@ class StubGenerator: public StubCodeGenerator {
|
||||
const Register y = R5;
|
||||
const Register ylen = R6;
|
||||
const Register z = R7;
|
||||
const Register zlen = R8;
|
||||
|
||||
const Register tmp1 = R2; // TOC not used.
|
||||
const Register tmp2 = R9;
|
||||
@@ -3240,7 +3238,6 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// C2 does not respect int to long conversion for stub calls.
|
||||
__ clrldi(xlen, xlen, 32);
|
||||
__ clrldi(ylen, ylen, 32);
|
||||
__ clrldi(zlen, zlen, 32);
|
||||
|
||||
// Save non-volatile regs (frameless).
|
||||
int current_offs = 8;
|
||||
@@ -3253,7 +3250,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
__ std(R30, -current_offs, R1_SP); current_offs += 8;
|
||||
__ std(R31, -current_offs, R1_SP);
|
||||
|
||||
__ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5,
|
||||
__ multiply_to_len(x, xlen, y, ylen, z, tmp1, tmp2, tmp3, tmp4, tmp5,
|
||||
tmp6, tmp7, tmp8, tmp9, tmp10, tmp11, tmp12, tmp13);
|
||||
|
||||
// Restore non-volatile regs.
|
||||
|
||||
@@ -3484,7 +3484,7 @@ void TemplateTable::invokevirtual(int byte_no) {
|
||||
__ testbitdi(CCR0, R0, Rflags, ResolvedMethodEntry::is_vfinal_shift);
|
||||
__ bfalse(CCR0, LnotFinal);
|
||||
|
||||
if (RewriteBytecodes && !UseSharedSpaces && !CDSConfig::is_dumping_static_archive()) {
|
||||
if (RewriteBytecodes && !CDSConfig::is_using_archive() && !CDSConfig::is_dumping_static_archive()) {
|
||||
patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
|
||||
}
|
||||
invokevfinal_helper(Rcache, R11_scratch1, R12_scratch2, Rflags /* tmp */, Rrecv /* tmp */);
|
||||
|
||||
@@ -1863,15 +1863,53 @@ enum Nf {
patch_VArith(op, Vd, funct3, Vs1->raw_encoding(), Vs2, vm, funct6); \
}

// Vector Bit-manipulation used in Cryptography (Zvkb) Extension
// Vector Bit-manipulation used in Cryptography (Zvbb) Extension
INSN(vandn_vv, 0b1010111, 0b000, 0b000001);
INSN(vclmul_vv, 0b1010111, 0b010, 0b001100);
INSN(vclmulh_vv, 0b1010111, 0b010, 0b001101);
INSN(vror_vv, 0b1010111, 0b000, 0b010100);
INSN(vrol_vv, 0b1010111, 0b000, 0b010101);

// Vector Bit-manipulation used in Cryptography (Zvbc) Extension
INSN(vclmul_vv, 0b1010111, 0b010, 0b001100);
INSN(vclmulh_vv, 0b1010111, 0b010, 0b001101);

#undef INSN

#define INSN(NAME, op, funct3, funct6) \
void NAME(VectorRegister Vd, VectorRegister Vs2, Register Rs1, VectorMask vm = unmasked) { \
patch_VArith(op, Vd, funct3, Rs1->raw_encoding(), Vs2, vm, funct6); \
}

// Vector Bit-manipulation used in Cryptography (Zvbb) Extension
INSN(vrol_vx, 0b1010111, 0b100, 0b010101);
INSN(vror_vx, 0b1010111, 0b100, 0b010100);

#undef INSN

#define patch_VArith_imm6(op, Reg, funct3, Reg_or_Imm5, I5, Vs2, vm, funct6) \
unsigned insn = 0; \
patch((address)&insn, 6, 0, op); \
patch((address)&insn, 14, 12, funct3); \
patch((address)&insn, 19, 15, Reg_or_Imm5); \
patch((address)&insn, 25, vm); \
patch((address)&insn, 26, I5); \
patch((address)&insn, 31, 27, funct6); \
patch_reg((address)&insn, 7, Reg); \
patch_reg((address)&insn, 20, Vs2); \
emit(insn)

#define INSN(NAME, op, funct3, funct6) \
void NAME(VectorRegister Vd, VectorRegister Vs2, uint32_t imm, VectorMask vm = unmasked) { \
guarantee(is_uimm6(imm), "uimm is invalid"); \
patch_VArith_imm6(op, Vd, funct3, (uint32_t)(imm & 0x1f), (uint32_t)((imm >> 5) & 0x1), Vs2, vm, funct6); \
}

// Vector Bit-manipulation used in Cryptography (Zvbb) Extension
// NOTE: there is no corresponding vrol.vi supplied by the extension, but it can be emulated with vror.vi easily.
INSN(vror_vi, 0b1010111, 0b011, 0b01010);

#undef INSN
#undef patch_VArith_imm6
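The patch_VArith_imm6 helper above splits the 6-bit vror.vi rotate amount across the encoding: the low five bits go into bits 19..15 and the sixth bit into bit 26. With an illustrative value:

    int imm  = 37;                    // 0b100101, a 6-bit rotate amount
    int low5 = imm & 0x1f;            // 0b00101 -> placed in bits 19..15
    int i5   = (imm >>> 5) & 0x1;     // 1       -> placed in bit 26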
|
||||
|
||||
#define INSN(NAME, op, funct3, Vs1, funct6) \
|
||||
void NAME(VectorRegister Vd, VectorRegister Vs2, VectorMask vm = unmasked) { \
|
||||
patch_VArith(op, Vd, funct3, Vs1, Vs2, vm, funct6); \
|
||||
|
||||
@@ -341,7 +341,9 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
__ call_VM_leaf(entry, args_num);
|
||||
}
|
||||
|
||||
__ bind(*stub->continuation());
|
||||
if (stub != nullptr) {
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -135,7 +135,7 @@ static jlong as_long(LIR_Opr data) {
|
||||
Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
|
||||
if (addr->base()->is_illegal()) {
|
||||
assert(addr->index()->is_illegal(), "must be illegal too");
|
||||
__ movptr(tmp, addr->disp());
|
||||
__ movptr(tmp, (address)addr->disp());
|
||||
return Address(tmp, 0);
|
||||
}
|
||||
|
||||
@@ -1023,7 +1023,8 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
|
||||
arrayOopDesc::base_offset_in_bytes(op->type()),
|
||||
array_element_size(op->type()),
|
||||
op->klass()->as_register(),
|
||||
*op->stub()->entry());
|
||||
*op->stub()->entry(),
|
||||
op->zero_array());
|
||||
}
|
||||
__ bind(*op->stub()->continuation());
|
||||
}
|
||||
|
||||
@@ -733,7 +733,13 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
|
||||
assert(x->number_of_arguments() == 5, "wrong type");
|
||||
|
||||
// Make all state_for calls early since they can emit code
|
||||
CodeEmitInfo* info = state_for(x, x->state());
|
||||
CodeEmitInfo* info = nullptr;
|
||||
if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
|
||||
info = state_for(x, x->state_before());
|
||||
info->set_force_reexecute();
|
||||
} else {
|
||||
info = state_for(x, x->state());
|
||||
}
|
||||
|
||||
LIRItem src(x->argument_at(0), this);
|
||||
LIRItem src_pos(x->argument_at(1), this);
|
||||
@@ -766,6 +772,9 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
|
||||
int flags;
|
||||
ciArrayKlass* expected_type = nullptr;
|
||||
arraycopy_helper(x, &flags, &expected_type);
|
||||
if (x->check_flag(Instruction::OmitChecksFlag)) {
|
||||
flags = 0;
|
||||
}
|
||||
|
||||
__ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp,
|
||||
expected_type, flags, info); // does add_safepoint
|
||||
@@ -844,7 +853,13 @@ void LIRGenerator::do_NewInstance(NewInstance* x) {
|
||||
}
|
||||
|
||||
void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
|
||||
CodeEmitInfo* info = state_for(x, x->state());
|
||||
CodeEmitInfo* info = nullptr;
|
||||
if (x->state_before() != nullptr && x->state_before()->force_reexecute()) {
|
||||
info = state_for(x, x->state_before());
|
||||
info->set_force_reexecute();
|
||||
} else {
|
||||
info = state_for(x, x->state());
|
||||
}
|
||||
|
||||
LIRItem length(x->length(), this);
|
||||
length.load_item_force(FrameMap::r9_opr);
|
||||
@@ -861,7 +876,7 @@ void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
|
||||
__ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
|
||||
|
||||
CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
|
||||
__ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);
|
||||
__ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path, x->zero_array());
|
||||
|
||||
LIR_Opr result = rlock_result(x);
|
||||
__ move(reg, result);
|
||||
|
||||
@@ -282,7 +282,7 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register
|
||||
verify_oop(obj);
|
||||
}
|
||||
|
||||
void C1_MacroAssembler::allocate_array(Register obj, Register len, Register tmp1, Register tmp2, int base_offset_in_bytes, int f, Register klass, Label& slow_case) {
|
||||
void C1_MacroAssembler::allocate_array(Register obj, Register len, Register tmp1, Register tmp2, int base_offset_in_bytes, int f, Register klass, Label& slow_case, bool zero_array) {
|
||||
assert_different_registers(obj, len, tmp1, tmp2, klass);
|
||||
|
||||
// determine alignment mask
|
||||
@@ -308,7 +308,9 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register tmp1
|
||||
|
||||
// clear rest of allocated space
|
||||
const Register len_zero = len;
|
||||
initialize_body(obj, arr_size, base_offset, len_zero);
|
||||
if (zero_array) {
|
||||
initialize_body(obj, arr_size, base_offset, len_zero);
|
||||
}
|
||||
|
||||
membar(MacroAssembler::StoreStore);
|
||||
|
||||
|
||||
@@ -101,7 +101,8 @@ using MacroAssembler::null_check;
|
||||
// base_offset_in_bytes: offset of first array element, in bytes
|
||||
// f : element scale factor
|
||||
// slow_case : exit to slow case implementation if fast allocation fails
|
||||
void allocate_array(Register obj, Register len, Register tmp1, Register tmp2, int base_offset_in_bytes, int f, Register klass, Label& slow_case);
|
||||
// zero_array : zero the allocated array or not
|
||||
void allocate_array(Register obj, Register len, Register tmp1, Register tmp2, int base_offset_in_bytes, int f, Register klass, Label& slow_case, bool zero_array);
|
||||
|
||||
int rsp_offset() const { return _rsp_offset; }
|
||||
|
||||
|
||||
@@ -2762,6 +2762,10 @@ void C2_MacroAssembler::compare_integral_v(VectorRegister vd, VectorRegister src
case BoolTest::ge: vmsge_vv(vd, src1, src2, vm); break;
case BoolTest::lt: vmslt_vv(vd, src1, src2, vm); break;
case BoolTest::gt: vmsgt_vv(vd, src1, src2, vm); break;
case BoolTest::ule: vmsleu_vv(vd, src1, src2, vm); break;
case BoolTest::uge: vmsgeu_vv(vd, src1, src2, vm); break;
case BoolTest::ult: vmsltu_vv(vd, src1, src2, vm); break;
case BoolTest::ugt: vmsgtu_vv(vd, src1, src2, vm); break;
default:
assert(false, "unsupported compare condition");
ShouldNotReachHere();
@@ -2787,6 +2791,21 @@ void C2_MacroAssembler::compare_fp_v(VectorRegister vd, VectorRegister src1, Vec
}
}

// In Matcher::scalable_predicate_reg_slots,
// we assume each predicate register is one-eighth of the size of
// scalable vector register, one mask bit per vector byte.
void C2_MacroAssembler::spill_vmask(VectorRegister v, int offset) {
vsetvli_helper(T_BYTE, MaxVectorSize >> 3);
add(t0, sp, offset);
vse8_v(v, t0);
}

void C2_MacroAssembler::unspill_vmask(VectorRegister v, int offset) {
vsetvli_helper(T_BYTE, MaxVectorSize >> 3);
add(t0, sp, offset);
vle8_v(v, t0);
}

void C2_MacroAssembler::integer_extend_v(VectorRegister dst, BasicType dst_bt, uint vector_length,
VectorRegister src, BasicType src_bt, bool is_signed) {
assert(type2aelembytes(dst_bt) > type2aelembytes(src_bt) && type2aelembytes(dst_bt) <= 8 && type2aelembytes(src_bt) <= 4, "invalid element size");

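spill_vmask/unspill_vmask above move exactly MaxVectorSize >> 3 bytes because, as the comment says, a scalable predicate register carries one mask bit per vector byte, i.e. one eighth of a vector register. A tiny illustrative check of that arithmetic (the vector sizes below are hypothetical examples, not a claim about any particular CPU):

#include <cassert>

// One mask bit per vector byte => a spilled mask occupies vector_bytes / 8 bytes.
constexpr int mask_spill_bytes(int vector_bytes) {
  return vector_bytes >> 3;
}

int main() {
  assert(mask_spill_bytes(16) == 2);   // 128-bit vectors -> 2-byte mask
  assert(mask_spill_bytes(32) == 4);   // 256-bit vectors -> 4-byte mask
  assert(mask_spill_bytes(64) == 8);   // 512-bit vectors -> 8-byte mask
  return 0;
}
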
@@ -251,20 +251,9 @@
void compare_fp_v(VectorRegister dst, VectorRegister src1, VectorRegister src2, int cond,
BasicType bt, uint vector_length, VectorMask vm = Assembler::unmasked);

// In Matcher::scalable_predicate_reg_slots,
// we assume each predicate register is one-eighth of the size of
// scalable vector register, one mask bit per vector byte.
void spill_vmask(VectorRegister v, int offset){
vsetvli_helper(T_BYTE, MaxVectorSize >> 3);
add(t0, sp, offset);
vse8_v(v, t0);
}
void spill_vmask(VectorRegister v, int offset);

void unspill_vmask(VectorRegister v, int offset){
vsetvli_helper(T_BYTE, MaxVectorSize >> 3);
add(t0, sp, offset);
vle8_v(v, t0);
}
void unspill_vmask(VectorRegister v, int offset);

void spill_copy_vmask_stack_to_stack(int src_offset, int dst_offset, uint vector_length_in_bytes) {
assert(vector_length_in_bytes % 4 == 0, "unexpected vector mask reg size");

@@ -194,7 +194,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
// Does store cross heap regions?

__ xorr(tmp1, store_addr, new_val);
__ srli(tmp1, tmp1, HeapRegion::LogOfHRGrainBytes);
__ srli(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes);
__ beqz(tmp1, done);

// crosses regions, storing null?

@@ -211,21 +211,6 @@ void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj,
}
}

void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm,
Register var_size_in_bytes,
int con_size_in_bytes,
Register tmp1) {
assert(tmp1->is_valid(), "need temp reg");

__ ld(tmp1, Address(xthread, in_bytes(JavaThread::allocated_bytes_offset())));
if (var_size_in_bytes->is_valid()) {
__ add(tmp1, tmp1, var_size_in_bytes);
} else {
__ add(tmp1, tmp1, con_size_in_bytes);
}
__ sd(tmp1, Address(xthread, in_bytes(JavaThread::allocated_bytes_offset())));
}

static volatile uint32_t _patching_epoch = 0;

address BarrierSetAssembler::patching_epoch_addr() {

@@ -45,11 +45,6 @@ enum class NMethodPatchingType {
};

class BarrierSetAssembler: public CHeapObj<mtGC> {
private:
void incr_allocated_bytes(MacroAssembler* masm,
Register var_size_in_bytes, int con_size_in_bytes,
Register t1 = noreg);

public:
virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
Register src, Register dst, Register count, RegSet saved_regs) {}

@@ -112,18 +112,14 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
|
||||
assert_different_registers(obj, pre_val, tmp1, tmp2);
|
||||
assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");
|
||||
|
||||
Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()));
|
||||
Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
|
||||
Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
|
||||
|
||||
// Is marking active?
|
||||
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
|
||||
__ lwu(tmp1, in_progress);
|
||||
} else {
|
||||
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
|
||||
__ lbu(tmp1, in_progress);
|
||||
}
|
||||
__ beqz(tmp1, done);
|
||||
Address gc_state(xthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
|
||||
__ lbu(t1, gc_state);
|
||||
__ test_bit(t1, t1, ShenandoahHeap::MARKING_BITPOS);
|
||||
__ beqz(t1, done);
|
||||
|
||||
// Do we need to load the previous value?
|
||||
if (obj != noreg) {
|
||||
|
||||
@@ -629,7 +629,7 @@ void ZBarrierSetAssembler::patch_barrier_relocation(address addr, int format) {
|
||||
case ZBarrierRelocationFormatMarkBadMask:
|
||||
case ZBarrierRelocationFormatStoreGoodBits:
|
||||
case ZBarrierRelocationFormatStoreBadMask:
|
||||
assert(NativeInstruction::is_li16u_at(addr), "invalide zgc barrier");
|
||||
assert(MacroAssembler::is_li16u_at(addr), "invalide zgc barrier");
|
||||
bytes = MacroAssembler::pd_patch_instruction_size(addr, (address)(uintptr_t)value);
|
||||
break;
|
||||
default:
|
||||
|
||||
@@ -42,8 +42,10 @@ jint CodeInstaller::pd_next_offset(NativeInstruction* inst, jint pc_offset, JVMC
|
||||
return pc_offset + NativeCall::instruction_size;
|
||||
} else if (inst->is_jump()) {
|
||||
return pc_offset + NativeJump::instruction_size;
|
||||
} else if (inst->is_movptr()) {
|
||||
return pc_offset + NativeMovConstReg::movptr_instruction_size;
|
||||
} else if (inst->is_movptr1()) {
|
||||
return pc_offset + NativeMovConstReg::movptr1_instruction_size;
|
||||
} else if (inst->is_movptr2()) {
|
||||
return pc_offset + NativeMovConstReg::movptr2_instruction_size;
|
||||
} else {
|
||||
JVMCI_ERROR_0("unsupported type of instruction for call site");
|
||||
}
|
||||
|
||||
@@ -38,7 +38,6 @@
|
||||
#include "interpreter/interpreter.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "memory/universe.hpp"
|
||||
#include "nativeInst_riscv.hpp"
|
||||
#include "oops/accessDecorators.hpp"
|
||||
#include "oops/compressedKlass.inline.hpp"
|
||||
#include "oops/compressedOops.inline.hpp"
|
||||
@@ -65,6 +64,138 @@
|
||||
#define STOP(str) stop(str);
|
||||
#define BIND(label) bind(label); __ BLOCK_COMMENT(#label ":")
|
||||
|
||||
|
||||
|
||||
Register MacroAssembler::extract_rs1(address instr) {
|
||||
assert_cond(instr != nullptr);
|
||||
return as_Register(Assembler::extract(Assembler::ld_instr(instr), 19, 15));
|
||||
}
|
||||
|
||||
Register MacroAssembler::extract_rs2(address instr) {
|
||||
assert_cond(instr != nullptr);
|
||||
return as_Register(Assembler::extract(Assembler::ld_instr(instr), 24, 20));
|
||||
}
|
||||
|
||||
Register MacroAssembler::extract_rd(address instr) {
|
||||
assert_cond(instr != nullptr);
|
||||
return as_Register(Assembler::extract(Assembler::ld_instr(instr), 11, 7));
|
||||
}
|
||||
|
||||
uint32_t MacroAssembler::extract_opcode(address instr) {
|
||||
assert_cond(instr != nullptr);
|
||||
return Assembler::extract(Assembler::ld_instr(instr), 6, 0);
|
||||
}
|
||||
|
||||
uint32_t MacroAssembler::extract_funct3(address instr) {
|
||||
assert_cond(instr != nullptr);
|
||||
return Assembler::extract(Assembler::ld_instr(instr), 14, 12);
|
||||
}
|
||||
|
||||
bool MacroAssembler::is_pc_relative_at(address instr) {
|
||||
// auipc + jalr
|
||||
// auipc + addi
|
||||
// auipc + load
|
||||
// auipc + fload_load
|
||||
return (is_auipc_at(instr)) &&
|
||||
(is_addi_at(instr + instruction_size) ||
|
||||
is_jalr_at(instr + instruction_size) ||
|
||||
is_load_at(instr + instruction_size) ||
|
||||
is_float_load_at(instr + instruction_size)) &&
|
||||
check_pc_relative_data_dependency(instr);
|
||||
}
|
||||
|
||||
// ie:ld(Rd, Label)
|
||||
bool MacroAssembler::is_load_pc_relative_at(address instr) {
|
||||
return is_auipc_at(instr) && // auipc
|
||||
is_ld_at(instr + instruction_size) && // ld
|
||||
check_load_pc_relative_data_dependency(instr);
|
||||
}
|
||||
|
||||
bool MacroAssembler::is_movptr1_at(address instr) {
|
||||
return is_lui_at(instr) && // Lui
|
||||
is_addi_at(instr + instruction_size) && // Addi
|
||||
is_slli_shift_at(instr + instruction_size * 2, 11) && // Slli Rd, Rs, 11
|
||||
is_addi_at(instr + instruction_size * 3) && // Addi
|
||||
is_slli_shift_at(instr + instruction_size * 4, 6) && // Slli Rd, Rs, 6
|
||||
(is_addi_at(instr + instruction_size * 5) ||
|
||||
is_jalr_at(instr + instruction_size * 5) ||
|
||||
is_load_at(instr + instruction_size * 5)) && // Addi/Jalr/Load
|
||||
check_movptr1_data_dependency(instr);
|
||||
}
|
||||
|
||||
bool MacroAssembler::is_movptr2_at(address instr) {
|
||||
return is_lui_at(instr) && // lui
|
||||
is_lui_at(instr + instruction_size) && // lui
|
||||
is_slli_shift_at(instr + instruction_size * 2, 18) && // slli Rd, Rs, 18
|
||||
is_add_at(instr + instruction_size * 3) &&
|
||||
(is_addi_at(instr + instruction_size * 4) ||
|
||||
is_jalr_at(instr + instruction_size * 4) ||
|
||||
is_load_at(instr + instruction_size * 4)) && // Addi/Jalr/Load
|
||||
check_movptr2_data_dependency(instr);
|
||||
}
|
||||
|
||||
bool MacroAssembler::is_li16u_at(address instr) {
|
||||
return is_lui_at(instr) && // lui
|
||||
is_srli_at(instr + instruction_size) && // srli
|
||||
check_li16u_data_dependency(instr);
|
||||
}
|
||||
|
||||
bool MacroAssembler::is_li32_at(address instr) {
|
||||
return is_lui_at(instr) && // lui
|
||||
is_addiw_at(instr + instruction_size) && // addiw
|
||||
check_li32_data_dependency(instr);
|
||||
}
|
||||
|
||||
bool MacroAssembler::is_li64_at(address instr) {
|
||||
return is_lui_at(instr) && // lui
|
||||
is_addi_at(instr + instruction_size) && // addi
|
||||
is_slli_shift_at(instr + instruction_size * 2, 12) && // Slli Rd, Rs, 12
|
||||
is_addi_at(instr + instruction_size * 3) && // addi
|
||||
is_slli_shift_at(instr + instruction_size * 4, 12) && // Slli Rd, Rs, 12
|
||||
is_addi_at(instr + instruction_size * 5) && // addi
|
||||
is_slli_shift_at(instr + instruction_size * 6, 8) && // Slli Rd, Rs, 8
|
||||
is_addi_at(instr + instruction_size * 7) && // addi
|
||||
check_li64_data_dependency(instr);
|
||||
}
|
||||
|
||||
bool MacroAssembler::is_lwu_to_zr(address instr) {
|
||||
assert_cond(instr != nullptr);
|
||||
return (extract_opcode(instr) == 0b0000011 &&
|
||||
extract_funct3(instr) == 0b110 &&
|
||||
extract_rd(instr) == zr); // zr
|
||||
}
|
||||
|
||||
uint32_t MacroAssembler::get_membar_kind(address addr) {
|
||||
assert_cond(addr != nullptr);
|
||||
assert(is_membar(addr), "no membar found");
|
||||
|
||||
uint32_t insn = Bytes::get_native_u4(addr);
|
||||
|
||||
uint32_t predecessor = Assembler::extract(insn, 27, 24);
|
||||
uint32_t successor = Assembler::extract(insn, 23, 20);
|
||||
|
||||
return MacroAssembler::pred_succ_to_membar_mask(predecessor, successor);
|
||||
}
|
||||
|
||||
void MacroAssembler::set_membar_kind(address addr, uint32_t order_kind) {
|
||||
assert_cond(addr != nullptr);
|
||||
assert(is_membar(addr), "no membar found");
|
||||
|
||||
uint32_t predecessor = 0;
|
||||
uint32_t successor = 0;
|
||||
|
||||
MacroAssembler::membar_mask_to_pred_succ(order_kind, predecessor, successor);
|
||||
|
||||
uint32_t insn = Bytes::get_native_u4(addr);
|
||||
address pInsn = (address) &insn;
|
||||
Assembler::patch(pInsn, 27, 24, predecessor);
|
||||
Assembler::patch(pInsn, 23, 20, successor);
|
||||
|
||||
address membar = addr;
|
||||
Assembler::sd_instr(membar, insn);
|
||||
}
|
||||
|
||||
|
||||
static void pass_arg0(MacroAssembler* masm, Register arg) {
|
||||
if (c_rarg0 != arg) {
|
||||
masm->mv(c_rarg0, arg);
|
||||
@@ -640,7 +771,7 @@ void MacroAssembler::emit_static_call_stub() {

// Jump to the entry point of the c2i stub.
int32_t offset = 0;
movptr(t0, 0, offset);
movptr(t0, 0, offset, t1); // lui + lui + slli + add
jr(t0, offset);
}

@@ -1405,7 +1536,7 @@ static int patch_offset_in_jal(address branch, int64_t offset) {
Assembler::patch(branch, 30, 21, (offset >> 1) & 0x3ff); // offset[10:1] ==> branch[30:21]
Assembler::patch(branch, 20, 20, (offset >> 11) & 0x1); // offset[11] ==> branch[20]
Assembler::patch(branch, 19, 12, (offset >> 12) & 0xff); // offset[19:12] ==> branch[19:12]
return NativeInstruction::instruction_size; // only one instruction
return MacroAssembler::instruction_size; // only one instruction
}

static int patch_offset_in_conditional_branch(address branch, int64_t offset) {
@@ -1415,25 +1546,44 @@ static int patch_offset_in_conditional_branch(address branch, int64_t offset) {
Assembler::patch(branch, 30, 25, (offset >> 5) & 0x3f); // offset[10:5] ==> branch[30:25]
Assembler::patch(branch, 7, 7, (offset >> 11) & 0x1); // offset[11] ==> branch[7]
Assembler::patch(branch, 11, 8, (offset >> 1) & 0xf); // offset[4:1] ==> branch[11:8]
return NativeInstruction::instruction_size; // only one instruction
return MacroAssembler::instruction_size; // only one instruction
}

static int patch_offset_in_pc_relative(address branch, int64_t offset) {
const int PC_RELATIVE_INSTRUCTION_NUM = 2; // auipc, addi/jalr/load
Assembler::patch(branch, 31, 12, ((offset + 0x800) >> 12) & 0xfffff); // Auipc. offset[31:12] ==> branch[31:12]
Assembler::patch(branch + 4, 31, 20, offset & 0xfff); // Addi/Jalr/Load. offset[11:0] ==> branch[31:20]
return PC_RELATIVE_INSTRUCTION_NUM * NativeInstruction::instruction_size;
return PC_RELATIVE_INSTRUCTION_NUM * MacroAssembler::instruction_size;
}

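patch_offset_in_pc_relative writes offset[31:12] into the auipc and offset[11:0] into the following addi/jalr/load; the + 0x800 compensates for the second instruction sign-extending its 12-bit immediate. A self-contained round-trip sketch of that arithmetic, for illustration only, with offsets inside the +/-2 GiB range the pair can reach:

#include <cassert>
#include <cstdint>

int64_t reassemble_pc_relative(int64_t offset) {
  int64_t hi20 = ((offset + 0x800) >> 12) & 0xfffff;  // value patched into the auipc field
  int64_t lo12 = offset & 0xfff;                      // value patched into the 12-bit field
  // What the two instructions effectively add: both fields are treated as signed.
  int64_t hi = (hi20 >= 0x80000) ? hi20 - 0x100000 : hi20;
  int64_t lo = (lo12 >= 0x800)   ? lo12 - 0x1000   : lo12;
  return (hi << 12) + lo;
}

int main() {
  for (int64_t off : {0x12345678LL, 0x7ffLL, 0x800LL, -0x800LL, -0x12345LL}) {
    assert(reassemble_pc_relative(off) == off);
  }
  return 0;
}
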
static int patch_addr_in_movptr(address branch, address target) {
const int MOVPTR_INSTRUCTIONS_NUM = 6; // lui + addi + slli + addi + slli + addi/jalr/load
static int patch_addr_in_movptr1(address branch, address target) {
int32_t lower = ((intptr_t)target << 35) >> 35;
int64_t upper = ((intptr_t)target - lower) >> 29;
Assembler::patch(branch + 0, 31, 12, upper & 0xfffff); // Lui. target[48:29] + target[28] ==> branch[31:12]
Assembler::patch(branch + 4, 31, 20, (lower >> 17) & 0xfff); // Addi. target[28:17] ==> branch[31:20]
Assembler::patch(branch + 12, 31, 20, (lower >> 6) & 0x7ff); // Addi. target[16: 6] ==> branch[31:20]
Assembler::patch(branch + 20, 31, 20, lower & 0x3f); // Addi/Jalr/Load. target[ 5: 0] ==> branch[31:20]
return MOVPTR_INSTRUCTIONS_NUM * NativeInstruction::instruction_size;
return MacroAssembler::movptr1_instruction_size;
}

static int patch_addr_in_movptr2(address instruction_address, address target) {
uintptr_t addr = (uintptr_t)target;

assert(addr < (1ull << 48), "48-bit overflow in address constant");
unsigned int upper18 = (addr >> 30ull);
int lower30 = (addr & 0x3fffffffu);
int low12 = (lower30 << 20) >> 20;
int mid18 = ((lower30 - low12) >> 12);

Assembler::patch(instruction_address + (MacroAssembler::instruction_size * 0), 31, 12, (upper18 & 0xfffff)); // Lui
Assembler::patch(instruction_address + (MacroAssembler::instruction_size * 1), 31, 12, (mid18 & 0xfffff)); // Lui
// Slli
// Add
Assembler::patch(instruction_address + (MacroAssembler::instruction_size * 4), 31, 20, low12 & 0xfff); // Addi/Jalr/Load

assert(MacroAssembler::target_addr_for_insn(instruction_address) == target, "Must be");

return MacroAssembler::movptr2_instruction_size;
}

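patch_addr_in_movptr2 splits a 48-bit target into an upper 18-bit field, a middle field and a sign-extended low 12-bit field, with the middle field adjusted to compensate for that sign extension. A stand-alone round-trip check of the same decomposition, purely illustrative and independent of the instruction encoding:

#include <cassert>
#include <cstdint>

uint64_t reassemble_movptr2(uint64_t addr) {
  assert(addr < (1ull << 48));
  uint64_t upper18 = addr >> 30;                    // goes into the first lui
  int64_t lower30 = (int64_t)(addr & 0x3fffffffu);
  int64_t low12 = lower30 & 0xfff;                  // 12-bit field, sign-extended by hardware
  if (low12 >= 0x800) low12 -= 0x1000;
  int64_t mid18 = (lower30 - low12) >> 12;          // exact: the low 12 bits are now zero
  return (upper18 << 30) + (uint64_t)((mid18 << 12) + low12);
}

int main() {
  for (uint64_t a : {0x00007f80deadbeefull, 0x0000000000000800ull, 0x0000ffffffff07ffull}) {
    assert(reassemble_movptr2(a) == a);
  }
  return 0;
}
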
static int patch_imm_in_li64(address branch, address target) {
|
||||
@@ -1454,12 +1604,12 @@ static int patch_imm_in_li64(address branch, address target) {
|
||||
Assembler::patch(branch + 12, 31, 20, ((int32_t)lower >> 20) & 0xfff); // Addi.
|
||||
Assembler::patch(branch + 20, 31, 20, (((intptr_t)target << 44) >> 52) & 0xfff); // Addi.
|
||||
Assembler::patch(branch + 28, 31, 20, (intptr_t)target & 0xff); // Addi.
|
||||
return LI64_INSTRUCTIONS_NUM * NativeInstruction::instruction_size;
|
||||
return LI64_INSTRUCTIONS_NUM * MacroAssembler::instruction_size;
|
||||
}
|
||||
|
||||
static int patch_imm_in_li16u(address branch, uint16_t target) {
|
||||
Assembler::patch(branch, 31, 12, target); // patch lui only
|
||||
return NativeInstruction::instruction_size;
|
||||
return MacroAssembler::instruction_size;
|
||||
}
|
||||
|
||||
int MacroAssembler::patch_imm_in_li32(address branch, int32_t target) {
|
||||
@@ -1470,7 +1620,7 @@ int MacroAssembler::patch_imm_in_li32(address branch, int32_t target) {
|
||||
upper = (int32_t)upper;
|
||||
Assembler::patch(branch + 0, 31, 12, (upper >> 12) & 0xfffff); // Lui.
|
||||
Assembler::patch(branch + 4, 31, 20, lower & 0xfff); // Addiw.
|
||||
return LI32_INSTRUCTIONS_NUM * NativeInstruction::instruction_size;
|
||||
return LI32_INSTRUCTIONS_NUM * MacroAssembler::instruction_size;
|
||||
}
|
||||
|
||||
static long get_offset_of_jal(address insn_addr) {
|
||||
@@ -1507,7 +1657,7 @@ static long get_offset_of_pc_relative(address insn_addr) {
|
||||
return offset;
|
||||
}
|
||||
|
||||
static address get_target_of_movptr(address insn_addr) {
|
||||
static address get_target_of_movptr1(address insn_addr) {
|
||||
assert_cond(insn_addr != nullptr);
|
||||
intptr_t target_address = (((int64_t)Assembler::sextract(Assembler::ld_instr(insn_addr), 31, 12)) & 0xfffff) << 29; // Lui.
|
||||
target_address += ((int64_t)Assembler::sextract(Assembler::ld_instr(insn_addr + 4), 31, 20)) << 17; // Addi.
|
||||
@@ -1516,6 +1666,17 @@ static address get_target_of_movptr(address insn_addr) {
|
||||
return (address) target_address;
|
||||
}
|
||||
|
||||
static address get_target_of_movptr2(address insn_addr) {
|
||||
assert_cond(insn_addr != nullptr);
|
||||
int32_t upper18 = ((Assembler::sextract(Assembler::ld_instr(insn_addr + MacroAssembler::instruction_size * 0), 31, 12)) & 0xfffff); // Lui
|
||||
int32_t mid18 = ((Assembler::sextract(Assembler::ld_instr(insn_addr + MacroAssembler::instruction_size * 1), 31, 12)) & 0xfffff); // Lui
|
||||
// 2 // Slli
|
||||
// 3 // Add
|
||||
int32_t low12 = ((Assembler::sextract(Assembler::ld_instr(insn_addr + MacroAssembler::instruction_size * 4), 31, 20))); // Addi/Jalr/Load.
|
||||
address ret = (address)(((intptr_t)upper18<<30ll) + ((intptr_t)mid18<<12ll) + low12);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static address get_target_of_li64(address insn_addr) {
|
||||
assert_cond(insn_addr != nullptr);
|
||||
intptr_t target_address = (((int64_t)Assembler::sextract(Assembler::ld_instr(insn_addr), 31, 12)) & 0xfffff) << 44; // Lui.
|
||||
@@ -1535,30 +1696,32 @@ address MacroAssembler::get_target_of_li32(address insn_addr) {
|
||||
|
||||
// Patch any kind of instruction; there may be several instructions.
|
||||
// Return the total length (in bytes) of the instructions.
|
||||
int MacroAssembler::pd_patch_instruction_size(address branch, address target) {
|
||||
assert_cond(branch != nullptr);
|
||||
int64_t offset = target - branch;
|
||||
if (NativeInstruction::is_jal_at(branch)) { // jal
|
||||
return patch_offset_in_jal(branch, offset);
|
||||
} else if (NativeInstruction::is_branch_at(branch)) { // beq/bge/bgeu/blt/bltu/bne
|
||||
return patch_offset_in_conditional_branch(branch, offset);
|
||||
} else if (NativeInstruction::is_pc_relative_at(branch)) { // auipc, addi/jalr/load
|
||||
return patch_offset_in_pc_relative(branch, offset);
|
||||
} else if (NativeInstruction::is_movptr_at(branch)) { // movptr
|
||||
return patch_addr_in_movptr(branch, target);
|
||||
} else if (NativeInstruction::is_li64_at(branch)) { // li64
|
||||
return patch_imm_in_li64(branch, target);
|
||||
} else if (NativeInstruction::is_li32_at(branch)) { // li32
|
||||
int MacroAssembler::pd_patch_instruction_size(address instruction_address, address target) {
|
||||
assert_cond(instruction_address != nullptr);
|
||||
int64_t offset = target - instruction_address;
|
||||
if (MacroAssembler::is_jal_at(instruction_address)) { // jal
|
||||
return patch_offset_in_jal(instruction_address, offset);
|
||||
} else if (MacroAssembler::is_branch_at(instruction_address)) { // beq/bge/bgeu/blt/bltu/bne
|
||||
return patch_offset_in_conditional_branch(instruction_address, offset);
|
||||
} else if (MacroAssembler::is_pc_relative_at(instruction_address)) { // auipc, addi/jalr/load
|
||||
return patch_offset_in_pc_relative(instruction_address, offset);
|
||||
} else if (MacroAssembler::is_movptr1_at(instruction_address)) { // movptr1
|
||||
return patch_addr_in_movptr1(instruction_address, target);
|
||||
} else if (MacroAssembler::is_movptr2_at(instruction_address)) { // movptr2
|
||||
return patch_addr_in_movptr2(instruction_address, target);
|
||||
} else if (MacroAssembler::is_li64_at(instruction_address)) { // li64
|
||||
return patch_imm_in_li64(instruction_address, target);
|
||||
} else if (MacroAssembler::is_li32_at(instruction_address)) { // li32
|
||||
int64_t imm = (intptr_t)target;
|
||||
return patch_imm_in_li32(branch, (int32_t)imm);
|
||||
} else if (NativeInstruction::is_li16u_at(branch)) {
|
||||
return patch_imm_in_li32(instruction_address, (int32_t)imm);
|
||||
} else if (MacroAssembler::is_li16u_at(instruction_address)) {
|
||||
int64_t imm = (intptr_t)target;
|
||||
return patch_imm_in_li16u(branch, (uint16_t)imm);
|
||||
return patch_imm_in_li16u(instruction_address, (uint16_t)imm);
|
||||
} else {
|
||||
#ifdef ASSERT
|
||||
tty->print_cr("pd_patch_instruction_size: instruction 0x%x at " INTPTR_FORMAT " could not be patched!\n",
|
||||
Assembler::ld_instr(branch), p2i(branch));
|
||||
Disassembler::decode(branch - 16, branch + 16);
|
||||
Assembler::ld_instr(instruction_address), p2i(instruction_address));
|
||||
Disassembler::decode(instruction_address - 16, instruction_address + 16);
|
||||
#endif
|
||||
ShouldNotReachHere();
|
||||
return -1;
|
||||
@@ -1568,17 +1731,19 @@ int MacroAssembler::pd_patch_instruction_size(address branch, address target) {
|
||||
address MacroAssembler::target_addr_for_insn(address insn_addr) {
|
||||
long offset = 0;
|
||||
assert_cond(insn_addr != nullptr);
|
||||
if (NativeInstruction::is_jal_at(insn_addr)) { // jal
|
||||
if (MacroAssembler::is_jal_at(insn_addr)) { // jal
|
||||
offset = get_offset_of_jal(insn_addr);
|
||||
} else if (NativeInstruction::is_branch_at(insn_addr)) { // beq/bge/bgeu/blt/bltu/bne
|
||||
} else if (MacroAssembler::is_branch_at(insn_addr)) { // beq/bge/bgeu/blt/bltu/bne
|
||||
offset = get_offset_of_conditional_branch(insn_addr);
|
||||
} else if (NativeInstruction::is_pc_relative_at(insn_addr)) { // auipc, addi/jalr/load
|
||||
} else if (MacroAssembler::is_pc_relative_at(insn_addr)) { // auipc, addi/jalr/load
|
||||
offset = get_offset_of_pc_relative(insn_addr);
|
||||
} else if (NativeInstruction::is_movptr_at(insn_addr)) { // movptr
|
||||
return get_target_of_movptr(insn_addr);
|
||||
} else if (NativeInstruction::is_li64_at(insn_addr)) { // li64
|
||||
} else if (MacroAssembler::is_movptr1_at(insn_addr)) { // movptr1
|
||||
return get_target_of_movptr1(insn_addr);
|
||||
} else if (MacroAssembler::is_movptr2_at(insn_addr)) { // movptr2
|
||||
return get_target_of_movptr2(insn_addr);
|
||||
} else if (MacroAssembler::is_li64_at(insn_addr)) { // li64
|
||||
return get_target_of_li64(insn_addr);
|
||||
} else if (NativeInstruction::is_li32_at(insn_addr)) { // li32
|
||||
} else if (MacroAssembler::is_li32_at(insn_addr)) { // li32
|
||||
return get_target_of_li32(insn_addr);
|
||||
} else {
|
||||
ShouldNotReachHere();
|
||||
@@ -1590,13 +1755,16 @@ int MacroAssembler::patch_oop(address insn_addr, address o) {
|
||||
// OOPs are either narrow (32 bits) or wide (48 bits). We encode
|
||||
// narrow OOPs by setting the upper 16 bits in the first
|
||||
// instruction.
|
||||
if (NativeInstruction::is_li32_at(insn_addr)) {
|
||||
if (MacroAssembler::is_li32_at(insn_addr)) {
|
||||
// Move narrow OOP
|
||||
uint32_t n = CompressedOops::narrow_oop_value(cast_to_oop(o));
|
||||
return patch_imm_in_li32(insn_addr, (int32_t)n);
|
||||
} else if (NativeInstruction::is_movptr_at(insn_addr)) {
|
||||
} else if (MacroAssembler::is_movptr1_at(insn_addr)) {
|
||||
// Move wide OOP
|
||||
return patch_addr_in_movptr(insn_addr, o);
|
||||
return patch_addr_in_movptr1(insn_addr, o);
|
||||
} else if (MacroAssembler::is_movptr2_at(insn_addr)) {
|
||||
// Move wide OOP
|
||||
return patch_addr_in_movptr2(insn_addr, o);
|
||||
}
|
||||
ShouldNotReachHere();
|
||||
return -1;
|
||||
@@ -1617,17 +1785,49 @@ void MacroAssembler::reinit_heapbase() {
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::movptr(Register Rd, address addr, int32_t &offset) {
|
||||
int64_t imm64 = (int64_t)addr;
|
||||
void MacroAssembler::movptr(Register Rd, address addr, Register temp) {
|
||||
int offset = 0;
|
||||
movptr(Rd, addr, offset, temp);
|
||||
addi(Rd, Rd, offset);
|
||||
}
|
||||
|
||||
void MacroAssembler::movptr(Register Rd, address addr, int32_t &offset, Register temp) {
|
||||
uint64_t uimm64 = (uint64_t)addr;
|
||||
#ifndef PRODUCT
|
||||
{
|
||||
char buffer[64];
|
||||
snprintf(buffer, sizeof(buffer), "0x%" PRIx64, imm64);
|
||||
snprintf(buffer, sizeof(buffer), "0x%" PRIx64, uimm64);
|
||||
block_comment(buffer);
|
||||
}
|
||||
#endif
|
||||
assert((uintptr_t)imm64 < (1ull << 48), "48-bit overflow in address constant");
|
||||
assert(uimm64 < (1ull << 48), "48-bit overflow in address constant");
|
||||
|
||||
if (temp == noreg) {
|
||||
movptr1(Rd, uimm64, offset);
|
||||
} else {
|
||||
movptr2(Rd, uimm64, offset, temp);
|
||||
}
|
||||
}
|
||||
|
||||
void MacroAssembler::movptr1(Register Rd, uint64_t imm64, int32_t &offset) {
// Load upper 31 bits
//
// In case of 11th bit of `lower` is 0, it's straightforward to understand.
// In case of 11th bit of `lower` is 1, it's a bit tricky, to help understand,
// imagine divide both `upper` and `lower` into 2 parts respectively, i.e.
// [upper_20, upper_12], [lower_20, lower_12], they are the same just before
// `lower = (lower << 52) >> 52;`.
// After `upper -= lower;`,
// upper_20' = upper_20 - (-1) == upper_20 + 1
// upper_12 = 0x000
// After `lui(Rd, upper);`, `Rd` = upper_20' << 12
// Also divide `Rd` into 2 parts [Rd_20, Rd_12],
// Rd_20 == upper_20'
// Rd_12 == 0x000
// After `addi(Rd, Rd, lower);`,
// Rd_20 = upper_20' + (-1) == upper_20 + 1 - 1 = upper_20
// Rd_12 = lower_12
// So, finally Rd == [upper_20, lower_12]
int64_t imm = imm64 >> 17;
int64_t upper = imm, lower = imm;
lower = (lower << 52) >> 52;
@@ -1645,6 +1845,28 @@ void MacroAssembler::movptr(Register Rd, address addr, int32_t &offset) {
offset = imm64 & 0x3f;
}

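The long comment in movptr1 above describes the standard lui/addi compensation: take the sign-extended low 12 bits as lower, subtract them from the value so that upper ends in twelve zero bits, and let the later addi restore the original value even when bit 11 of lower is set. A small stand-alone demonstration of that identity (illustrative only, not tied to the instruction encoding):

#include <cassert>
#include <cstdint>

void split_for_lui_addi(int64_t imm, int64_t& upper, int64_t& lower) {
  lower = imm & 0xfff;
  if (lower >= 0x800) {
    lower -= 0x1000;        // sign-extend the 12-bit low part, as addi will
  }
  upper = imm - lower;      // low 12 bits are now zero, so lui can materialize it
}

int main() {
  for (int64_t imm : {0x12345678LL, 0x7ffLL, 0x800LL, 0x1fff8ffLL}) {
    int64_t upper = 0, lower = 0;
    split_for_lui_addi(imm, upper, lower);
    assert((upper & 0xfff) == 0);      // exactly the shape a lui immediate can hold
    assert(upper + lower == imm);      // the addi brings the value back
  }
  return 0;
}
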
void MacroAssembler::movptr2(Register Rd, uint64_t addr, int32_t &offset, Register tmp) {
assert_different_registers(Rd, tmp, noreg);

// addr: [upper18, lower30[mid18, lower12]]

int64_t upper18 = addr >> 18;
lui(tmp, upper18);

int64_t lower30 = addr & 0x3fffffff;
int64_t mid18 = lower30, lower12 = lower30;
lower12 = (lower12 << 52) >> 52;
// For this tricky part (`mid18 -= lower12;` + `offset = lower12;`),
// please refer to movptr1 above.
mid18 -= (int32_t)lower12;
lui(Rd, mid18);

slli(tmp, tmp, 18);
add(Rd, Rd, tmp);

offset = lower12;
}

void MacroAssembler::add(Register Rd, Register Rn, int64_t increment, Register temp) {
|
||||
if (is_simm12(increment)) {
|
||||
addi(Rd, Rn, increment);
|
||||
@@ -2120,6 +2342,7 @@ void MacroAssembler::movoop(Register dst, jobject obj) {
|
||||
|
||||
// Move a metadata address into a register.
|
||||
void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
|
||||
assert((uintptr_t)obj < (1ull << 48), "48-bit overflow in metadata");
|
||||
int oop_index;
|
||||
if (obj == nullptr) {
|
||||
oop_index = oop_recorder()->allocate_metadata_index(obj);
|
||||
@@ -2687,14 +2910,13 @@ void MacroAssembler::lookup_virtual_method(Register recv_klass,
}

void MacroAssembler::membar(uint32_t order_constraint) {
address prev = pc() - NativeMembar::instruction_size;
address prev = pc() - MacroAssembler::instruction_size;
address last = code()->last_insn();

if (last != nullptr && nativeInstruction_at(last)->is_membar() && prev == last) {
NativeMembar *bar = NativeMembar_at(prev);
if (last != nullptr && is_membar(last) && prev == last) {
// We are merging two memory barrier instructions. On RISCV we
// can do this simply by ORing them together.
bar->set_kind(bar->get_kind() | order_constraint);
set_membar_kind(prev, get_membar_kind(prev) | order_constraint);
BLOCK_COMMENT("merged membar");
} else {
code()->set_last_insn(pc());
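Replacing the NativeMembar accessors with get_membar_kind/set_membar_kind keeps the existing optimization: two adjacent fences are merged by ORing their ordering masks, since (as the comment says) the union of the predecessor/successor sets is just a bitwise OR on this encoding. A toy illustration with made-up mask bits; these constants are not HotSpot's real membar encoding:

#include <cassert>
#include <cstdint>

constexpr uint32_t kLoadLoad   = 1u << 0;
constexpr uint32_t kLoadStore  = 1u << 1;
constexpr uint32_t kStoreStore = 1u << 2;
constexpr uint32_t kStoreLoad  = 1u << 3;

int main() {
  uint32_t first  = kLoadLoad | kLoadStore;   // kind of the barrier already emitted
  uint32_t second = kStoreStore;              // kind of the barrier about to be emitted
  uint32_t merged = first | second;           // one fence ordering the union of both
  assert(merged == (kLoadLoad | kLoadStore | kStoreStore));
  assert((merged & kStoreLoad) == 0);         // nothing is added that neither barrier asked for
  return 0;
}
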
@@ -3554,14 +3776,14 @@ address MacroAssembler::trampoline_call(Address entry) {
|
||||
address MacroAssembler::ic_call(address entry, jint method_index) {
|
||||
RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
|
||||
IncompressibleRegion ir(this); // relocations
|
||||
movptr(t1, (address)Universe::non_oop_word());
|
||||
movptr(t1, (address)Universe::non_oop_word(), t0);
|
||||
assert_cond(entry != nullptr);
|
||||
return trampoline_call(Address(entry, rh));
|
||||
}
|
||||
|
||||
int MacroAssembler::ic_check_size() {
|
||||
// No compressed
|
||||
return (NativeInstruction::instruction_size * (2 /* 2 loads */ + 1 /* branch */)) +
|
||||
return (MacroAssembler::instruction_size * (2 /* 2 loads */ + 1 /* branch */)) +
|
||||
far_branch_size();
|
||||
}
|
||||
|
||||
@@ -3628,7 +3850,7 @@ address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
|
||||
// instructions code-section.
|
||||
|
||||
// Make sure the address of destination 8-byte aligned after 3 instructions.
|
||||
align(wordSize, NativeCallTrampolineStub::data_offset);
|
||||
align(wordSize, MacroAssembler::trampoline_stub_data_offset);
|
||||
|
||||
RelocationHolder rh = trampoline_stub_Relocation::spec(code()->insts()->start() +
|
||||
insts_call_instruction_offset);
|
||||
@@ -3641,7 +3863,7 @@ address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
|
||||
ld(t0, target); // auipc + ld
|
||||
jr(t0); // jalr
|
||||
bind(target);
|
||||
assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
|
||||
assert(offset() - stub_start_offset == MacroAssembler::trampoline_stub_data_offset,
|
||||
"should be");
|
||||
assert(offset() % wordSize == 0, "bad alignment");
|
||||
emit_int64((int64_t)dest);
|
||||
@@ -3649,7 +3871,7 @@ address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
|
||||
|
||||
const address stub_start_addr = addr_at(stub_start_offset);
|
||||
|
||||
assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");
|
||||
assert(MacroAssembler::is_trampoline_stub_at(stub_start_addr), "doesn't look like a trampoline");
|
||||
|
||||
end_a_stub();
|
||||
return stub_start_addr;
|
||||
@@ -3657,12 +3879,12 @@ address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,

int MacroAssembler::max_trampoline_stub_size() {
// Max stub size: alignment nop, TrampolineStub.
return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
return MacroAssembler::instruction_size + MacroAssembler::trampoline_stub_instruction_size;
}

int MacroAssembler::static_call_stub_size() {
// (lui, addi, slli, addi, slli, addi) + (lui, addi, slli, addi, slli) + jalr
return 12 * NativeInstruction::instruction_size;
// (lui, addi, slli, addi, slli, addi) + (lui + lui + slli + add) + jalr
return 11 * MacroAssembler::instruction_size;
}

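The static call stub shrinks from 12 to 11 instructions because the second constant can now be loaded with the shorter movptr2 form (lui + lui + slli + add, with the final 12 bits folded into the jalr offset) once a scratch register is available. A compile-time restatement of the instruction counts quoted in the comments above, assuming the fixed 4-byte base encoding:

// metadata load: movptr1 = lui, addi, slli, addi, slli, addi
constexpr int kMovptr1Insns = 6;
// c2i entry load: movptr2 without the trailing addi = lui, lui, slli, add
constexpr int kMovptr2Insns = 4;
constexpr int kJalrInsns    = 1;
constexpr int kInsnBytes    = 4;

static_assert(kMovptr1Insns + kMovptr2Insns + kJalrInsns == 11,
              "new stub is 11 instructions, one fewer than before");
static_assert((kMovptr1Insns + kMovptr2Insns + kJalrInsns) * kInsnBytes == 44,
              "11 instructions at 4 bytes each");
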
Address MacroAssembler::add_memory_helper(const Address dst, Register tmp) {
|
||||
@@ -4120,7 +4342,7 @@ void MacroAssembler::multiply_128_x_128_loop(Register y, Register z,
|
||||
* x12: y
|
||||
* x13: ylen
|
||||
* x14: z
|
||||
* x15: zlen
|
||||
* x15: tmp0
|
||||
* x16: tmp1
|
||||
* x17: tmp2
|
||||
* x7: tmp3
|
||||
@@ -4130,10 +4352,10 @@ void MacroAssembler::multiply_128_x_128_loop(Register y, Register z,
|
||||
* x31: tmp7
|
||||
*/
|
||||
void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen,
|
||||
Register z, Register zlen,
|
||||
Register z, Register tmp0,
|
||||
Register tmp1, Register tmp2, Register tmp3, Register tmp4,
|
||||
Register tmp5, Register tmp6, Register product_hi) {
|
||||
assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6);
|
||||
assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6);
|
||||
|
||||
const Register idx = tmp1;
|
||||
const Register kdx = tmp2;
|
||||
@@ -4142,11 +4364,11 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Regi
|
||||
const Register y_idx = tmp4;
|
||||
const Register carry = tmp5;
|
||||
const Register product = xlen;
|
||||
const Register x_xstart = zlen; // reuse register
|
||||
const Register x_xstart = tmp0;
|
||||
|
||||
mv(idx, ylen); // idx = ylen;
|
||||
mv(kdx, zlen); // kdx = xlen+ylen;
|
||||
mv(carry, zr); // carry = 0;
|
||||
mv(idx, ylen); // idx = ylen;
|
||||
addw(kdx, xlen, ylen); // kdx = xlen+ylen;
|
||||
mv(carry, zr); // carry = 0;
|
||||
|
||||
Label L_multiply_64_x_64_loop, L_done;
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
|
||||
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
@@ -30,7 +30,6 @@
|
||||
#include "asm/assembler.inline.hpp"
|
||||
#include "code/vmreg.hpp"
|
||||
#include "metaprogramming/enableIf.hpp"
|
||||
#include "nativeInst_riscv.hpp"
|
||||
#include "oops/compressedOops.hpp"
|
||||
#include "utilities/powerOfTwo.hpp"
|
||||
|
||||
@@ -42,6 +41,7 @@
|
||||
class MacroAssembler: public Assembler {
|
||||
|
||||
public:
|
||||
|
||||
MacroAssembler(CodeBuffer* code) : Assembler(code) {}
|
||||
|
||||
void safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod);
|
||||
@@ -49,7 +49,7 @@ class MacroAssembler: public Assembler {
|
||||
// Alignment
|
||||
int align(int modulus, int extra_offset = 0);
|
||||
|
||||
static inline void assert_alignment(address pc, int alignment = NativeInstruction::instruction_size) {
|
||||
static inline void assert_alignment(address pc, int alignment = MacroAssembler::instruction_size) {
|
||||
assert(is_aligned(pc, alignment), "bad alignment");
|
||||
}
|
||||
|
||||
@@ -804,17 +804,16 @@ public:
|
||||
}
|
||||
}
|
||||
|
||||
void movptr(Register Rd, address addr, int32_t &offset);
|
||||
|
||||
void movptr(Register Rd, address addr) {
|
||||
int offset = 0;
|
||||
movptr(Rd, addr, offset);
|
||||
addi(Rd, Rd, offset);
|
||||
}
|
||||
|
||||
inline void movptr(Register Rd, uintptr_t imm64) {
|
||||
movptr(Rd, (address)imm64);
|
||||
}
|
||||
// Generates a load of a 48-bit constant which can be
|
||||
// patched to any 48-bit constant, i.e. address.
|
||||
// If common case supply additional temp register
|
||||
// to shorten the instruction sequence.
|
||||
void movptr(Register Rd, address addr, Register tmp = noreg);
|
||||
void movptr(Register Rd, address addr, int32_t &offset, Register tmp = noreg);
|
||||
private:
|
||||
void movptr1(Register Rd, uintptr_t addr, int32_t &offset);
|
||||
void movptr2(Register Rd, uintptr_t addr, int32_t &offset, Register tmp);
|
||||
public:
|
||||
|
||||
// arith
|
||||
void add (Register Rd, Register Rn, int64_t increment, Register temp = t0);
|
||||
@@ -1233,7 +1232,7 @@ public:
|
||||
|
||||
address ic_call(address entry, jint method_index = 0);
|
||||
static int ic_check_size();
|
||||
int ic_check(int end_alignment = NativeInstruction::instruction_size);
|
||||
int ic_check(int end_alignment = MacroAssembler::instruction_size);
|
||||
|
||||
// Support for memory inc/dec
|
||||
// n.b. increment/decrement calls with an Address destination will
|
||||
@@ -1287,7 +1286,7 @@ public:
|
||||
Register tmp, Register tmp3, Register tmp4,
|
||||
Register tmp6, Register product_hi);
|
||||
void multiply_to_len(Register x, Register xlen, Register y, Register ylen,
|
||||
Register z, Register zlen,
|
||||
Register z, Register tmp0,
|
||||
Register tmp1, Register tmp2, Register tmp3, Register tmp4,
|
||||
Register tmp5, Register tmp6, Register product_hi);
|
||||
#endif
|
||||
@@ -1543,6 +1542,226 @@ private:
public:
void lightweight_lock(Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow);
void lightweight_unlock(Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow);

public:
enum {
// Refer to function emit_trampoline_stub.
trampoline_stub_instruction_size = 3 * instruction_size + wordSize, // auipc + ld + jr + target address
trampoline_stub_data_offset = 3 * instruction_size, // auipc + ld + jr

// movptr
movptr1_instruction_size = 6 * instruction_size, // lui, addi, slli, addi, slli, addi. See movptr1().
movptr2_instruction_size = 5 * instruction_size, // lui, lui, slli, add, addi. See movptr2().
load_pc_relative_instruction_size = 2 * instruction_size // auipc, ld
};

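The enum above pins the trampoline stub layout at three base instructions (auipc + ld + jr) followed by a word-aligned 8-byte target slot, which is why trampoline_stub_data_offset is 3 * instruction_size. A sketch of that layout arithmetic, assuming the 4-byte base instruction size and 8-byte wordSize of riscv64:

constexpr int kInstrSize      = 4;                      // MacroAssembler::instruction_size
constexpr int kWordSize       = 8;                      // 64-bit target address slot
constexpr int kStubDataOffset = 3 * kInstrSize;         // auipc + ld + jr come first
constexpr int kStubSize       = kStubDataOffset + kWordSize;

static_assert(kStubDataOffset == 12, "target address starts right after the three instructions");
static_assert(kStubSize == 20, "whole stub: 12 bytes of code plus one 64-bit data word");
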
static bool is_load_pc_relative_at(address branch);
|
||||
static bool is_li16u_at(address instr);
|
||||
|
||||
static bool is_trampoline_stub_at(address addr) {
|
||||
// Ensure that the stub is exactly
|
||||
// ld t0, L--->auipc + ld
|
||||
// jr t0
|
||||
// L:
|
||||
|
||||
// judge inst + register + imm
|
||||
// 1). check the instructions: auipc + ld + jalr
|
||||
// 2). check if auipc[11:7] == t0 and ld[11:7] == t0 and ld[19:15] == t0 && jr[19:15] == t0
|
||||
// 3). check if the offset in ld[31:20] equals the data_offset
|
||||
assert_cond(addr != nullptr);
|
||||
const int instr_size = instruction_size;
|
||||
if (is_auipc_at(addr) &&
|
||||
is_ld_at(addr + instr_size) &&
|
||||
is_jalr_at(addr + 2 * instr_size) &&
|
||||
(extract_rd(addr) == x5) &&
|
||||
(extract_rd(addr + instr_size) == x5) &&
|
||||
(extract_rs1(addr + instr_size) == x5) &&
|
||||
(extract_rs1(addr + 2 * instr_size) == x5) &&
|
||||
(Assembler::extract(Assembler::ld_instr(addr + 4), 31, 20) == trampoline_stub_data_offset)) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool is_call_at(address instr) {
|
||||
if (is_jal_at(instr) || is_jalr_at(instr)) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool is_jal_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b1101111; }
|
||||
static bool is_jalr_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b1100111 && extract_funct3(instr) == 0b000; }
|
||||
static bool is_branch_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b1100011; }
|
||||
static bool is_ld_at(address instr) { assert_cond(instr != nullptr); return is_load_at(instr) && extract_funct3(instr) == 0b011; }
|
||||
static bool is_load_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0000011; }
|
||||
static bool is_float_load_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0000111; }
|
||||
static bool is_auipc_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0010111; }
|
||||
static bool is_jump_at(address instr) { assert_cond(instr != nullptr); return is_branch_at(instr) || is_jal_at(instr) || is_jalr_at(instr); }
|
||||
static bool is_add_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0110011 && extract_funct3(instr) == 0b000; }
|
||||
static bool is_addi_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0010011 && extract_funct3(instr) == 0b000; }
|
||||
static bool is_addiw_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0011011 && extract_funct3(instr) == 0b000; }
|
||||
static bool is_addiw_to_zr_at(address instr){ assert_cond(instr != nullptr); return is_addiw_at(instr) && extract_rd(instr) == zr; }
|
||||
static bool is_lui_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0110111; }
|
||||
static bool is_lui_to_zr_at(address instr) { assert_cond(instr != nullptr); return is_lui_at(instr) && extract_rd(instr) == zr; }
|
||||
|
||||
static bool is_srli_at(address instr) {
|
||||
assert_cond(instr != nullptr);
|
||||
return extract_opcode(instr) == 0b0010011 &&
|
||||
extract_funct3(instr) == 0b101 &&
|
||||
Assembler::extract(((unsigned*)instr)[0], 31, 26) == 0b000000;
|
||||
}
|
||||
|
||||
static bool is_slli_shift_at(address instr, uint32_t shift) {
|
||||
assert_cond(instr != nullptr);
|
||||
return (extract_opcode(instr) == 0b0010011 && // opcode field
|
||||
extract_funct3(instr) == 0b001 && // funct3 field, select the type of operation
|
||||
Assembler::extract(Assembler::ld_instr(instr), 25, 20) == shift); // shamt field
|
||||
}
|
||||
|
||||
static bool is_movptr1_at(address instr);
|
||||
static bool is_movptr2_at(address instr);
|
||||
|
||||
static bool is_lwu_to_zr(address instr);
|
||||
|
||||
private:
|
||||
static Register extract_rs1(address instr);
|
||||
static Register extract_rs2(address instr);
|
||||
static Register extract_rd(address instr);
|
||||
static uint32_t extract_opcode(address instr);
|
||||
static uint32_t extract_funct3(address instr);
|
||||
|
||||
// the instruction sequence of movptr is as below:
|
||||
// lui
|
||||
// addi
|
||||
// slli
|
||||
// addi
|
||||
// slli
|
||||
// addi/jalr/load
|
||||
static bool check_movptr1_data_dependency(address instr) {
|
||||
address lui = instr;
|
||||
address addi1 = lui + instruction_size;
|
||||
address slli1 = addi1 + instruction_size;
|
||||
address addi2 = slli1 + instruction_size;
|
||||
address slli2 = addi2 + instruction_size;
|
||||
address last_instr = slli2 + instruction_size;
|
||||
return extract_rs1(addi1) == extract_rd(lui) &&
|
||||
extract_rs1(addi1) == extract_rd(addi1) &&
|
||||
extract_rs1(slli1) == extract_rd(addi1) &&
|
||||
extract_rs1(slli1) == extract_rd(slli1) &&
|
||||
extract_rs1(addi2) == extract_rd(slli1) &&
|
||||
extract_rs1(addi2) == extract_rd(addi2) &&
|
||||
extract_rs1(slli2) == extract_rd(addi2) &&
|
||||
extract_rs1(slli2) == extract_rd(slli2) &&
|
||||
extract_rs1(last_instr) == extract_rd(slli2);
|
||||
}
|
||||
|
||||
// the instruction sequence of movptr2 is as below:
|
||||
// lui
|
||||
// lui
|
||||
// slli
|
||||
// add
|
||||
// addi/jalr/load
|
||||
static bool check_movptr2_data_dependency(address instr) {
|
||||
address lui1 = instr;
|
||||
address lui2 = lui1 + instruction_size;
|
||||
address slli = lui2 + instruction_size;
|
||||
address add = slli + instruction_size;
|
||||
address last_instr = add + instruction_size;
|
||||
return extract_rd(add) == extract_rd(lui2) &&
|
||||
extract_rs1(add) == extract_rd(lui2) &&
|
||||
extract_rs2(add) == extract_rd(slli) &&
|
||||
extract_rs1(slli) == extract_rd(lui1) &&
|
||||
extract_rd(slli) == extract_rd(lui1) &&
|
||||
extract_rs1(last_instr) == extract_rd(add);
|
||||
}
|
||||
|
||||
// the instruction sequence of li64 is as below:
|
||||
// lui
|
||||
// addi
|
||||
// slli
|
||||
// addi
|
||||
// slli
|
||||
// addi
|
||||
// slli
|
||||
// addi
|
||||
static bool check_li64_data_dependency(address instr) {
|
||||
address lui = instr;
|
||||
address addi1 = lui + instruction_size;
|
||||
address slli1 = addi1 + instruction_size;
|
||||
address addi2 = slli1 + instruction_size;
|
||||
address slli2 = addi2 + instruction_size;
|
||||
address addi3 = slli2 + instruction_size;
|
||||
address slli3 = addi3 + instruction_size;
|
||||
address addi4 = slli3 + instruction_size;
|
||||
return extract_rs1(addi1) == extract_rd(lui) &&
|
||||
extract_rs1(addi1) == extract_rd(addi1) &&
|
||||
extract_rs1(slli1) == extract_rd(addi1) &&
|
||||
extract_rs1(slli1) == extract_rd(slli1) &&
|
||||
extract_rs1(addi2) == extract_rd(slli1) &&
|
||||
extract_rs1(addi2) == extract_rd(addi2) &&
|
||||
extract_rs1(slli2) == extract_rd(addi2) &&
|
||||
extract_rs1(slli2) == extract_rd(slli2) &&
|
||||
extract_rs1(addi3) == extract_rd(slli2) &&
|
||||
extract_rs1(addi3) == extract_rd(addi3) &&
|
||||
extract_rs1(slli3) == extract_rd(addi3) &&
|
||||
extract_rs1(slli3) == extract_rd(slli3) &&
|
||||
extract_rs1(addi4) == extract_rd(slli3) &&
|
||||
extract_rs1(addi4) == extract_rd(addi4);
|
||||
}
|
||||
|
||||
// the instruction sequence of li16u is as below:
|
||||
// lui
|
||||
// srli
|
||||
static bool check_li16u_data_dependency(address instr) {
|
||||
address lui = instr;
|
||||
address srli = lui + instruction_size;
|
||||
|
||||
return extract_rs1(srli) == extract_rd(lui) &&
|
||||
extract_rs1(srli) == extract_rd(srli);
|
||||
}
|
||||
|
||||
// the instruction sequence of li32 is as below:
|
||||
// lui
|
||||
// addiw
|
||||
static bool check_li32_data_dependency(address instr) {
|
||||
address lui = instr;
|
||||
address addiw = lui + instruction_size;
|
||||
|
||||
return extract_rs1(addiw) == extract_rd(lui) &&
|
||||
extract_rs1(addiw) == extract_rd(addiw);
|
||||
}
|
||||
|
||||
// the instruction sequence of pc-relative is as below:
|
||||
// auipc
|
||||
// jalr/addi/load/float_load
|
||||
static bool check_pc_relative_data_dependency(address instr) {
|
||||
address auipc = instr;
|
||||
address last_instr = auipc + instruction_size;
|
||||
|
||||
return extract_rs1(last_instr) == extract_rd(auipc);
|
||||
}
|
||||
|
||||
// the instruction sequence of load_label is as below:
|
||||
// auipc
|
||||
// load
|
||||
static bool check_load_pc_relative_data_dependency(address instr) {
|
||||
address auipc = instr;
|
||||
address load = auipc + instruction_size;
|
||||
|
||||
return extract_rd(load) == extract_rd(auipc) &&
|
||||
extract_rs1(load) == extract_rd(load);
|
||||
}
|
||||
|
||||
static bool is_li32_at(address instr);
|
||||
static bool is_li64_at(address instr);
|
||||
static bool is_pc_relative_at(address branch);
|
||||
|
||||
static bool is_membar(address addr) {
|
||||
return (Bytes::get_native_u4(addr) & 0x7f) == 0b1111 && extract_funct3(addr) == 0;
|
||||
}
|
||||
static uint32_t get_membar_kind(address addr);
|
||||
static void set_membar_kind(address addr, uint32_t order_kind);
|
||||
};
|
||||
|
||||
#ifdef ASSERT
|
||||
|
||||
@@ -138,18 +138,18 @@
|
||||
}
|
||||
|
||||
// Does the CPU supports vector variable rotate instructions?
|
||||
static constexpr bool supports_vector_variable_rotates(void) {
|
||||
return false;
|
||||
static bool supports_vector_variable_rotates(void) {
|
||||
return UseZvbb;
|
||||
}
|
||||
|
||||
// Does the CPU supports vector constant rotate instructions?
|
||||
static constexpr bool supports_vector_constant_rotates(int shift) {
|
||||
return false;
|
||||
static bool supports_vector_constant_rotates(int shift) {
|
||||
return UseZvbb;
|
||||
}
|
||||
|
||||
// Does the CPU supports vector unsigned comparison instructions?
|
||||
static constexpr bool supports_vector_comparison_unsigned(int vlen, BasicType bt) {
|
||||
return false;
|
||||
static bool supports_vector_comparison_unsigned(int vlen, BasicType bt) {
|
||||
return UseRVV;
|
||||
}
|
||||
|
||||
// Some microarchitectures have mask registers used on vectors
|
||||
|
||||
@@ -39,101 +39,20 @@
|
||||
#include "c1/c1_Runtime1.hpp"
|
||||
#endif
|
||||
|
||||
Register NativeInstruction::extract_rs1(address instr) {
|
||||
assert_cond(instr != nullptr);
|
||||
return as_Register(Assembler::extract(Assembler::ld_instr(instr), 19, 15));
|
||||
}
|
||||
|
||||
Register NativeInstruction::extract_rs2(address instr) {
|
||||
assert_cond(instr != nullptr);
|
||||
return as_Register(Assembler::extract(Assembler::ld_instr(instr), 24, 20));
|
||||
}
|
||||
|
||||
Register NativeInstruction::extract_rd(address instr) {
|
||||
assert_cond(instr != nullptr);
|
||||
return as_Register(Assembler::extract(Assembler::ld_instr(instr), 11, 7));
|
||||
}
|
||||
|
||||
uint32_t NativeInstruction::extract_opcode(address instr) {
|
||||
assert_cond(instr != nullptr);
|
||||
return Assembler::extract(Assembler::ld_instr(instr), 6, 0);
|
||||
}
|
||||
|
||||
uint32_t NativeInstruction::extract_funct3(address instr) {
|
||||
assert_cond(instr != nullptr);
|
||||
return Assembler::extract(Assembler::ld_instr(instr), 14, 12);
|
||||
}
|
||||
|
||||
bool NativeInstruction::is_pc_relative_at(address instr) {
|
||||
// auipc + jalr
|
||||
// auipc + addi
|
||||
// auipc + load
|
||||
// auipc + fload_load
|
||||
return (is_auipc_at(instr)) &&
|
||||
(is_addi_at(instr + instruction_size) ||
|
||||
is_jalr_at(instr + instruction_size) ||
|
||||
is_load_at(instr + instruction_size) ||
|
||||
is_float_load_at(instr + instruction_size)) &&
|
||||
check_pc_relative_data_dependency(instr);
|
||||
}
|
||||
|
||||
// ie:ld(Rd, Label)
|
||||
bool NativeInstruction::is_load_pc_relative_at(address instr) {
|
||||
return is_auipc_at(instr) && // auipc
|
||||
is_ld_at(instr + instruction_size) && // ld
|
||||
check_load_pc_relative_data_dependency(instr);
|
||||
}
|
||||
|
||||
bool NativeInstruction::is_movptr_at(address instr) {
|
||||
return is_lui_at(instr) && // Lui
|
||||
is_addi_at(instr + instruction_size) && // Addi
|
||||
is_slli_shift_at(instr + instruction_size * 2, 11) && // Slli Rd, Rs, 11
|
||||
is_addi_at(instr + instruction_size * 3) && // Addi
|
||||
is_slli_shift_at(instr + instruction_size * 4, 6) && // Slli Rd, Rs, 6
|
||||
(is_addi_at(instr + instruction_size * 5) ||
|
||||
is_jalr_at(instr + instruction_size * 5) ||
|
||||
is_load_at(instr + instruction_size * 5)) && // Addi/Jalr/Load
|
||||
check_movptr_data_dependency(instr);
|
||||
}
|
||||
|
||||
bool NativeInstruction::is_li16u_at(address instr) {
|
||||
return is_lui_at(instr) && // lui
|
||||
is_srli_at(instr + instruction_size) && // srli
|
||||
check_li16u_data_dependency(instr);
|
||||
}
|
||||
|
||||
bool NativeInstruction::is_li32_at(address instr) {
|
||||
return is_lui_at(instr) && // lui
|
||||
is_addiw_at(instr + instruction_size) && // addiw
|
||||
check_li32_data_dependency(instr);
|
||||
}
|
||||
|
||||
bool NativeInstruction::is_li64_at(address instr) {
|
||||
return is_lui_at(instr) && // lui
|
||||
is_addi_at(instr + instruction_size) && // addi
|
||||
is_slli_shift_at(instr + instruction_size * 2, 12) && // Slli Rd, Rs, 12
|
||||
is_addi_at(instr + instruction_size * 3) && // addi
|
||||
is_slli_shift_at(instr + instruction_size * 4, 12) && // Slli Rd, Rs, 12
|
||||
is_addi_at(instr + instruction_size * 5) && // addi
|
||||
is_slli_shift_at(instr + instruction_size * 6, 8) && // Slli Rd, Rs, 8
|
||||
is_addi_at(instr + instruction_size * 7) && // addi
|
||||
check_li64_data_dependency(instr);
|
||||
}
|
||||
|
||||
void NativeCall::verify() {
|
||||
assert(NativeCall::is_call_at((address)this), "unexpected code at call site");
|
||||
assert(MacroAssembler::is_call_at((address)this), "unexpected code at call site");
|
||||
}
|
||||
|
||||
address NativeCall::destination() const {
|
||||
address addr = (address)this;
|
||||
assert(NativeInstruction::is_jal_at(instruction_address()), "inst must be jal.");
|
||||
assert(MacroAssembler::is_jal_at(instruction_address()), "inst must be jal.");
|
||||
address destination = MacroAssembler::target_addr_for_insn(instruction_address());
|
||||
|
||||
// Do we use a trampoline stub for this call?
|
||||
CodeBlob* cb = CodeCache::find_blob(addr);
|
||||
assert(cb && cb->is_nmethod(), "sanity");
|
||||
nmethod *nm = (nmethod *)cb;
|
||||
if (nm != nullptr && nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) {
|
||||
if (nm != nullptr && nm->stub_contains(destination) && MacroAssembler::is_trampoline_stub_at(destination)) {
|
||||
// Yes we do, so get the destination from the trampoline stub.
|
||||
const address trampoline_stub_addr = destination;
|
||||
destination = nativeCallTrampolineStub_at(trampoline_stub_addr)->destination();
|
||||
@@ -157,12 +76,12 @@ void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
|
||||
"concurrent code patching");
|
||||
|
||||
address addr_call = addr_at(0);
|
||||
assert(NativeCall::is_call_at(addr_call), "unexpected code at call site");
|
||||
assert(MacroAssembler::is_call_at(addr_call), "unexpected code at call site");
|
||||
|
||||
// Patch the constant in the call's trampoline stub.
|
||||
address trampoline_stub_addr = get_trampoline();
|
||||
if (trampoline_stub_addr != nullptr) {
|
||||
assert (!is_NativeCallTrampolineStub_at(dest), "chained trampolines");
|
||||
assert (!MacroAssembler::is_trampoline_stub_at(dest), "chained trampolines");
|
||||
nativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
|
||||
}
|
||||
|
||||
@@ -184,7 +103,7 @@ address NativeCall::get_trampoline() {
|
||||
assert(code != nullptr, "Could not find the containing code blob");
|
||||
|
||||
address jal_destination = MacroAssembler::pd_call_destination(call_addr);
|
||||
if (code != nullptr && code->contains(jal_destination) && is_NativeCallTrampolineStub_at(jal_destination)) {
|
||||
if (code != nullptr && code->contains(jal_destination) && MacroAssembler::is_trampoline_stub_at(jal_destination)) {
|
||||
return jal_destination;
|
||||
}
|
||||
|
||||
@@ -201,10 +120,11 @@ void NativeCall::insert(address code_pos, address entry) { Unimplemented(); }
|
||||
//-------------------------------------------------------------------
|
||||
|
||||
void NativeMovConstReg::verify() {
|
||||
if (!(nativeInstruction_at(instruction_address())->is_movptr() ||
|
||||
is_auipc_at(instruction_address()))) {
|
||||
fatal("should be MOVPTR or AUIPC");
|
||||
NativeInstruction* ni = nativeInstruction_at(instruction_address());
|
||||
if (ni->is_movptr() || ni->is_auipc()) {
|
||||
return;
|
||||
}
|
||||
fatal("should be MOVPTR or AUIPC");
|
||||
}
|
||||
|
||||
intptr_t NativeMovConstReg::data() const {
|
||||
@@ -223,7 +143,7 @@ void NativeMovConstReg::set_data(intptr_t x) {
|
||||
} else {
|
||||
// Store x into the instruction stream.
|
||||
MacroAssembler::pd_patch_instruction_size(instruction_address(), (address)x);
|
||||
ICache::invalidate_range(instruction_address(), movptr_instruction_size);
|
||||
ICache::invalidate_range(instruction_address(), movptr1_instruction_size /* > movptr2_instruction_size */ );
|
||||
}
|
||||
|
||||
// Find and replace the oop/metadata corresponding to this
|
||||
@@ -329,14 +249,7 @@ address NativeGeneralJump::jump_destination() const {
|
||||
//-------------------------------------------------------------------
|
||||
|
||||
bool NativeInstruction::is_safepoint_poll() {
|
||||
return is_lwu_to_zr(address(this));
|
||||
}
|
||||
|
||||
bool NativeInstruction::is_lwu_to_zr(address instr) {
|
||||
assert_cond(instr != nullptr);
|
||||
return (extract_opcode(instr) == 0b0000011 &&
|
||||
extract_funct3(instr) == 0b110 &&
|
||||
extract_rd(instr) == zr); // zr
|
||||
return MacroAssembler::is_lwu_to_zr(address(this));
|
||||
}
|
||||
|
||||
// A 16-bit instruction with all bits ones is permanently reserved as an illegal instruction.
|
||||
@@ -393,13 +306,15 @@ void NativeJump::patch_verified_entry(address entry, address verified_entry, add
|
||||
ICache::invalidate_range(verified_entry, instruction_size);
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
|
||||
void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
|
||||
CodeBuffer cb(code_pos, instruction_size);
|
||||
MacroAssembler a(&cb);
|
||||
Assembler::IncompressibleRegion ir(&a); // Fixed length: see NativeGeneralJump::get_instruction_size()
|
||||
|
||||
int32_t offset = 0;
|
||||
a.movptr(t0, entry, offset); // lui, addi, slli, addi, slli
|
||||
a.movptr(t0, entry, offset, t1); // lui, lui, slli, add
|
||||
a.jr(t0, offset); // jalr
|
||||
|
||||
ICache::invalidate_range(code_pos, instruction_size);
|
||||
@@ -410,6 +325,8 @@ void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer)
|
||||
ShouldNotCallThis();
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------------
|
||||
|
||||
address NativeCallTrampolineStub::destination(nmethod *nm) const {
|
||||
return ptr_at(data_offset);
|
||||
}
|
||||
@@ -419,30 +336,6 @@ void NativeCallTrampolineStub::set_destination(address new_destination) {
|
||||
OrderAccess::release();
|
||||
}
|
||||
|
||||
uint32_t NativeMembar::get_kind() {
|
||||
uint32_t insn = uint_at(0);
|
||||
|
||||
uint32_t predecessor = Assembler::extract(insn, 27, 24);
|
||||
uint32_t successor = Assembler::extract(insn, 23, 20);
|
||||
|
||||
return MacroAssembler::pred_succ_to_membar_mask(predecessor, successor);
|
||||
}
|
||||
|
||||
void NativeMembar::set_kind(uint32_t order_kind) {
|
||||
uint32_t predecessor = 0;
|
||||
uint32_t successor = 0;
|
||||
|
||||
MacroAssembler::membar_mask_to_pred_succ(order_kind, predecessor, successor);
|
||||
|
||||
uint32_t insn = uint_at(0);
|
||||
address pInsn = (address) &insn;
|
||||
Assembler::patch(pInsn, 27, 24, predecessor);
|
||||
Assembler::patch(pInsn, 23, 20, successor);
|
||||
|
||||
address membar = addr_at(0);
|
||||
Assembler::sd_instr(membar, insn);
|
||||
}
|
||||
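For readers skimming the hunk above: a RISC-V FENCE keeps its predecessor and successor sets in bits 27..24 and 23..20 of the instruction word, which is exactly what get_kind() extracts and set_kind() patches back via Assembler::extract/patch. A minimal standalone sketch of that bit manipulation (illustrative only, outside HotSpot's helpers) could look like this:

```cpp
#include <cstdint>

// Illustrative helpers mirroring the field layout used by get_kind()/set_kind().
static inline uint32_t fence_predecessor(uint32_t insn) {
  return (insn >> 24) & 0xF;                 // bits 27..24
}
static inline uint32_t fence_successor(uint32_t insn) {
  return (insn >> 20) & 0xF;                 // bits 23..20
}
static inline uint32_t fence_with_sets(uint32_t insn, uint32_t pred, uint32_t succ) {
  insn &= ~(0xFFu << 20);                    // clear both 4-bit fields
  return insn | ((pred & 0xF) << 24) | ((succ & 0xF) << 20);
}
```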
|
||||
void NativePostCallNop::make_deopt() {
|
||||
MacroAssembler::assert_alignment(addr_at(0));
|
||||
NativeDeoptInstruction::insert(addr_at(0));
|
||||
@@ -465,7 +358,7 @@ bool NativePostCallNop::patch(int32_t oopmap_slot, int32_t cb_offset) {
|
||||
}
|
||||
int32_t data = (oopmap_slot << 24) | cb_offset;
|
||||
assert(data != 0, "must be");
|
||||
assert(is_lui_to_zr_at(addr_at(4)) && is_addiw_to_zr_at(addr_at(8)), "must be");
|
||||
assert(MacroAssembler::is_lui_to_zr_at(addr_at(4)) && MacroAssembler::is_addiw_to_zr_at(addr_at(8)), "must be");
|
||||
|
||||
MacroAssembler::patch_imm_in_li32(addr_at(4), data);
|
||||
return true; // successfully encoded
|
||||
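The patch() hunk above packs the oopmap slot into the top byte and the code-blob offset into the low 24 bits before handing the value to patch_imm_in_li32. Assuming the same packing on the read side, the matching decode is just the reverse shift and mask (a sketch, not the actual decode() body):

```cpp
#include <cstdint>

// Illustrative unpacking of (oopmap_slot << 24) | cb_offset.
static inline void unpack_post_call_nop_data(int32_t data,
                                             int32_t& oopmap_slot,
                                             int32_t& cb_offset) {
  oopmap_slot = (data >> 24) & 0xFF;   // top byte
  cb_offset   = data & 0xFFFFFF;       // low 24 bits
}
```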
|
||||
@@ -27,6 +27,7 @@
|
||||
#ifndef CPU_RISCV_NATIVEINST_RISCV_HPP
|
||||
#define CPU_RISCV_NATIVEINST_RISCV_HPP
|
||||
|
||||
#include "macroAssembler_riscv.hpp"
|
||||
#include "asm/assembler.hpp"
|
||||
#include "runtime/continuation.hpp"
|
||||
#include "runtime/icache.hpp"
|
||||
@@ -52,172 +53,24 @@ class NativeCall;
|
||||
|
||||
class NativeInstruction {
|
||||
friend class Relocation;
|
||||
friend bool is_NativeCallTrampolineStub_at(address);
|
||||
public:
|
||||
enum {
|
||||
instruction_size = 4,
|
||||
compressed_instruction_size = 2,
|
||||
instruction_size = MacroAssembler::instruction_size,
|
||||
compressed_instruction_size = MacroAssembler::compressed_instruction_size,
|
||||
};
|
||||
|
||||
juint encoding() const {
|
||||
return uint_at(0);
|
||||
}
|
||||
|
||||
bool is_jal() const { return is_jal_at(addr_at(0)); }
|
||||
bool is_movptr() const { return is_movptr_at(addr_at(0)); }
|
||||
bool is_call() const { return is_call_at(addr_at(0)); }
|
||||
bool is_jump() const { return is_jump_at(addr_at(0)); }
|
||||
|
||||
static bool is_jal_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b1101111; }
|
||||
static bool is_jalr_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b1100111 && extract_funct3(instr) == 0b000; }
|
||||
static bool is_branch_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b1100011; }
|
||||
static bool is_ld_at(address instr) { assert_cond(instr != nullptr); return is_load_at(instr) && extract_funct3(instr) == 0b011; }
|
||||
static bool is_load_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0000011; }
|
||||
static bool is_float_load_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0000111; }
|
||||
static bool is_auipc_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0010111; }
|
||||
static bool is_jump_at(address instr) { assert_cond(instr != nullptr); return is_branch_at(instr) || is_jal_at(instr) || is_jalr_at(instr); }
|
||||
static bool is_addi_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0010011 && extract_funct3(instr) == 0b000; }
|
||||
static bool is_addiw_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0011011 && extract_funct3(instr) == 0b000; }
|
||||
static bool is_addiw_to_zr_at(address instr) { assert_cond(instr != nullptr); return is_addiw_at(instr) && extract_rd(instr) == zr; }
|
||||
static bool is_lui_at(address instr) { assert_cond(instr != nullptr); return extract_opcode(instr) == 0b0110111; }
|
||||
static bool is_lui_to_zr_at(address instr) { assert_cond(instr != nullptr); return is_lui_at(instr) && extract_rd(instr) == zr; }
|
||||
|
||||
static bool is_srli_at(address instr) {
|
||||
assert_cond(instr != nullptr);
|
||||
return extract_opcode(instr) == 0b0010011 &&
|
||||
extract_funct3(instr) == 0b101 &&
|
||||
Assembler::extract(((unsigned*)instr)[0], 31, 26) == 0b000000;
|
||||
}
|
||||
|
||||
static bool is_slli_shift_at(address instr, uint32_t shift) {
|
||||
assert_cond(instr != nullptr);
|
||||
return (extract_opcode(instr) == 0b0010011 && // opcode field
|
||||
extract_funct3(instr) == 0b001 && // funct3 field, select the type of operation
|
||||
Assembler::extract(Assembler::ld_instr(instr), 25, 20) == shift); // shamt field
|
||||
}
|
||||
|
||||
static Register extract_rs1(address instr);
|
||||
static Register extract_rs2(address instr);
|
||||
static Register extract_rd(address instr);
|
||||
static uint32_t extract_opcode(address instr);
|
||||
static uint32_t extract_funct3(address instr);
|
||||
|
||||
// the instruction sequence of movptr is as below:
|
||||
// lui
|
||||
// addi
|
||||
// slli
|
||||
// addi
|
||||
// slli
|
||||
// addi/jalr/load
|
||||
static bool check_movptr_data_dependency(address instr) {
|
||||
address lui = instr;
|
||||
address addi1 = lui + instruction_size;
|
||||
address slli1 = addi1 + instruction_size;
|
||||
address addi2 = slli1 + instruction_size;
|
||||
address slli2 = addi2 + instruction_size;
|
||||
address last_instr = slli2 + instruction_size;
|
||||
return extract_rs1(addi1) == extract_rd(lui) &&
|
||||
extract_rs1(addi1) == extract_rd(addi1) &&
|
||||
extract_rs1(slli1) == extract_rd(addi1) &&
|
||||
extract_rs1(slli1) == extract_rd(slli1) &&
|
||||
extract_rs1(addi2) == extract_rd(slli1) &&
|
||||
extract_rs1(addi2) == extract_rd(addi2) &&
|
||||
extract_rs1(slli2) == extract_rd(addi2) &&
|
||||
extract_rs1(slli2) == extract_rd(slli2) &&
|
||||
extract_rs1(last_instr) == extract_rd(slli2);
|
||||
}
|
||||
|
||||
// the instruction sequence of li64 is as below:
|
||||
// lui
|
||||
// addi
|
||||
// slli
|
||||
// addi
|
||||
// slli
|
||||
// addi
|
||||
// slli
|
||||
// addi
|
||||
static bool check_li64_data_dependency(address instr) {
|
||||
address lui = instr;
|
||||
address addi1 = lui + instruction_size;
|
||||
address slli1 = addi1 + instruction_size;
|
||||
address addi2 = slli1 + instruction_size;
|
||||
address slli2 = addi2 + instruction_size;
|
||||
address addi3 = slli2 + instruction_size;
|
||||
address slli3 = addi3 + instruction_size;
|
||||
address addi4 = slli3 + instruction_size;
|
||||
return extract_rs1(addi1) == extract_rd(lui) &&
|
||||
extract_rs1(addi1) == extract_rd(addi1) &&
|
||||
extract_rs1(slli1) == extract_rd(addi1) &&
|
||||
extract_rs1(slli1) == extract_rd(slli1) &&
|
||||
extract_rs1(addi2) == extract_rd(slli1) &&
|
||||
extract_rs1(addi2) == extract_rd(addi2) &&
|
||||
extract_rs1(slli2) == extract_rd(addi2) &&
|
||||
extract_rs1(slli2) == extract_rd(slli2) &&
|
||||
extract_rs1(addi3) == extract_rd(slli2) &&
|
||||
extract_rs1(addi3) == extract_rd(addi3) &&
|
||||
extract_rs1(slli3) == extract_rd(addi3) &&
|
||||
extract_rs1(slli3) == extract_rd(slli3) &&
|
||||
extract_rs1(addi4) == extract_rd(slli3) &&
|
||||
extract_rs1(addi4) == extract_rd(addi4);
|
||||
}
|
||||
|
||||
// the instruction sequence of li16u is as below:
|
||||
// lui
|
||||
// srli
|
||||
static bool check_li16u_data_dependency(address instr) {
|
||||
address lui = instr;
|
||||
address srli = lui + instruction_size;
|
||||
|
||||
return extract_rs1(srli) == extract_rd(lui) &&
|
||||
extract_rs1(srli) == extract_rd(srli);
|
||||
}
|
||||
|
||||
// the instruction sequence of li32 is as below:
|
||||
// lui
|
||||
// addiw
|
||||
static bool check_li32_data_dependency(address instr) {
|
||||
address lui = instr;
|
||||
address addiw = lui + instruction_size;
|
||||
|
||||
return extract_rs1(addiw) == extract_rd(lui) &&
|
||||
extract_rs1(addiw) == extract_rd(addiw);
|
||||
}
|
||||
|
||||
// the instruction sequence of pc-relative is as below:
|
||||
// auipc
|
||||
// jalr/addi/load/float_load
|
||||
static bool check_pc_relative_data_dependency(address instr) {
|
||||
address auipc = instr;
|
||||
address last_instr = auipc + instruction_size;
|
||||
|
||||
return extract_rs1(last_instr) == extract_rd(auipc);
|
||||
}
|
||||
|
||||
// the instruction sequence of load_label is as below:
|
||||
// auipc
|
||||
// load
|
||||
static bool check_load_pc_relative_data_dependency(address instr) {
|
||||
address auipc = instr;
|
||||
address load = auipc + instruction_size;
|
||||
|
||||
return extract_rd(load) == extract_rd(auipc) &&
|
||||
extract_rs1(load) == extract_rd(load);
|
||||
}
|
||||
|
||||
static bool is_movptr_at(address instr);
|
||||
static bool is_li16u_at(address instr);
|
||||
static bool is_li32_at(address instr);
|
||||
static bool is_li64_at(address instr);
|
||||
static bool is_pc_relative_at(address branch);
|
||||
static bool is_load_pc_relative_at(address branch);
|
||||
|
||||
static bool is_call_at(address instr) {
|
||||
if (is_jal_at(instr) || is_jalr_at(instr)) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
static bool is_lwu_to_zr(address instr);
|
||||
bool is_jal() const { return MacroAssembler::is_jal_at(addr_at(0)); }
|
||||
bool is_movptr() const { return MacroAssembler::is_movptr1_at(addr_at(0)) ||
|
||||
MacroAssembler::is_movptr2_at(addr_at(0)); }
|
||||
bool is_movptr1() const { return MacroAssembler::is_movptr1_at(addr_at(0)); }
|
||||
bool is_movptr2() const { return MacroAssembler::is_movptr2_at(addr_at(0)); }
|
||||
bool is_auipc() const { return MacroAssembler::is_auipc_at(addr_at(0)); }
|
||||
bool is_call() const { return MacroAssembler::is_call_at(addr_at(0)); }
|
||||
bool is_jump() const { return MacroAssembler::is_jump_at(addr_at(0)); }
|
||||
|
||||
inline bool is_nop() const;
|
||||
inline bool is_jump_or_nop();
|
||||
@@ -246,11 +99,7 @@ class NativeInstruction {
|
||||
inline friend NativeInstruction* nativeInstruction_at(address addr);
|
||||
|
||||
static bool maybe_cpool_ref(address instr) {
|
||||
return is_auipc_at(instr);
|
||||
}
|
||||
|
||||
bool is_membar() {
|
||||
return (uint_at(0) & 0x7f) == 0b1111 && extract_funct3(addr_at(0)) == 0;
|
||||
return MacroAssembler::is_auipc_at(instr);
|
||||
}
|
||||
};
|
||||
|
||||
@@ -306,7 +155,7 @@ class NativeCall: public NativeInstruction {
|
||||
inline friend NativeCall* nativeCall_before(address return_address);
|
||||
|
||||
static bool is_call_before(address return_address) {
|
||||
return is_call_at(return_address - NativeCall::return_address_offset);
|
||||
return MacroAssembler::is_call_at(return_address - NativeCall::return_address_offset);
|
||||
}
|
||||
|
||||
// MT-safe patching of a call instruction.
|
||||
@@ -351,28 +200,35 @@ inline NativeCall* nativeCall_before(address return_address) {
|
||||
class NativeMovConstReg: public NativeInstruction {
|
||||
public:
|
||||
enum RISCV_specific_constants {
|
||||
movptr_instruction_size = 6 * NativeInstruction::instruction_size, // lui, addi, slli, addi, slli, addi. See movptr().
|
||||
load_pc_relative_instruction_size = 2 * NativeInstruction::instruction_size, // auipc, ld
|
||||
instruction_offset = 0,
|
||||
displacement_offset = 0
|
||||
movptr1_instruction_size = MacroAssembler::movptr1_instruction_size, // lui, addi, slli, addi, slli, addi. See movptr1().
|
||||
movptr2_instruction_size = MacroAssembler::movptr2_instruction_size, // lui, lui, slli, add, addi. See movptr2().
|
||||
load_pc_relative_instruction_size = MacroAssembler::load_pc_relative_instruction_size // auipc, ld
|
||||
};
|
||||
|
||||
address instruction_address() const { return addr_at(instruction_offset); }
|
||||
address instruction_address() const { return addr_at(0); }
|
||||
address next_instruction_address() const {
|
||||
// if the instruction at 5 * instruction_size is addi,
|
||||
// it means a lui + addi + slli + addi + slli + addi instruction sequence,
|
||||
// and the next instruction address should be addr_at(6 * instruction_size).
|
||||
// However, when the instruction at 5 * instruction_size isn't addi,
|
||||
// the next instruction address should be addr_at(5 * instruction_size)
|
||||
if (nativeInstruction_at(instruction_address())->is_movptr()) {
|
||||
if (is_addi_at(addr_at(movptr_instruction_size - NativeInstruction::instruction_size))) {
|
||||
if (MacroAssembler::is_movptr1_at(instruction_address())) {
|
||||
if (MacroAssembler::is_addi_at(addr_at(movptr1_instruction_size - NativeInstruction::instruction_size))) {
|
||||
// Assume: lui, addi, slli, addi, slli, addi
|
||||
return addr_at(movptr_instruction_size);
|
||||
return addr_at(movptr1_instruction_size);
|
||||
} else {
|
||||
// Assume: lui, addi, slli, addi, slli
|
||||
return addr_at(movptr_instruction_size - NativeInstruction::instruction_size);
|
||||
return addr_at(movptr1_instruction_size - NativeInstruction::instruction_size);
|
||||
}
|
||||
} else if (is_load_pc_relative_at(instruction_address())) {
|
||||
} else if (MacroAssembler::is_movptr2_at(instruction_address())) {
|
||||
if (MacroAssembler::is_addi_at(addr_at(movptr2_instruction_size - NativeInstruction::instruction_size))) {
|
||||
// Assume: lui, lui, slli, add, addi
|
||||
return addr_at(movptr2_instruction_size);
|
||||
} else {
|
||||
// Assume: lui, lui, slli, add
|
||||
return addr_at(movptr2_instruction_size - NativeInstruction::instruction_size);
|
||||
}
|
||||
} else if (MacroAssembler::is_load_pc_relative_at(instruction_address())) {
|
||||
// Assume: auipc, ld
|
||||
return addr_at(load_pc_relative_instruction_size);
|
||||
}
|
||||
@@ -383,12 +239,6 @@ class NativeMovConstReg: public NativeInstruction {
|
||||
intptr_t data() const;
|
||||
void set_data(intptr_t x);
|
||||
|
||||
void flush() {
|
||||
if (!maybe_cpool_ref(instruction_address())) {
|
||||
ICache::invalidate_range(instruction_address(), movptr_instruction_size);
|
||||
}
|
||||
}
|
||||
|
||||
void verify();
|
||||
void print();
|
||||
|
||||
@@ -399,14 +249,14 @@ class NativeMovConstReg: public NativeInstruction {
|
||||
|
||||
inline NativeMovConstReg* nativeMovConstReg_at(address addr) {
|
||||
assert_cond(addr != nullptr);
|
||||
NativeMovConstReg* test = (NativeMovConstReg*)(addr - NativeMovConstReg::instruction_offset);
|
||||
NativeMovConstReg* test = (NativeMovConstReg*)(addr);
|
||||
DEBUG_ONLY(test->verify());
|
||||
return test;
|
||||
}
|
||||
|
||||
inline NativeMovConstReg* nativeMovConstReg_before(address addr) {
|
||||
assert_cond(addr != nullptr);
|
||||
NativeMovConstReg* test = (NativeMovConstReg*)(addr - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset);
|
||||
NativeMovConstReg* test = (NativeMovConstReg*)(addr - NativeMovConstReg::instruction_size);
|
||||
DEBUG_ONLY(test->verify());
|
||||
return test;
|
||||
}
|
||||
@@ -484,10 +334,7 @@ inline NativeJump* nativeJump_at(address addr) {
|
||||
class NativeGeneralJump: public NativeJump {
|
||||
public:
|
||||
enum RISCV_specific_constants {
|
||||
instruction_size = 6 * NativeInstruction::instruction_size, // lui, addi, slli, addi, slli, jalr
|
||||
instruction_offset = 0,
|
||||
data_offset = 0,
|
||||
next_instruction_offset = 6 * NativeInstruction::instruction_size // lui, addi, slli, addi, slli, jalr
|
||||
instruction_size = 5 * NativeInstruction::instruction_size, // lui, lui, slli, add, jalr
|
||||
};
|
||||
|
||||
address jump_destination() const;
|
||||
@@ -524,8 +371,8 @@ class NativeCallTrampolineStub : public NativeInstruction {
|
||||
|
||||
enum RISCV_specific_constants {
|
||||
// Refer to function emit_trampoline_stub.
|
||||
instruction_size = 3 * NativeInstruction::instruction_size + wordSize, // auipc + ld + jr + target address
|
||||
data_offset = 3 * NativeInstruction::instruction_size, // auipc + ld + jr
|
||||
instruction_size = MacroAssembler::trampoline_stub_instruction_size, // auipc + ld + jr + target address
|
||||
data_offset = MacroAssembler::trampoline_stub_data_offset, // auipc + ld + jr
|
||||
};
|
||||
|
||||
address destination(nmethod *nm = nullptr) const;
|
||||
@@ -533,49 +380,12 @@ class NativeCallTrampolineStub : public NativeInstruction {
|
||||
ptrdiff_t destination_offset() const;
|
||||
};
|
||||
|
||||
inline bool is_NativeCallTrampolineStub_at(address addr) {
|
||||
// Ensure that the stub is exactly
|
||||
// ld t0, L--->auipc + ld
|
||||
// jr t0
|
||||
// L:
|
||||
|
||||
// check instruction + register + immediate
|
||||
// 1). check the instructions: auipc + ld + jalr
|
||||
// 2). check if auipc[11:7] == t0 and ld[11:7] == t0 and ld[19:15] == t0 && jr[19:15] == t0
|
||||
// 3). check if the offset in ld[31:20] equals the data_offset
|
||||
assert_cond(addr != nullptr);
|
||||
const int instr_size = NativeInstruction::instruction_size;
|
||||
if (NativeInstruction::is_auipc_at(addr) &&
|
||||
NativeInstruction::is_ld_at(addr + instr_size) &&
|
||||
NativeInstruction::is_jalr_at(addr + 2 * instr_size) &&
|
||||
(NativeInstruction::extract_rd(addr) == x5) &&
|
||||
(NativeInstruction::extract_rd(addr + instr_size) == x5) &&
|
||||
(NativeInstruction::extract_rs1(addr + instr_size) == x5) &&
|
||||
(NativeInstruction::extract_rs1(addr + 2 * instr_size) == x5) &&
|
||||
(Assembler::extract(Assembler::ld_instr(addr + 4), 31, 20) == NativeCallTrampolineStub::data_offset)) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
inline NativeCallTrampolineStub* nativeCallTrampolineStub_at(address addr) {
|
||||
assert_cond(addr != nullptr);
|
||||
assert(is_NativeCallTrampolineStub_at(addr), "no call trampoline found");
|
||||
assert(MacroAssembler::is_trampoline_stub_at(addr), "no call trampoline found");
|
||||
return (NativeCallTrampolineStub*)addr;
|
||||
}
|
||||
|
||||
class NativeMembar : public NativeInstruction {
|
||||
public:
|
||||
uint32_t get_kind();
|
||||
void set_kind(uint32_t order_kind);
|
||||
};
|
||||
|
||||
inline NativeMembar *NativeMembar_at(address addr) {
|
||||
assert_cond(addr != nullptr);
|
||||
assert(nativeInstruction_at(addr)->is_membar(), "no membar found");
|
||||
return (NativeMembar*)addr;
|
||||
}
|
||||
|
||||
// A NativePostCallNop takes the form of three instructions:
|
||||
// nop; lui zr, hi20; addiw zr, lo12
|
||||
//
|
||||
@@ -589,7 +399,7 @@ public:
|
||||
// These instructions only ever appear together in a post-call
|
||||
// NOP, so it's unnecessary to check that the third instruction is
|
||||
// an addiw as well.
|
||||
return is_nop() && is_lui_to_zr_at(addr_at(4));
|
||||
return is_nop() && MacroAssembler::is_lui_to_zr_at(addr_at(4));
|
||||
}
|
||||
bool decode(int32_t& oopmap_slot, int32_t& cb_offset) const;
|
||||
bool patch(int32_t oopmap_slot, int32_t cb_offset);
|
||||
|
||||
@@ -42,7 +42,7 @@ void Relocation::pd_set_data_value(address x, bool verify_only) {
|
||||
case relocInfo::oop_type: {
|
||||
oop_Relocation *reloc = (oop_Relocation *)this;
|
||||
// in movoop when BarrierSet::barrier_set()->barrier_set_nmethod() isn't null
|
||||
if (NativeInstruction::is_load_pc_relative_at(addr())) {
|
||||
if (MacroAssembler::is_load_pc_relative_at(addr())) {
|
||||
address constptr = (address)code()->oop_addr_at(reloc->oop_index());
|
||||
bytes = MacroAssembler::pd_patch_instruction_size(addr(), constptr);
|
||||
assert((address)Bytes::get_native_u8(constptr) == x, "error in oop relocation");
|
||||
@@ -60,7 +60,7 @@ void Relocation::pd_set_data_value(address x, bool verify_only) {
|
||||
|
||||
address Relocation::pd_call_destination(address orig_addr) {
|
||||
assert(is_call(), "should be an address instruction here");
|
||||
if (NativeCall::is_call_at(addr())) {
|
||||
if (MacroAssembler::is_call_at(addr())) {
|
||||
address trampoline = nativeCall_at(addr())->get_trampoline();
|
||||
if (trampoline != nullptr) {
|
||||
return nativeCallTrampolineStub_at(trampoline)->destination();
|
||||
@@ -81,7 +81,7 @@ address Relocation::pd_call_destination(address orig_addr) {
|
||||
|
||||
void Relocation::pd_set_call_destination(address x) {
|
||||
assert(is_call(), "should be an address instruction here");
|
||||
if (NativeCall::is_call_at(addr())) {
|
||||
if (MacroAssembler::is_call_at(addr())) {
|
||||
address trampoline = nativeCall_at(addr())->get_trampoline();
|
||||
if (trampoline != nullptr) {
|
||||
nativeCall_at(addr())->set_destination_mt_safe(x, /* assert_lock */false);
|
||||
@@ -94,7 +94,7 @@ void Relocation::pd_set_call_destination(address x) {
|
||||
}
|
||||
|
||||
address* Relocation::pd_address_in_code() {
|
||||
assert(NativeCall::is_load_pc_relative_at(addr()), "Not the expected instruction sequence!");
|
||||
assert(MacroAssembler::is_load_pc_relative_at(addr()), "Not the expected instruction sequence!");
|
||||
return (address*)(MacroAssembler::target_addr_for_insn(addr()));
|
||||
}
|
||||
|
||||
|
||||
@@ -1244,7 +1244,7 @@ int MachCallStaticJavaNode::ret_addr_offset()
|
||||
|
||||
int MachCallDynamicJavaNode::ret_addr_offset()
|
||||
{
|
||||
return 7 * NativeInstruction::instruction_size; // movptr, jal
|
||||
return NativeMovConstReg::movptr2_instruction_size + NativeInstruction::instruction_size; // movptr2, jal
|
||||
}
|
||||
|
||||
int MachCallRuntimeNode::ret_addr_offset() {
|
||||
@@ -1285,12 +1285,11 @@ int CallStaticJavaDirectNode::compute_padding(int current_offset) const
|
||||
// ensure that it does not span a cache line so that it can be patched.
|
||||
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
|
||||
{
|
||||
// skip the movptr in MacroAssembler::ic_call():
|
||||
// lui + addi + slli + addi + slli + addi
|
||||
// Though movptr() has already 4-byte aligned with or without RVC,
|
||||
// skip the movptr2 in MacroAssembler::ic_call():
|
||||
// lui, lui, slli, add, addi
|
||||
// Though movptr2() is already 4-byte aligned with or without RVC,
|
||||
// we need to guard against further changes by explicitly calculating the size.
|
||||
const int movptr_size = 6 * NativeInstruction::instruction_size;
|
||||
current_offset += movptr_size;
|
||||
current_offset += NativeMovConstReg::movptr2_instruction_size;
|
||||
// to make sure the address of jal 4-byte aligned.
|
||||
return align_up(current_offset, alignment_required()) - current_offset;
|
||||
}
|
||||
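To make the padding computation above concrete, here is a small worked sketch with made-up numbers (alignment_required() is assumed to be 4 here): with a current offset of 98, the five 4-byte movptr2 instructions end at 118, so two bytes of padding bring the following jal onto a 4-byte boundary.

```cpp
// Illustration only; mirrors CallDynamicJavaDirectNode::compute_padding above.
int padding_after_movptr2(int current_offset) {
  const int movptr2_size = 5 * 4;                         // lui, lui, slli, add, addi
  current_offset += movptr2_size;                         // 98 + 20 = 118
  return ((current_offset + 3) & ~3) - current_offset;    // align_up(118, 4) - 118 = 2
}
```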
@@ -2163,7 +2162,19 @@ static bool is_vector_scalar_bitwise_pattern(Node* n, Node* m) {
|
||||
switch (n->Opcode()) {
|
||||
case Op_AndV:
|
||||
case Op_OrV:
|
||||
case Op_XorV: {
|
||||
case Op_XorV:
|
||||
case Op_AddVB:
|
||||
case Op_AddVS:
|
||||
case Op_AddVI:
|
||||
case Op_AddVL:
|
||||
case Op_SubVB:
|
||||
case Op_SubVS:
|
||||
case Op_SubVI:
|
||||
case Op_SubVL:
|
||||
case Op_MulVB:
|
||||
case Op_MulVS:
|
||||
case Op_MulVI:
|
||||
case Op_MulVL: {
|
||||
return true;
|
||||
}
|
||||
default:
|
||||
@@ -10014,7 +10025,7 @@ instruct CallDynamicJavaDirect(method meth, rFlagsReg cr)
|
||||
|
||||
effect(USE meth, KILL cr);
|
||||
|
||||
ins_cost(BRANCH_COST + ALU_COST * 6);
|
||||
ins_cost(BRANCH_COST + ALU_COST * 5);
|
||||
|
||||
format %{ "CALL,dynamic $meth\t#@CallDynamicJavaDirect" %}
|
||||
|
||||
|
||||
@@ -75,9 +75,11 @@ source %{
|
||||
break;
|
||||
case Op_CountTrailingZerosV:
|
||||
case Op_CountLeadingZerosV:
|
||||
case Op_ReverseBytesV:
|
||||
case Op_PopCountVL:
|
||||
case Op_PopCountVI:
|
||||
case Op_ReverseBytesV:
|
||||
case Op_RotateLeftV:
|
||||
case Op_RotateRightV:
|
||||
return UseZvbb;
|
||||
case Op_LoadVectorGather:
|
||||
case Op_LoadVectorGatherMasked:
|
||||
@@ -387,6 +389,122 @@ instruct vadd_fp_masked(vReg dst_src1, vReg src2, vRegMask_V0 v0) %{
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// vector-immediate add (unpredicated)
|
||||
|
||||
instruct vadd_immI(vReg dst, vReg src1, immI5 con) %{
|
||||
match(Set dst (AddVB src1 (Replicate con)));
|
||||
match(Set dst (AddVS src1 (Replicate con)));
|
||||
match(Set dst (AddVI src1 (Replicate con)));
|
||||
format %{ "vadd_immI $dst, $src1, $con" %}
|
||||
ins_encode %{
|
||||
BasicType bt = Matcher::vector_element_basic_type(this);
|
||||
__ vsetvli_helper(bt, Matcher::vector_length(this));
|
||||
__ vadd_vi(as_VectorRegister($dst$$reg),
|
||||
as_VectorRegister($src1$$reg),
|
||||
$con$$constant);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct vadd_immL(vReg dst, vReg src1, immL5 con) %{
|
||||
match(Set dst (AddVL src1 (Replicate con)));
|
||||
format %{ "vadd_immL $dst, $src1, $con" %}
|
||||
ins_encode %{
|
||||
__ vsetvli_helper(T_LONG, Matcher::vector_length(this));
|
||||
__ vadd_vi(as_VectorRegister($dst$$reg),
|
||||
as_VectorRegister($src1$$reg),
|
||||
$con$$constant);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
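The immI5/immL5 operands above exist because the RVV .vi forms (vadd.vi here) only encode a signed 5-bit immediate; constants outside that range fall back to the vector-scalar rules that follow. A matcher predicate for such an operand boils down to a simm5 range check (the helper name below is illustrative, not HotSpot's):

```cpp
#include <cstdint>

// Illustrative check: RVV .vi instructions carry a sign-extended 5-bit immediate.
static inline bool fits_in_simm5(int64_t con) {
  return con >= -16 && con <= 15;
}
```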
|
||||
// vector-scalar add (unpredicated)
|
||||
|
||||
instruct vadd_regI(vReg dst, vReg src1, iRegIorL2I src2) %{
|
||||
match(Set dst (AddVB src1 (Replicate src2)));
|
||||
match(Set dst (AddVS src1 (Replicate src2)));
|
||||
match(Set dst (AddVI src1 (Replicate src2)));
|
||||
format %{ "vadd_regI $dst, $src1, $src2" %}
|
||||
ins_encode %{
|
||||
BasicType bt = Matcher::vector_element_basic_type(this);
|
||||
__ vsetvli_helper(bt, Matcher::vector_length(this));
|
||||
__ vadd_vx(as_VectorRegister($dst$$reg),
|
||||
as_VectorRegister($src1$$reg),
|
||||
as_Register($src2$$reg));
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct vadd_regL(vReg dst, vReg src1, iRegL src2) %{
|
||||
match(Set dst (AddVL src1 (Replicate src2)));
|
||||
format %{ "vadd_regL $dst, $src1, $src2" %}
|
||||
ins_encode %{
|
||||
__ vsetvli_helper(T_LONG, Matcher::vector_length(this));
|
||||
__ vadd_vx(as_VectorRegister($dst$$reg),
|
||||
as_VectorRegister($src1$$reg),
|
||||
as_Register($src2$$reg));
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// vector-immediate add (predicated)
|
||||
|
||||
instruct vadd_immI_masked(vReg dst_src, immI5 con, vRegMask_V0 v0) %{
|
||||
match(Set dst_src (AddVB (Binary dst_src (Replicate con)) v0));
|
||||
match(Set dst_src (AddVS (Binary dst_src (Replicate con)) v0));
|
||||
match(Set dst_src (AddVI (Binary dst_src (Replicate con)) v0));
|
||||
format %{ "vadd_immI_masked $dst_src, $dst_src, $con" %}
|
||||
ins_encode %{
|
||||
BasicType bt = Matcher::vector_element_basic_type(this);
|
||||
__ vsetvli_helper(bt, Matcher::vector_length(this));
|
||||
__ vadd_vi(as_VectorRegister($dst_src$$reg),
|
||||
as_VectorRegister($dst_src$$reg),
|
||||
$con$$constant, Assembler::v0_t);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct vadd_immL_masked(vReg dst_src, immL5 con, vRegMask_V0 v0) %{
|
||||
match(Set dst_src (AddVL (Binary dst_src (Replicate con)) v0));
|
||||
format %{ "vadd_immL_masked $dst_src, $dst_src, $con" %}
|
||||
ins_encode %{
|
||||
__ vsetvli_helper(T_LONG, Matcher::vector_length(this));
|
||||
__ vadd_vi(as_VectorRegister($dst_src$$reg),
|
||||
as_VectorRegister($dst_src$$reg),
|
||||
$con$$constant, Assembler::v0_t);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// vector-scalar add (predicated)
|
||||
|
||||
instruct vadd_regI_masked(vReg dst_src, iRegIorL2I src2, vRegMask_V0 v0) %{
|
||||
match(Set dst_src (AddVB (Binary dst_src (Replicate src2)) v0));
|
||||
match(Set dst_src (AddVS (Binary dst_src (Replicate src2)) v0));
|
||||
match(Set dst_src (AddVI (Binary dst_src (Replicate src2)) v0));
|
||||
format %{ "vadd_regI_masked $dst_src, $dst_src, $src2" %}
|
||||
ins_encode %{
|
||||
BasicType bt = Matcher::vector_element_basic_type(this);
|
||||
__ vsetvli_helper(bt, Matcher::vector_length(this));
|
||||
__ vadd_vx(as_VectorRegister($dst_src$$reg),
|
||||
as_VectorRegister($dst_src$$reg),
|
||||
as_Register($src2$$reg), Assembler::v0_t);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct vadd_regL_masked(vReg dst_src, iRegL src2, vRegMask_V0 v0) %{
|
||||
match(Set dst_src (AddVL (Binary dst_src (Replicate src2)) v0));
|
||||
format %{ "vadd_regL_masked $dst_src, $dst_src, $src2" %}
|
||||
ins_encode %{
|
||||
__ vsetvli_helper(T_LONG, Matcher::vector_length(this));
|
||||
__ vadd_vx(as_VectorRegister($dst_src$$reg),
|
||||
as_VectorRegister($dst_src$$reg),
|
||||
as_Register($src2$$reg), Assembler::v0_t);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// vector sub
|
||||
|
||||
instruct vsub(vReg dst, vReg src1, vReg src2) %{
|
||||
@@ -451,6 +569,64 @@ instruct vsub_fp_masked(vReg dst_src1, vReg src2, vRegMask_V0 v0) %{
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// vector-scalar sub (unpredicated)
|
||||
|
||||
instruct vsub_regI(vReg dst, vReg src1, iRegIorL2I src2) %{
|
||||
match(Set dst (SubVB src1 (Replicate src2)));
|
||||
match(Set dst (SubVS src1 (Replicate src2)));
|
||||
match(Set dst (SubVI src1 (Replicate src2)));
|
||||
format %{ "vsub_regI $dst, $src1, $src2" %}
|
||||
ins_encode %{
|
||||
BasicType bt = Matcher::vector_element_basic_type(this);
|
||||
__ vsetvli_helper(bt, Matcher::vector_length(this));
|
||||
__ vsub_vx(as_VectorRegister($dst$$reg),
|
||||
as_VectorRegister($src1$$reg),
|
||||
as_Register($src2$$reg));
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct vsub_regL(vReg dst, vReg src1, iRegL src2) %{
|
||||
match(Set dst (SubVL src1 (Replicate src2)));
|
||||
format %{ "vsub_regL $dst, $src1, $src2" %}
|
||||
ins_encode %{
|
||||
__ vsetvli_helper(T_LONG, Matcher::vector_length(this));
|
||||
__ vsub_vx(as_VectorRegister($dst$$reg),
|
||||
as_VectorRegister($src1$$reg),
|
||||
as_Register($src2$$reg));
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// vector-scalar sub (predicated)
|
||||
|
||||
instruct vsub_regI_masked(vReg dst_src, iRegIorL2I src2, vRegMask_V0 v0) %{
|
||||
match(Set dst_src (SubVB (Binary dst_src (Replicate src2)) v0));
|
||||
match(Set dst_src (SubVS (Binary dst_src (Replicate src2)) v0));
|
||||
match(Set dst_src (SubVI (Binary dst_src (Replicate src2)) v0));
|
||||
format %{ "vsub_regI_masked $dst_src, $dst_src, $src2" %}
|
||||
ins_encode %{
|
||||
BasicType bt = Matcher::vector_element_basic_type(this);
|
||||
__ vsetvli_helper(bt, Matcher::vector_length(this));
|
||||
__ vsub_vx(as_VectorRegister($dst_src$$reg),
|
||||
as_VectorRegister($dst_src$$reg),
|
||||
as_Register($src2$$reg), Assembler::v0_t);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct vsub_regL_masked(vReg dst_src, iRegL src2, vRegMask_V0 v0) %{
|
||||
match(Set dst_src (SubVL (Binary dst_src (Replicate src2)) v0));
|
||||
format %{ "vsub_regL_masked $dst_src, $dst_src, $src2" %}
|
||||
ins_encode %{
|
||||
__ vsetvli_helper(T_LONG, Matcher::vector_length(this));
|
||||
__ vsub_vx(as_VectorRegister($dst_src$$reg),
|
||||
as_VectorRegister($dst_src$$reg),
|
||||
as_Register($src2$$reg), Assembler::v0_t);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// vector and
|
||||
|
||||
instruct vand(vReg dst, vReg src1, vReg src2) %{
|
||||
@@ -1467,6 +1643,64 @@ instruct vmul_fp_masked(vReg dst_src1, vReg src2, vRegMask_V0 v0) %{
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// vector-scalar mul (unpredicated)
|
||||
|
||||
instruct vmul_regI(vReg dst, vReg src1, iRegIorL2I src2) %{
|
||||
match(Set dst (MulVB src1 (Replicate src2)));
|
||||
match(Set dst (MulVS src1 (Replicate src2)));
|
||||
match(Set dst (MulVI src1 (Replicate src2)));
|
||||
format %{ "vmul_regI $dst, $src1, $src2" %}
|
||||
ins_encode %{
|
||||
BasicType bt = Matcher::vector_element_basic_type(this);
|
||||
__ vsetvli_helper(bt, Matcher::vector_length(this));
|
||||
__ vmul_vx(as_VectorRegister($dst$$reg),
|
||||
as_VectorRegister($src1$$reg),
|
||||
as_Register($src2$$reg));
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct vmul_regL(vReg dst, vReg src1, iRegL src2) %{
|
||||
match(Set dst (MulVL src1 (Replicate src2)));
|
||||
format %{ "vmul_regL $dst, $src1, $src2" %}
|
||||
ins_encode %{
|
||||
__ vsetvli_helper(T_LONG, Matcher::vector_length(this));
|
||||
__ vmul_vx(as_VectorRegister($dst$$reg),
|
||||
as_VectorRegister($src1$$reg),
|
||||
as_Register($src2$$reg));
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// vector-scalar mul (predicated)
|
||||
|
||||
instruct vmul_regI_masked(vReg dst_src, iRegIorL2I src2, vRegMask_V0 v0) %{
|
||||
match(Set dst_src (MulVB (Binary dst_src (Replicate src2)) v0));
|
||||
match(Set dst_src (MulVS (Binary dst_src (Replicate src2)) v0));
|
||||
match(Set dst_src (MulVI (Binary dst_src (Replicate src2)) v0));
|
||||
format %{ "vmul_regI_masked $dst_src, $dst_src, $src2" %}
|
||||
ins_encode %{
|
||||
BasicType bt = Matcher::vector_element_basic_type(this);
|
||||
__ vsetvli_helper(bt, Matcher::vector_length(this));
|
||||
__ vmul_vx(as_VectorRegister($dst_src$$reg),
|
||||
as_VectorRegister($dst_src$$reg),
|
||||
as_Register($src2$$reg), Assembler::v0_t);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct vmul_regL_masked(vReg dst_src, iRegL src2, vRegMask_V0 v0) %{
|
||||
match(Set dst_src (MulVL (Binary dst_src (Replicate src2)) v0));
|
||||
format %{ "vmul_regL_masked $dst_src, $dst_src, $src2" %}
|
||||
ins_encode %{
|
||||
__ vsetvli_helper(T_LONG, Matcher::vector_length(this));
|
||||
__ vmul_vx(as_VectorRegister($dst_src$$reg),
|
||||
as_VectorRegister($dst_src$$reg),
|
||||
as_Register($src2$$reg), Assembler::v0_t);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// vector neg
|
||||
|
||||
instruct vneg(vReg dst, vReg src) %{
|
||||
@@ -3057,6 +3291,200 @@ instruct vshiftcnt(vReg dst, iRegIorL2I cnt) %{
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// --------------------------------- Vector Rotation ----------------------------------
|
||||
//
|
||||
// The following rotate instructs are shared by vectorization (SLP, superword.cpp) and the Vector API.
|
||||
//
|
||||
// Rotate behaviour in vectorization is defined by the Java API, which includes:
|
||||
// 1. Integer.rotateRight, Integer.rotateLeft.
|
||||
// `rotation by any multiple of 32 is a no-op, so all but the last five bits of the rotation distance can be ignored`.
|
||||
// 2. Long.rotateRight, Long.rotateLeft.
|
||||
// `rotation by any multiple of 64 is a no-op, so all but the last six bits of the rotation distance can be ignored`.
|
||||
//
|
||||
// Rotate behaviour in Vector API is defined as below, e.g.
|
||||
// 1. For Byte ROR, `a ROR b` is: (byte)(((((byte)a) & 0xFF) >>> (b & 7)) | ((((byte)a) & 0xFF) << (8 - (b & 7))))
|
||||
// 2. For Short ROR, `a ROR b` is: (short)(((((short)a) & 0xFFFF) >>> (b & 15)) | ((((short)a) & 0xFFFF) << (16 - (b & 15))))
|
||||
// 3. For Integer ROR, `a ROR b` is: Integer.rotateRight(a, ((int)b))
|
||||
// 4. For Long ROR, `a ROR b` is: Long.rotateRight(a, ((int)b))
|
||||
//
|
||||
// Basically, the behaviour between vectorization and Vector API is the same for Long and Integer, except that Vector API
|
||||
// also supports Byte and Short rotation. But we can still share the intrinsics between vectorization and Vector API.
|
||||
//
|
||||
// NOTE: As vror.vi encodes a 6-bit immediate rotate amount, which is different from other vector-immediate instructions,
|
||||
// the implementation of vector rotation for long and other types can be unified.
|
||||
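As a plain-C++ reference for the lane semantics the vror/vrol rules below implement, and for the identity the immediate rotate-left rule relies on (once the amount is reduced modulo the lane width, rotate-left by c equals rotate-right by bits - c), a 32-bit sketch is shown here; other lane widths follow the same pattern:

```cpp
#include <cstdint>

// Reference semantics for a 32-bit lane; the shift is reduced modulo the lane
// width, matching the `con = (unsigned)shift & (bits - 1)` reduction below.
static inline uint32_t ror32(uint32_t a, uint32_t b) {
  b &= 31;
  return b == 0 ? a : (a >> b) | (a << (32 - b));
}
static inline uint32_t rol32(uint32_t a, uint32_t b) {
  b &= 31;
  return b == 0 ? a : ror32(a, 32 - b);   // rotate-left as rotate-right by (bits - b)
}
```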
|
||||
// Rotate right
|
||||
|
||||
instruct vrotate_right(vReg dst, vReg src, vReg shift) %{
|
||||
match(Set dst (RotateRightV src shift));
|
||||
format %{ "vrotate_right $dst, $src, $shift\t" %}
|
||||
ins_encode %{
|
||||
BasicType bt = Matcher::vector_element_basic_type(this);
|
||||
__ vsetvli_helper(bt, Matcher::vector_length(this));
|
||||
__ vror_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
|
||||
as_VectorRegister($shift$$reg));
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct vrotate_right_reg(vReg dst, vReg src, iRegIorL2I shift) %{
|
||||
match(Set dst (RotateRightV src (Replicate shift)));
|
||||
format %{ "vrotate_right_reg $dst, $src, $shift\t" %}
|
||||
ins_encode %{
|
||||
BasicType bt = Matcher::vector_element_basic_type(this);
|
||||
__ vsetvli_helper(bt, Matcher::vector_length(this));
|
||||
__ vror_vx(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
|
||||
as_Register($shift$$reg));
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct vrotate_right_imm(vReg dst, vReg src, immI shift) %{
|
||||
match(Set dst (RotateRightV src shift));
|
||||
format %{ "vrotate_right_imm $dst, $src, $shift\t" %}
|
||||
ins_encode %{
|
||||
BasicType bt = Matcher::vector_element_basic_type(this);
|
||||
uint32_t bits = type2aelembytes(bt) * 8;
|
||||
uint32_t con = (unsigned)$shift$$constant & (bits - 1);
|
||||
if (con == 0) {
|
||||
return;
|
||||
}
|
||||
__ vsetvli_helper(bt, Matcher::vector_length(this));
|
||||
__ vror_vi(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), con);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// Rotate right - masked
|
||||
|
||||
instruct vrotate_right_masked(vReg dst_src, vReg shift, vRegMask_V0 v0) %{
|
||||
match(Set dst_src (RotateRightV (Binary dst_src shift) v0));
|
||||
format %{ "vrotate_right_masked $dst_src, $dst_src, $shift, v0.t\t" %}
|
||||
ins_encode %{
|
||||
BasicType bt = Matcher::vector_element_basic_type(this);
|
||||
__ vsetvli_helper(bt, Matcher::vector_length(this));
|
||||
__ vror_vv(as_VectorRegister($dst_src$$reg), as_VectorRegister($dst_src$$reg),
|
||||
as_VectorRegister($shift$$reg), Assembler::v0_t);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct vrotate_right_reg_masked(vReg dst_src, iRegIorL2I shift, vRegMask_V0 v0) %{
|
||||
match(Set dst_src (RotateRightV (Binary dst_src (Replicate shift)) v0));
|
||||
format %{ "vrotate_right_reg_masked $dst_src, $dst_src, $shift, v0.t\t" %}
|
||||
ins_encode %{
|
||||
BasicType bt = Matcher::vector_element_basic_type(this);
|
||||
__ vsetvli_helper(bt, Matcher::vector_length(this));
|
||||
__ vror_vx(as_VectorRegister($dst_src$$reg), as_VectorRegister($dst_src$$reg),
|
||||
as_Register($shift$$reg), Assembler::v0_t);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct vrotate_right_imm_masked(vReg dst_src, immI shift, vRegMask_V0 v0) %{
|
||||
match(Set dst_src (RotateRightV (Binary dst_src shift) v0));
|
||||
format %{ "vrotate_right_imm_masked $dst_src, $dst_src, $shift, v0.t\t" %}
|
||||
ins_encode %{
|
||||
BasicType bt = Matcher::vector_element_basic_type(this);
|
||||
uint32_t bits = type2aelembytes(bt) * 8;
|
||||
uint32_t con = (unsigned)$shift$$constant & (bits - 1);
|
||||
if (con == 0) {
|
||||
return;
|
||||
}
|
||||
__ vsetvli_helper(bt, Matcher::vector_length(this));
|
||||
__ vror_vi(as_VectorRegister($dst_src$$reg), as_VectorRegister($dst_src$$reg),
|
||||
con, Assembler::v0_t);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// Rotate left
|
||||
|
||||
instruct vrotate_left(vReg dst, vReg src, vReg shift) %{
|
||||
match(Set dst (RotateLeftV src shift));
|
||||
format %{ "vrotate_left $dst, $src, $shift\t" %}
|
||||
ins_encode %{
|
||||
BasicType bt = Matcher::vector_element_basic_type(this);
|
||||
__ vsetvli_helper(bt, Matcher::vector_length(this));
|
||||
__ vrol_vv(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
|
||||
as_VectorRegister($shift$$reg));
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct vrotate_left_reg(vReg dst, vReg src, iRegIorL2I shift) %{
|
||||
match(Set dst (RotateLeftV src (Replicate shift)));
|
||||
format %{ "vrotate_left_reg $dst, $src, $shift\t" %}
|
||||
ins_encode %{
|
||||
BasicType bt = Matcher::vector_element_basic_type(this);
|
||||
__ vsetvli_helper(bt, Matcher::vector_length(this));
|
||||
__ vrol_vx(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg),
|
||||
as_Register($shift$$reg));
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct vrotate_left_imm(vReg dst, vReg src, immI shift) %{
|
||||
match(Set dst (RotateLeftV src shift));
|
||||
format %{ "vrotate_left_imm $dst, $src, $shift\t" %}
|
||||
ins_encode %{
|
||||
BasicType bt = Matcher::vector_element_basic_type(this);
|
||||
uint32_t bits = type2aelembytes(bt) * 8;
|
||||
uint32_t con = (unsigned)$shift$$constant & (bits - 1);
|
||||
if (con == 0) {
|
||||
return;
|
||||
}
|
||||
__ vsetvli_helper(bt, Matcher::vector_length(this));
|
||||
con = bits - con;
|
||||
__ vror_vi(as_VectorRegister($dst$$reg), as_VectorRegister($src$$reg), con);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// Rotate left - masked
|
||||
|
||||
instruct vrotate_left_masked(vReg dst_src, vReg shift, vRegMask_V0 v0) %{
|
||||
match(Set dst_src (RotateLeftV (Binary dst_src shift) v0));
|
||||
format %{ "vrotate_left_masked $dst_src, $dst_src, $shift, v0.t\t" %}
|
||||
ins_encode %{
|
||||
BasicType bt = Matcher::vector_element_basic_type(this);
|
||||
__ vsetvli_helper(bt, Matcher::vector_length(this));
|
||||
__ vrol_vv(as_VectorRegister($dst_src$$reg), as_VectorRegister($dst_src$$reg),
|
||||
as_VectorRegister($shift$$reg), Assembler::v0_t);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct vrotate_left_reg_masked(vReg dst_src, iRegIorL2I shift, vRegMask_V0 v0) %{
|
||||
match(Set dst_src (RotateLeftV (Binary dst_src (Replicate shift)) v0));
|
||||
format %{ "vrotate_left_reg_masked $dst_src, $dst_src, $shift, v0.t\t" %}
|
||||
ins_encode %{
|
||||
BasicType bt = Matcher::vector_element_basic_type(this);
|
||||
__ vsetvli_helper(bt, Matcher::vector_length(this));
|
||||
__ vrol_vx(as_VectorRegister($dst_src$$reg), as_VectorRegister($dst_src$$reg),
|
||||
as_Register($shift$$reg), Assembler::v0_t);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct vrotate_left_imm_masked(vReg dst_src, immI shift, vRegMask_V0 v0) %{
|
||||
match(Set dst_src (RotateLeftV (Binary dst_src shift) v0));
|
||||
format %{ "vrotate_left_imm_masked $dst_src, $dst_src, $shift, v0.t\t" %}
|
||||
ins_encode %{
|
||||
BasicType bt = Matcher::vector_element_basic_type(this);
|
||||
uint32_t bits = type2aelembytes(bt) * 8;
|
||||
uint32_t con = (unsigned)$shift$$constant & (bits - 1);
|
||||
if (con == 0) {
|
||||
return;
|
||||
}
|
||||
__ vsetvli_helper(bt, Matcher::vector_length(this));
|
||||
con = bits - con;
|
||||
__ vror_vi(as_VectorRegister($dst_src$$reg), as_VectorRegister($dst_src$$reg),
|
||||
con, Assembler::v0_t);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// vector sqrt
|
||||
|
||||
instruct vsqrt_fp(vReg dst, vReg src) %{
|
||||
|
||||
@@ -2840,7 +2840,6 @@ class StubGenerator: public StubCodeGenerator {
|
||||
* c_rarg2 - y address
|
||||
* c_rarg3 - y length
|
||||
* c_rarg4 - z address
|
||||
* c_rarg5 - z length
|
||||
*/
|
||||
address generate_multiplyToLen()
|
||||
{
|
||||
@@ -2853,8 +2852,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
const Register y = x12;
|
||||
const Register ylen = x13;
|
||||
const Register z = x14;
|
||||
const Register zlen = x15;
|
||||
|
||||
const Register tmp0 = x15;
|
||||
const Register tmp1 = x16;
|
||||
const Register tmp2 = x17;
|
||||
const Register tmp3 = x7;
|
||||
@@ -2865,7 +2864,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
BLOCK_COMMENT("Entry:");
|
||||
__ enter(); // required for proper stackwalking of RuntimeStub frame
|
||||
__ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
|
||||
__ multiply_to_len(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
|
||||
__ leave(); // required for proper stackwalking of RuntimeStub frame
|
||||
__ ret();
|
||||
|
||||
@@ -2881,10 +2880,10 @@ class StubGenerator: public StubCodeGenerator {
|
||||
const Register x = x10;
|
||||
const Register xlen = x11;
|
||||
const Register z = x12;
|
||||
const Register zlen = x13;
|
||||
const Register y = x14; // == x
|
||||
const Register ylen = x15; // == xlen
|
||||
|
||||
const Register tmp0 = x13; // zlen, unused
|
||||
const Register tmp1 = x16;
|
||||
const Register tmp2 = x17;
|
||||
const Register tmp3 = x7;
|
||||
@@ -2897,7 +2896,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
__ enter();
|
||||
__ mv(y, x);
|
||||
__ mv(ylen, xlen);
|
||||
__ multiply_to_len(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
|
||||
__ multiply_to_len(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
|
||||
__ leave();
|
||||
__ ret();
|
||||
|
||||
|
||||
@@ -223,7 +223,7 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
|
||||
|
||||
__ block_comment("{ on_entry");
|
||||
__ la(c_rarg0, Address(sp, frame_data_offset));
|
||||
__ movptr(c_rarg1, (intptr_t) receiver);
|
||||
__ movptr(c_rarg1, (address) receiver);
|
||||
__ rt_call(CAST_FROM_FN_PTR(address, UpcallLinker::on_entry));
|
||||
__ mv(xthread, x10);
|
||||
__ reinit_heapbase();
|
||||
|
||||
@@ -164,7 +164,7 @@ void C1_MacroAssembler::try_allocate(
|
||||
Register obj, // result: Pointer to object after successful allocation.
|
||||
Register var_size_in_bytes, // Object size in bytes if unknown at compile time; invalid otherwise.
|
||||
int con_size_in_bytes, // Object size in bytes if known at compile time.
|
||||
Register t1, // Temp register: Must be global register for incr_allocated_bytes.
|
||||
Register t1, // Temp register.
|
||||
Label& slow_case // Continuation point if fast allocation fails.
|
||||
) {
|
||||
if (UseTLAB) {
|
||||
|
||||
@@ -282,7 +282,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
|
||||
__ z_lgr(Rtmp1, Rstore_addr);
|
||||
__ z_xgr(Rtmp1, Rnew_val);
|
||||
}
|
||||
__ z_srag(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes);
|
||||
__ z_srag(Rtmp1, Rtmp1, G1HeapRegion::LogOfHRGrainBytes);
|
||||
__ z_bre(filtered);
|
||||
|
||||
// Crosses regions, storing null?
|
||||
|
||||
@@ -5281,9 +5281,6 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen,
|
||||
|
||||
z_stmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);
|
||||
|
||||
// In openJdk, we store the argument as 32-bit value to slot.
|
||||
Address zlen(Z_SP, _z_abi(remaining_cargs)); // Int in long on big endian.
|
||||
|
||||
const Register idx = tmp1;
|
||||
const Register kdx = tmp2;
|
||||
const Register xstart = tmp3;
|
||||
@@ -5308,7 +5305,7 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen,
|
||||
//
|
||||
|
||||
lgr_if_needed(idx, ylen); // idx = ylen
|
||||
z_llgf(kdx, zlen); // C2 does not respect int to long conversion for stub calls, thus load zero-extended.
|
||||
z_agrk(kdx, xlen, ylen); // kdx = xlen + ylen
|
||||
clear_reg(carry); // carry = 0
|
||||
|
||||
Label L_done;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2023 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@@ -2981,7 +2981,6 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// Z_ARG3 - y address
|
||||
// Z_ARG4 - y length
|
||||
// Z_ARG5 - z address
|
||||
// 160[Z_SP] - z length
|
||||
address generate_multiplyToLen() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", "multiplyToLen");
|
||||
@@ -2993,8 +2992,6 @@ class StubGenerator: public StubCodeGenerator {
|
||||
const Register y = Z_ARG3;
|
||||
const Register ylen = Z_ARG4;
|
||||
const Register z = Z_ARG5;
|
||||
// zlen is passed on the stack:
|
||||
// Address zlen(Z_SP, _z_abi(remaining_cargs));
|
||||
|
||||
// Next registers will be saved on stack in multiply_to_len().
|
||||
const Register tmp1 = Z_tmp_1;
|
||||
|
||||
File diff suppressed because it is too large
@@ -308,17 +308,29 @@ class Address {
|
||||
|
||||
private:
|
||||
bool base_needs_rex() const {
|
||||
return _base->is_valid() && _base->encoding() >= 8;
|
||||
return _base->is_valid() && ((_base->encoding() & 8) == 8);
|
||||
}
|
||||
|
||||
bool base_needs_rex2() const {
|
||||
return _base->is_valid() && _base->encoding() >= 16;
|
||||
}
|
||||
|
||||
bool index_needs_rex() const {
|
||||
return _index->is_valid() &&_index->encoding() >= 8;
|
||||
return _index->is_valid() && ((_index->encoding() & 8) == 8);
|
||||
}
|
||||
|
||||
bool index_needs_rex2() const {
|
||||
return _index->is_valid() &&_index->encoding() >= 16;
|
||||
}
|
||||
|
||||
bool xmmindex_needs_rex() const {
|
||||
return _xmmindex->is_valid() && ((_xmmindex->encoding() & 8) == 8);
|
||||
}
|
||||
|
||||
bool xmmindex_needs_rex2() const {
|
||||
return _xmmindex->is_valid() && _xmmindex->encoding() >= 16;
|
||||
}
|
||||
|
||||
relocInfo::relocType reloc() const { return _rspec.type(); }
|
||||
|
||||
friend class Assembler;
|
||||
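The reworked predicates in this hunk test bits of the register encoding instead of plain magnitude: bit 3 of the encoding drives the classic REX.B/X/R extension, while encodings of 16 and above (the APX extended GPRs) additionally require the REX2 prefix. The same tests over a raw encoding value, as a standalone sketch:

```cpp
// Illustrative predicates over a raw register encoding (0..31).
static inline bool enc_needs_rex(int enc)  { return (enc & 8) == 8; }  // bit 3 set: 8..15 and 24..31
static inline bool enc_needs_rex2(int enc) { return enc >= 16; }       // APX extended GPRs r16..r31
```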
@@ -508,12 +520,26 @@ class Assembler : public AbstractAssembler {
|
||||
REX_WRX = 0x4E,
|
||||
REX_WRXB = 0x4F,
|
||||
|
||||
REX2 = 0xd5,
|
||||
WREX2 = REX2 << 8,
|
||||
|
||||
VEX_3bytes = 0xC4,
|
||||
VEX_2bytes = 0xC5,
|
||||
EVEX_4bytes = 0x62,
|
||||
Prefix_EMPTY = 0x0
|
||||
};
|
||||
|
||||
enum PrefixBits {
|
||||
REXBIT_B = 0x01,
|
||||
REXBIT_X = 0x02,
|
||||
REXBIT_R = 0x04,
|
||||
REXBIT_W = 0x08,
|
||||
REX2BIT_B4 = 0x10,
|
||||
REX2BIT_X4 = 0x20,
|
||||
REX2BIT_R4 = 0x40,
|
||||
REX2BIT_M0 = 0x80
|
||||
};
|
||||
|
||||
enum VexPrefix {
|
||||
VEX_B = 0x20,
|
||||
VEX_X = 0x40,
|
||||
@@ -525,10 +551,18 @@ class Assembler : public AbstractAssembler {
|
||||
EVEX_F = 0x04,
|
||||
EVEX_V = 0x08,
|
||||
EVEX_Rb = 0x10,
|
||||
EVEX_B = 0x20,
|
||||
EVEX_X = 0x40,
|
||||
EVEX_Z = 0x80
|
||||
};
|
||||
|
||||
enum ExtEvexPrefix {
|
||||
EEVEX_R = 0x10,
|
||||
EEVEX_B = 0x08,
|
||||
EEVEX_X = 0x04,
|
||||
EEVEX_V = 0x08
|
||||
};
|
||||
|
||||
enum EvexRoundPrefix {
|
||||
EVEX_RNE = 0x0,
|
||||
EVEX_RD = 0x1,
|
||||
@@ -540,7 +574,7 @@ class Assembler : public AbstractAssembler {
|
||||
VEX_SIMD_NONE = 0x0,
|
||||
VEX_SIMD_66 = 0x1,
|
||||
VEX_SIMD_F3 = 0x2,
|
||||
VEX_SIMD_F2 = 0x3
|
||||
VEX_SIMD_F2 = 0x3,
|
||||
};
|
||||
|
||||
enum VexOpcode {
|
||||
@@ -548,6 +582,7 @@ class Assembler : public AbstractAssembler {
|
||||
VEX_OPCODE_0F = 0x1,
|
||||
VEX_OPCODE_0F_38 = 0x2,
|
||||
VEX_OPCODE_0F_3A = 0x3,
|
||||
VEX_OPCODE_0F_3C = 0x4,
|
||||
VEX_OPCODE_MASK = 0x1F
|
||||
};
|
||||
|
||||
@@ -572,7 +607,8 @@ class Assembler : public AbstractAssembler {
|
||||
EVEX_OVM = 20,
|
||||
EVEX_M128 = 21,
|
||||
EVEX_DUP = 22,
|
||||
EVEX_ETUP = 23
|
||||
EVEX_NOSCALE = 23,
|
||||
EVEX_ETUP = 24
|
||||
};
|
||||
|
||||
enum EvexInputSizeInBits {
|
||||
@@ -686,33 +722,62 @@ private:
|
||||
InstructionAttr *_attributes;
|
||||
void set_attributes(InstructionAttr* attributes);
|
||||
|
||||
int get_base_prefix_bits(int enc);
|
||||
int get_index_prefix_bits(int enc);
|
||||
int get_base_prefix_bits(Register base);
|
||||
int get_index_prefix_bits(Register index);
|
||||
int get_reg_prefix_bits(int enc);
|
||||
|
||||
// 64bit prefixes
|
||||
void prefix(Register reg);
|
||||
void prefix(Register dst, Register src, Prefix p);
|
||||
void prefix_rex2(Register dst, Register src);
|
||||
void prefix(Register dst, Address adr, Prefix p);
|
||||
void prefix_rex2(Register dst, Address adr);
|
||||
|
||||
void prefix(Address adr);
|
||||
void prefix(Address adr, Register reg, bool byteinst = false);
|
||||
// The is_map1 bool indicates an x86 map1 instruction which, when
|
||||
// legacy encoded, uses a 0x0F opcode prefix. By specification, the
|
||||
// opcode prefix is omitted when using rex2 encoding in support
|
||||
// of APX extended GPRs.
|
||||
void prefix(Address adr, bool is_map1 = false);
|
||||
void prefix_rex2(Address adr, bool is_map1 = false);
|
||||
void prefix(Address adr, Register reg, bool byteinst = false, bool is_map1 = false);
|
||||
void prefix_rex2(Address adr, Register reg, bool byteinst = false, bool is_map1 = false);
|
||||
void prefix(Address adr, XMMRegister reg);
|
||||
void prefix_rex2(Address adr, XMMRegister reg);
|
||||
|
||||
int prefix_and_encode(int reg_enc, bool byteinst = false);
|
||||
int prefix_and_encode(int dst_enc, int src_enc) {
|
||||
return prefix_and_encode(dst_enc, false, src_enc, false);
|
||||
int prefix_and_encode(int reg_enc, bool byteinst = false, bool is_map1 = false);
|
||||
int prefix_and_encode_rex2(int reg_enc, bool is_map1 = false);
|
||||
int prefix_and_encode(int dst_enc, int src_enc, bool is_map1 = false) {
|
||||
return prefix_and_encode(dst_enc, false, src_enc, false, is_map1);
|
||||
}
|
||||
int prefix_and_encode(int dst_enc, bool dst_is_byte, int src_enc, bool src_is_byte);
|
||||
int prefix_and_encode(int dst_enc, bool dst_is_byte, int src_enc, bool src_is_byte, bool is_map1 = false);
|
||||
|
||||
int prefix_and_encode_rex2(int dst_enc, int src_enc, int init_bits = 0);
|
||||
// Some prefixq variants always emit exactly one prefix byte, so besides a
|
||||
// prefix-emitting method we provide a method to get the prefix byte to emit,
|
||||
// which can then be folded into a byte stream.
|
||||
int8_t get_prefixq(Address adr);
|
||||
int8_t get_prefixq(Address adr, Register reg);
|
||||
int get_prefixq(Address adr, bool is_map1 = false);
|
||||
int get_prefixq_rex2(Address adr, bool is_map1 = false);
|
||||
int get_prefixq(Address adr, Register reg, bool is_map1 = false);
|
||||
int get_prefixq_rex2(Address adr, Register reg, bool ismap1 = false);
|
||||
|
||||
void prefixq(Address adr);
|
||||
void prefixq(Address adr, Register reg);
|
||||
void prefixq(Address adr, Register reg, bool is_map1 = false);
|
||||
void prefixq(Address adr, XMMRegister reg);
|
||||
void prefixq_rex2(Address adr, XMMRegister src);
|
||||
|
||||
int prefixq_and_encode(int reg_enc);
|
||||
int prefixq_and_encode(int dst_enc, int src_enc);
|
||||
bool prefix_is_rex2(int prefix);
|
||||
|
||||
int prefixq_and_encode(int reg_enc, bool is_map1 = false);
|
||||
int prefixq_and_encode_rex2(int reg_enc, bool is_map1 = false);
|
||||
int prefixq_and_encode(int dst_enc, int src_enc, bool is_map1 = false);
|
||||
int prefixq_and_encode_rex2(int dst_enc, int src_enc, bool is_map1 = false);
|
||||
|
||||
bool needs_rex2(Register reg1, Register reg2 = noreg, Register reg3 = noreg);
|
||||
|
||||
bool needs_eevex(Register reg1, Register reg2 = noreg, Register reg3 = noreg);
|
||||
bool needs_eevex(int enc1, int enc2 = -1, int enc3 = -1);
|
||||
|
||||
void rex_prefix(Address adr, XMMRegister xreg,
                VexSimdPrefix pre, VexOpcode opc, bool rex_w);
@@ -721,22 +786,21 @@ private:

void vex_prefix(bool vex_r, bool vex_b, bool vex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc);

void evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_r, bool evex_v,
                 int nds_enc, VexSimdPrefix pre, VexOpcode opc);
void evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_v, bool evex_r, bool evex_b,
                 bool eevex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc);

void vex_prefix(Address adr, int nds_enc, int xreg_enc,
                VexSimdPrefix pre, VexOpcode opc,
void vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc,
                InstructionAttr *attributes);

int vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc,
                          VexSimdPrefix pre, VexOpcode opc,
                          InstructionAttr *attributes);
                          InstructionAttr *attributes, bool src_is_gpr = false);

void simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre,
                 VexOpcode opc, InstructionAttr *attributes);

int simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre,
                           VexOpcode opc, InstructionAttr *attributes);
                           VexOpcode opc, InstructionAttr *attributes, bool src_is_gpr = false);

// Helper functions for groups of instructions
void emit_arith_b(int op1, int op2, Register dst, int imm8);
@@ -821,6 +885,10 @@ private:
void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);

void emit_prefix_and_int8(int prefix, int b1);
void emit_opcode_prefix_and_encoding(int byte1, int ocp_and_encoding);
void emit_opcode_prefix_and_encoding(int byte1, int byte2, int ocp_and_encoding);
void emit_opcode_prefix_and_encoding(int byte1, int byte2, int ocp_and_encoding, int byte3);
bool always_reachable(AddressLiteral adr) NOT_LP64( { return true; } );
bool reachable(AddressLiteral adr) NOT_LP64( { return true; } );

@@ -907,6 +975,8 @@ private:
// Instruction prefixes
void prefix(Prefix p);

void prefix16(int p);

public:

// Creation
@@ -1730,7 +1800,7 @@ private:
void negq(Address dst);
#endif

void nop(int i = 1);
void nop(uint i = 1);

void notl(Register dst);
@@ -30,23 +30,53 @@
#include "code/codeCache.hpp"

#ifndef _LP64
inline int Assembler::prefix_and_encode(int reg_enc, bool byteinst) { return reg_enc; }
inline int Assembler::prefixq_and_encode(int reg_enc) { return reg_enc; }
inline int Assembler::prefix_and_encode(int reg_enc, bool byteinst, bool is_map1) {
  int opc_prefix = is_map1 ? 0x0F00 : 0;
  return opc_prefix | reg_enc;
}

inline int Assembler::prefix_and_encode(int dst_enc, bool dst_is_byte, int src_enc, bool src_is_byte) { return dst_enc << 3 | src_enc; }
inline int Assembler::prefixq_and_encode(int dst_enc, int src_enc) { return dst_enc << 3 | src_enc; }
inline int Assembler::prefixq_and_encode(int reg_enc, bool is_map1) {
  int opc_prefix = is_map1 ? 0xF00 : 0;
  return opc_prefix | reg_enc;
}

inline int Assembler::prefix_and_encode(int dst_enc, bool dst_is_byte, int src_enc, bool src_is_byte, bool is_map1) {
  int opc_prefix = is_map1 ? 0xF00 : 0;
  return opc_prefix | (dst_enc << 3 | src_enc);
}

inline int Assembler::prefixq_and_encode(int dst_enc, int src_enc, bool is_map1) {
  int opc_prefix = is_map1 ? 0xF00 : 0;
  return opc_prefix | dst_enc << 3 | src_enc;
}

inline void Assembler::prefix(Register reg) {}
inline void Assembler::prefix(Register dst, Register src, Prefix p) {}
inline void Assembler::prefix(Register dst, Address adr, Prefix p) {}
inline void Assembler::prefix(Address adr) {}

inline void Assembler::prefix(Address adr, bool is_map1) {
  if (is_map1) {
    emit_int8(0x0F);
  }
}

inline void Assembler::prefixq(Address adr) {}

inline void Assembler::prefix(Address adr, Register reg, bool byteinst) {}
inline void Assembler::prefixq(Address adr, Register reg) {}
inline void Assembler::prefix(Address adr, Register reg, bool byteinst, bool is_map1) {
  if (is_map1) {
    emit_int8(0x0F);
  }
}
inline void Assembler::prefixq(Address adr, Register reg, bool is_map1) {
  if (is_map1) {
    emit_int8(0x0F);
  }
}

inline void Assembler::prefix(Address adr, XMMRegister reg) {}
inline void Assembler::prefixq(Address adr, XMMRegister reg) {}

#endif // _LP64

#endif // CPU_X86_ASSEMBLER_X86_INLINE_HPP
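The is_map1 variants above fold an optional 0x0F map-1 escape into the high byte of the returned value, so a single call can hand back both the opcode prefix and the ModRM-style register encoding. A minimal, self-contained sketch of how such a packed value could be consumed; the function name and the byte buffer are illustrative, not the HotSpot emitter:

#include <cstdint>
#include <vector>

// prefix_and_enc packs an optional 0x0F escape in bits 8..15 and the
// ModRM reg/rm encoding (dst_enc << 3 | src_enc) in bits 0..7.
static void emit_opcode_and_encoding(std::vector<uint8_t>& code,
                                     uint8_t opcode, int prefix_and_enc) {
  int prefix = (prefix_and_enc >> 8) & 0xFF;          // 0x0F when is_map1 was set
  int enc    = prefix_and_enc & 0xFF;                 // register encoding bits
  if (prefix != 0) {
    code.push_back(static_cast<uint8_t>(prefix));     // map-1 escape byte
  }
  code.push_back(opcode);                             // main opcode byte
  code.push_back(static_cast<uint8_t>(0xC0 | enc));   // register-direct ModRM
}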
@@ -5631,7 +5631,6 @@ void C2_MacroAssembler::vector_mask_compress(KRegister dst, KRegister src, Regis
  kmov(dst, rtmp2);
}

#ifdef _LP64
void C2_MacroAssembler::vector_compress_expand_avx2(int opcode, XMMRegister dst, XMMRegister src,
                                                    XMMRegister mask, Register rtmp, Register rscratch,
                                                    XMMRegister permv, XMMRegister xtmp, BasicType bt,
@@ -5665,7 +5664,6 @@ void C2_MacroAssembler::vector_compress_expand_avx2(int opcode, XMMRegister dst,
  // compressing/expanding the source vector lanes.
  vblendvps(dst, dst, xtmp, permv, vec_enc, false, permv);
}
#endif

void C2_MacroAssembler::vector_compress_expand(int opcode, XMMRegister dst, XMMRegister src, KRegister mask,
                                               bool merge, BasicType bt, int vec_enc) {
@@ -286,7 +286,7 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,

  __ movptr(tmp, store_addr);
  __ xorptr(tmp, new_val);
  __ shrptr(tmp, HeapRegion::LogOfHRGrainBytes);
  __ shrptr(tmp, G1HeapRegion::LogOfHRGrainBytes);
  __ jcc(Assembler::equal, done);

  // crosses regions, storing null?
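The xorptr/shrptr/jcc sequence above is the usual cross-region filter: when the store address and the new value land in the same G1 heap region, the post barrier can be skipped. A small C-level sketch of the same test, where the shift amount plays the role of G1HeapRegion::LogOfHRGrainBytes:

#include <cstdint>

// Two addresses share a region iff they agree on every bit above the
// region-offset bits, i.e. xor-then-shift yields zero.
static bool same_heap_region(uintptr_t store_addr, uintptr_t new_val, unsigned log_region_bytes) {
  return ((store_addr ^ new_val) >> log_region_bytes) == 0;
}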
@@ -352,36 +352,6 @@ void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm,
  __ verify_tlab();
}

void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm, Register thread,
                                               Register var_size_in_bytes,
                                               int con_size_in_bytes,
                                               Register t1) {
  if (!thread->is_valid()) {
#ifdef _LP64
    thread = r15_thread;
#else
    assert(t1->is_valid(), "need temp reg");
    thread = t1;
    __ get_thread(thread);
#endif
  }

#ifdef _LP64
  if (var_size_in_bytes->is_valid()) {
    __ addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
  } else {
    __ addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
  }
#else
  if (var_size_in_bytes->is_valid()) {
    __ addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
  } else {
    __ addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
  }
  __ adcl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())+4), 0);
#endif
}
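For reference, the removed 32-bit path above maintains the thread's 64-bit allocated_bytes counter as two 32-bit halves: addl bumps the low word and adcl folds the resulting carry into the high word at offset +4. A hedged stand-alone sketch of that arithmetic (plain memory instead of the JavaThread field):

#include <cstdint>

// counter[0] is the low 32 bits, counter[1] the high 32 bits of the 64-bit total.
static void incr_allocated_bytes_32(uint32_t counter[2], uint32_t size_in_bytes) {
  uint64_t sum = static_cast<uint64_t>(counter[0]) + size_in_bytes;
  counter[0]  = static_cast<uint32_t>(sum);         // addl: low word
  counter[1] += static_cast<uint32_t>(sum >> 32);   // adcl: propagate the carry (0 or 1)
}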
#ifdef _LP64
void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slow_path, Label* continuation) {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();

@@ -37,12 +37,6 @@ class Node;
class InterpreterMacroAssembler;

class BarrierSetAssembler: public CHeapObj<mtGC> {
private:
  void incr_allocated_bytes(MacroAssembler* masm, Register thread,
                            Register var_size_in_bytes,
                            int con_size_in_bytes,
                            Register t1);

public:
  virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                  Register src, Register dst, Register count) {}
@@ -219,7 +219,6 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
    assert(pre_val != rax, "check this code");
  }

  Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()));
  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -1213,6 +1213,7 @@ public:
#define __ masm->

void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, ZLoadBarrierStubC2* stub) const {
  Assembler::InlineSkippedInstructionsCounter skipped_counter(masm);
  BLOCK_COMMENT("ZLoadBarrierStubC2");

  // Stub entry
@@ -1232,6 +1233,7 @@ void ZBarrierSetAssembler::generate_c2_load_barrier_stub(MacroAssembler* masm, Z
}

void ZBarrierSetAssembler::generate_c2_store_barrier_stub(MacroAssembler* masm, ZStoreBarrierStubC2* stub) const {
  Assembler::InlineSkippedInstructionsCounter skipped_counter(masm);
  BLOCK_COMMENT("ZStoreBarrierStubC2");

  // Stub entry
@@ -232,8 +232,10 @@ define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
  \
  product(bool, IntelJccErratumMitigation, true, DIAGNOSTIC, \
          "Turn off JVM mitigations related to Intel micro code " \
          "mitigations for the Intel JCC erratum")

          "mitigations for the Intel JCC erratum") \
  \
  product(bool, UseAPX, false, EXPERIMENTAL, \
          "Use Advanced Performance Extensions on x86") \
// end of ARCH_FLAGS

#endif // CPU_X86_GLOBALS_X86_HPP
@@ -1150,20 +1150,20 @@ void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src, Register rscratc
// Stub code is generated once and never copied.
// NMethods can't use this because they get copied and we can't force alignment > 32 bytes.
void MacroAssembler::align64() {
  align(64, (unsigned long long) pc());
  align(64, (uint)(uintptr_t)pc());
}

void MacroAssembler::align32() {
  align(32, (unsigned long long) pc());
  align(32, (uint)(uintptr_t)pc());
}

void MacroAssembler::align(int modulus) {
void MacroAssembler::align(uint modulus) {
  // 8273459: Ensure alignment is possible with current segment alignment
  assert(modulus <= CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
  assert(modulus <= (uintx)CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment");
  align(modulus, offset());
}

void MacroAssembler::align(int modulus, int target) {
void MacroAssembler::align(uint modulus, uint target) {
  if (target % modulus != 0) {
    nop(modulus - (target % modulus));
  }
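The align(modulus, target) body above pads with nops only when target is not already a multiple of modulus, and the pad size is the distance to the next multiple. A tiny sketch of that arithmetic:

// Returns how many filler bytes align() would emit for the given target offset.
static unsigned alignment_padding(unsigned target, unsigned modulus) {
  unsigned rem = target % modulus;
  return rem == 0 ? 0 : modulus - rem;   // e.g. target 100, modulus 64 -> 28 (100 + 28 == 128)
}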
@@ -6983,7 +6983,7 @@ void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z,
 * rsi: y
 * rcx: ylen
 * r8: z
 * r11: zlen
 * r11: tmp0
 * r12: tmp1
 * r13: tmp2
 * r14: tmp3
@@ -6991,11 +6991,12 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Regi
 * rbx: tmp5
 *
 */
void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0,
                                     Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) {
  ShortBranchVerifier sbv(this);
  assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx);
  assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, rdx);

  push(tmp0);
  push(tmp1);
  push(tmp2);
  push(tmp3);
@@ -7003,7 +7004,6 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Regi
  push(tmp5);

  push(xlen);
  push(zlen);

  const Register idx = tmp1;
  const Register kdx = tmp2;
@@ -7012,7 +7012,7 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Regi
  const Register y_idx = tmp4;
  const Register carry = tmp5;
  const Register product = xlen;
  const Register x_xstart = zlen; // reuse register
  const Register x_xstart = tmp0;

  // First Loop.
  //
@@ -7028,9 +7028,9 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Regi
  //   z[xstart] = (int)carry;
  //

  movl(idx, ylen);                 // idx = ylen;
  movl(kdx, zlen);                 // kdx = xlen+ylen;
  xorq(carry, carry);              // carry = 0;
  movl(idx, ylen);                 // idx = ylen;
  lea(kdx, Address(xlen, ylen));   // kdx = xlen+ylen;
  xorq(carry, carry);              // carry = 0;

  Label L_done;

@@ -7134,7 +7134,6 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Regi

  bind(L_done);

  pop(zlen);
  pop(xlen);

  pop(tmp5);
@@ -7142,6 +7141,7 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Regi
  pop(tmp3);
  pop(tmp2);
  pop(tmp1);
  pop(tmp0);
}

void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
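The first-loop comments and the movl/lea/xorq setup above correspond to the usual schoolbook step: multiply the most-significant word of x by every word of y, write into the top of z, and carry upward, with kdx starting at xlen + ylen. A rough, hedged C rendition of that step; the word order follows BigInteger's int-array layout, and this is a sketch of the algorithm, not the generated code:

#include <cstdint>

// x has xlen 32-bit words, y has ylen words, z has xlen + ylen words,
// all stored most-significant word first.
static void multiply_first_loop(const uint32_t* x, int xlen,
                                const uint32_t* y, int ylen, uint32_t* z) {
  const int xstart = xlen - 1;
  int kdx = xlen + ylen;                    // matches lea(kdx, Address(xlen, ylen))
  uint64_t carry = 0;                       // matches xorq(carry, carry)
  for (int idx = ylen - 1; idx >= 0; idx--) {
    uint64_t product = (uint64_t)y[idx] * x[xstart] + carry;
    z[--kdx] = (uint32_t)product;           // store low word
    carry = product >> 32;                  // keep high word as carry
  }
  z[xstart] = (uint32_t)carry;              // matches the "z[xstart] = (int)carry" comment
}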
@@ -213,8 +213,8 @@ class MacroAssembler: public Assembler {
  // Alignment
  void align32();
  void align64();
  void align(int modulus);
  void align(int modulus, int target);
  void align(uint modulus);
  void align(uint modulus, uint target);

  void post_call_nop();
  // A 5 byte nop that is safe for patching (see patch_verified_entry)
@@ -2018,7 +2018,7 @@ public:
                        Register yz_idx, Register idx, Register jdx,
                        Register carry, Register product,
                        Register carry2);
  void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
  void multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0,
                       Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5);
  void square_rshift(Register x, Register len, Register z, Register tmp1, Register tmp3,
                     Register tmp4, Register tmp5, Register rdxReg, Register raxReg);
@@ -391,27 +391,27 @@ class ConcreteRegisterImpl : public AbstractRegisterImpl {

template <>
inline Register AbstractRegSet<Register>::first() {
  uint32_t first = _bitset & -_bitset;
  return first ? as_Register(exact_log2(first)) : noreg;
  if (_bitset == 0) { return noreg; }
  return as_Register(count_trailing_zeros(_bitset));
}

template <>
inline Register AbstractRegSet<Register>::last() {
  if (_bitset == 0) { return noreg; }
  uint32_t last = 31 - count_leading_zeros(_bitset);
  int last = max_size() - 1 - count_leading_zeros(_bitset);
  return as_Register(last);
}

template <>
inline XMMRegister AbstractRegSet<XMMRegister>::first() {
  uint32_t first = _bitset & -_bitset;
  return first ? as_XMMRegister(exact_log2(first)) : xnoreg;
  if (_bitset == 0) { return xnoreg; }
  return as_XMMRegister(count_trailing_zeros(_bitset));
}

template <>
inline XMMRegister AbstractRegSet<XMMRegister>::last() {
  if (_bitset == 0) { return xnoreg; }
  uint32_t last = 31 - count_leading_zeros(_bitset);
  int last = max_size() - 1 - count_leading_zeros(_bitset);
  return as_XMMRegister(last);
}
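Both first() and last() above follow the same bit-scan pattern: bail out on an empty set, otherwise the lowest set bit gives first() (count_trailing_zeros) and the highest set bit gives last() (bit width minus one minus count_leading_zeros). A minimal sketch over a plain 32-bit mask, with GCC/Clang builtins standing in for the HotSpot helpers:

#include <cstdint>

static int first_index(uint32_t bitset) {
  if (bitset == 0) return -1;            // empty set: no register
  return __builtin_ctz(bitset);          // index of lowest set bit
}

static int last_index(uint32_t bitset) {
  if (bitset == 0) return -1;            // empty set: no register
  return 31 - __builtin_clz(bitset);     // index of highest set bit
}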