Mirror of https://github.com/JetBrains/JetBrainsRuntime.git
Synced 2025-12-08 10:29:40 +01:00
Compare commits
308 Commits
| SHA1 |
|---|
| 5e40fb6bda |
| 2528c620a6 |
| 1be29bd725 |
| 2f683fdc4a |
| c75df634be |
| fd13e1ce98 |
| 6c9236c80c |
| 1926aeb1a3 |
| 74822ce12a |
| ea86a20e6d |
| 5e30bf6835 |
| c50370599e |
| c460f842bf |
| 549b875866 |
| 832bfbc0dd |
| 3066a67e62 |
| ce9986991d |
| eac8f5d2c9 |
| 2304044ab2 |
| 0f1cd987b3 |
| 1ac7489874 |
| 055d2ffa69 |
| 2bff8e0a13 |
| c6448dc3af |
| 1703915d3f |
| a910b20b51 |
| 7d7e60c8ae |
| 534d2b33dc |
| e9a62d79cd |
| 13a3927855 |
| 282ee40a56 |
| e7a450038a |
| 38f59f84c9 |
| e1681c4828 |
| e138297323 |
| 7583a7b857 |
| e85c7d09df |
| eec1153993 |
| fc739fee53 |
| aeca49e43f |
| 54c95cf226 |
| cd6caedd0a |
| b32ccf2cb2 |
| 0572b6ece7 |
| d1052c70cb |
| 61a590e9be |
| 9d518b3213 |
| 1dda79cfab |
| aa1911191c |
| 00adbbe553 |
| c2d76f9844 |
| 4dd1b3a610 |
| 240541e1c1 |
| a23de2ec09 |
| 3525a40f39 |
| 712d866b72 |
| da7080fffb |
| 4edf791aec |
| 12196baf67 |
| a471fe992f |
| 839cede1a4 |
| ecd2d83096 |
| d8f9b188fa |
| aa26cede63 |
| 20e983a97c |
| 97ec9d3e0a |
| 01b15bc1f9 |
| 6d05a1d3f4 |
| 8ea544c33f |
| 334683e634 |
| 20e0055e20 |
| 83fe688d80 |
| a49ecb26c5 |
| 7f702cf483 |
| bd666f90eb |
| a0c3efa6a8 |
| 7576064a10 |
| 8df6b2c4a3 |
| ff24088c86 |
| 5a1301df19 |
| 5039b42de1 |
| 1ca008fd02 |
| cf75f1f9c6 |
| 52526080ba |
| f799cf180a |
| 56c75453cd |
| 7447276475 |
| c90c31b07e |
| 52c6044fe4 |
| 4e1bf31368 |
| 878497fb85 |
| f2ef809719 |
| 1fa090524a |
| f71d64fbeb |
| 850bc20306 |
| 75ce44aa84 |
| 5c4f92ba9a |
| 263e32bb85 |
| 7d6c902ce8 |
| 5091057614 |
| ba0c12231b |
| cbcf401170 |
| f8de5bc582 |
| 9a726df373 |
| 2af869b193 |
| 5c1f77fab1 |
| 54acadbe66 |
| 0514cee6c8 |
| 28e96e333b |
| f5201ac117 |
| a3609ba5ac |
| 116b8543b0 |
| fdfc557878 |
| 54fec2b98b |
| 03d66d9ee2 |
| f6ff38ab42 |
| dbbfa76b73 |
| 9af36b13c5 |
| 34412da52b |
| a350a1115a |
| bcad87eacb |
| 2a70a6dc58 |
| 6df0f5e390 |
| 9c3eaa49f7 |
| dfcea0547e |
| c748d358b2 |
| 72679c94ee |
| 10e6eec9e6 |
| 251f2ac785 |
| fe7ec31259 |
| 516197f50b |
| ad1033d68f |
| c220b1358c |
| 9ae39b62b9 |
| 3d35b408e1 |
| de34bb8e66 |
| 620df7ec34 |
| 6b4393917a |
| 81985d422d |
| d627282f0c |
| 96f71a9a6b |
| 17cf49746d |
| c6ab63d306 |
| ff54a6493a |
| c11f36e620 |
| 8f121a173c |
| 33970629ac |
| c51bed739d |
| a6464b74a8 |
| 0dd50dbb3e |
| dec42bebb8 |
| 2fe1298447 |
| 2e3bdec985 |
| c4fb00a7be |
| 01d4b772de |
| c7125aa2af |
| 0dce98b716 |
| 08b1fa4cb3 |
| 23e1e2ff4a |
| 5a62e99523 |
| 982064e50c |
| b52af182c4 |
| 7bc0d82450 |
| b7fcd0b235 |
| 984d7f9cdf |
| 42d3604a31 |
| cf78925859 |
| ba32b78bfa |
| 547ce03016 |
| f07f5ce984 |
| cabd7c1f7a |
| 57266064a7 |
| 2b94b70ef5 |
| 1130c1bc33 |
| 2f63d3aee5 |
| 382f870cd5 |
| 8c760e78b9 |
| afa52e4681 |
| 164cae469c |
| 49a82d8806 |
| 96070212ad |
| 53a83d15a1 |
| 21b72dea78 |
| 51877f568b |
| c1deb9eebf |
| f62f1178aa |
| a08208283b |
| f7cd3fad24 |
| ff75f763c0 |
| a16d23557b |
| e55ddabffa |
| 9a1c1f2efb |
| e57a214e2a |
| 2f2acb2e3f |
| 06d804a0f0 |
| 6e390ef17c |
| 9652ae9a8d |
| 59460ff700 |
| 9d060574e5 |
| fedd0a0ee3 |
| 79497ef7f5 |
| 8416ca3104 |
| d8c3533a91 |
| eacfcd86d3 |
| 534a8605e5 |
| 6fe9143bbb |
| 1a01839f8c |
| 26848a7d6c |
| 0e725c6fb1 |
| b3f56086c9 |
| ee35f6384f |
| 12a0dd03b8 |
| 366650a438 |
| 78b1360e7d |
| 417f8ecf07 |
| 57cabc6d74 |
| b4c4496ef8 |
| b5334fe237 |
| e8ef93ae9d |
| 25b22c9b55 |
| ead4529c92 |
| 3a1887269b |
| e7f63ba310 |
| a0fb35c837 |
| 032ead1d90 |
| a8b4284848 |
| ed39e17e34 |
| 6749c62b9e |
| 9aeacf2de5 |
| 991097b7bf |
| 523a4efe1c |
| 0dd7c69b9e |
| 66535fe26d |
| db7af2b3c3 |
| 99829950f6 |
| 0ef0986731 |
| 610a18e7b3 |
| 8d33ea7395 |
| 3c53057fa6 |
| 1fcede053c |
| fae9c7a3f0 |
| dd68829017 |
| b85fe02be5 |
| e18277b470 |
| e5ce5c57c8 |
| 91fdd72c97 |
| b6ec93b038 |
| 65e63b6ab4 |
| 3f0fef2c9c |
| 3e0ef832cc |
| 7b7136b4ec |
| 5886ef728f |
| d7aa349820 |
| 3b32f6a8ec |
| 8f73357004 |
| 429158218b |
| ef4cbec6fb |
| e9216efefc |
| e5196fc24d |
| c98dffa186 |
| 7d7fc69355 |
| 42ab8fcfb9 |
| bf7d40d048 |
| 5ae32c4c86 |
| 56ce70c5df |
| abc76c6b5b |
| 9586817cea |
| 38b877e941 |
| 8f487d26c0 |
| 500a3a2d0a |
| a2f99fd88b |
| 0582bd290d |
| 3ff83ec49e |
| 7c9c8ba363 |
| ca7b885873 |
| 92be7821f5 |
| bcf860703d |
| d186dacdb7 |
| ef45c8154c |
| cd9b1bc820 |
| fcb68ea22d |
| eb256deb80 |
| 156187accc |
| a377773fa7 |
| cae1fd3385 |
| eb8ee8bdc7 |
| 2103dc15cb |
| 1c72b350e4 |
| 52338c94f6 |
| 91f12600d2 |
| 6c616c71ec |
| e94ad551c6 |
| d735255919 |
| d024f58e61 |
| 026975a1aa |
| 8adb052b46 |
| 9658cecde3 |
| b2e7cda6a0 |
| 65fda5c02a |
| d1b788005b |
| bb2611ad43 |
| e918a59b1d |
| 28acca609b |
| 029e3bf8f5 |
| 78158f30ae |
| c793de989f |
| 15178aa298 |
| fe3be498b8 |
.github/workflows/main.yml (4 changes, vendored)
@@ -310,7 +310,7 @@ jobs:
uses: ./.github/workflows/build-windows.yml
with:
platform: windows-x64
- msvc-toolset-version: '14.43'
+ msvc-toolset-version: '14.44'
msvc-toolset-architecture: 'x86.x64'
configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }}
@@ -322,7 +322,7 @@ jobs:
uses: ./.github/workflows/build-windows.yml
with:
platform: windows-aarch64
- msvc-toolset-version: '14.43'
+ msvc-toolset-version: '14.44'
msvc-toolset-architecture: 'arm64'
make-target: 'hotspot'
extra-conf-options: '--openjdk-target=aarch64-unknown-cygwin'
@@ -77,6 +77,9 @@ SFINAE</a></li>
<li><a href="#thread_local" id="toc-thread_local">thread_local</a></li>
<li><a href="#nullptr" id="toc-nullptr">nullptr</a></li>
<li><a href="#atomic" id="toc-atomic"><atomic></a></li>
+ <li><a href="#initializing-variables-with-static-storage-duration"
+ id="toc-initializing-variables-with-static-storage-duration">Initializing
+ variables with static storage duration</a></li>
<li><a href="#uniform-initialization"
id="toc-uniform-initialization">Uniform Initialization</a></li>
<li><a href="#local-function-objects"
@@ -84,6 +87,7 @@ id="toc-local-function-objects">Local Function Objects</a></li>
<li><a href="#inheriting-constructors"
id="toc-inheriting-constructors">Inheriting constructors</a></li>
<li><a href="#attributes" id="toc-attributes">Attributes</a></li>
+ <li><a href="#noexcept" id="toc-noexcept">noexcept</a></li>
<li><a href="#additional-permitted-features"
id="toc-additional-permitted-features">Additional Permitted
Features</a></li>
@@ -791,6 +795,33 @@ differ from what the Java compilers implement.</p>
"conservative" memory ordering, which may differ from (may be stronger
than) sequentially consistent. There are algorithms in HotSpot that are
believed to rely on that ordering.</p>
+ <h3
+ id="initializing-variables-with-static-storage-duration">Initializing
+ variables with static storage duration</h3>
+ <p>Variables with static storage duration and <em>dynamic
+ initialization</em> (<a
+ href="https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2014/n4296.pdf">C++14
+ 3.6.2</a>) should be avoided, unless an implementation is permitted to
+ perform the initialization as a static initialization. The order in
+ which dynamic initializations occur is incompletely specified.
+ Initialization order problems can be difficult to deal with and lead to
+ surprises.</p>
+ <p>Variables with static storage duration and non-trivial destructors
+ should be avoided. HotSpot doesn't generally try to cleanup on exit, and
+ running destructors at exit can lead to problems.</p>
+ <p>Some of the approaches used in HotSpot to avoid dynamic
+ initialization include:</p>
+ <ul>
+ <li><p>Use the <code>Deferred<T></code> class template. Add a call
+ to its initialization function at an appropriate place during VM
+ initialization. The underlying object is never destroyed.</p></li>
+ <li><p>For objects of class type, use a variable whose value is a
+ pointer to the class, initialized to <code>nullptr</code>. Provide an
+ initialization function that sets the variable to a dynamically
+ allocated object. Add a call to that function at an appropriate place
+ during VM initialization. Such objects are usually never
+ destroyed.</p></li>
+ </ul>
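The `Deferred<T>` template named in the first bullet is HotSpot-internal and not part of this diff. As a rough stand-alone sketch of the idea (all names here are hypothetical, not HotSpot's API):

```cpp
#include <cstddef>
#include <new>  // placement new

// Storage is reserved at namespace scope with no dynamic initializer; the
// object is constructed explicitly during VM startup via placement new,
// and its destructor is intentionally never run.
template <typename T>
class DeferredSketch {
  alignas(T) unsigned char _storage[sizeof(T)] = {};
public:
  template <typename... Args>
  T* init(Args&&... args) {  // call once, early in VM initialization
    return ::new (_storage) T(static_cast<Args&&>(args)...);
  }
  T* get() { return reinterpret_cast<T*>(_storage); }
};

struct ThreadTable { explicit ThreadTable(size_t capacity) { (void)capacity; } };

DeferredSketch<ThreadTable> g_threads;  // constant-initialized, no exit-time dtor

void vm_init() { g_threads.init(size_t(64)); }
```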
<h3 id="uniform-initialization">Uniform Initialization</h3>
<p>The use of <em>uniform initialization</em> (<a
href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2672.htm">n2672</a>),
@@ -1110,6 +1141,58 @@ function name and the parameter list.</li>
<code>memory_order_consume</code>.</li>
<li><code>[[deprecated]]</code> - Not relevant in HotSpot code.</li>
</ul>
+ <h3 id="noexcept">noexcept</h3>
+ <p>Use of <code>noexcept</code> exception specifications (<a
+ href="http://wg21.link/n3050">n3050</a>) are permitted with restrictions
+ described below.</p>
+ <ul>
+ <li>Only the argument-less form of <code>noexcept</code> exception
+ specifications are permitted.</li>
+ <li>Allocation functions that may return <code>nullptr</code> to
+ indicate allocation failure must be declared <code>noexcept</code>.</li>
+ <li>All other uses of <code>noexcept</code> exception specifications are
+ forbidden.</li>
+ <li><code>noexcept</code> expressions are forbidden.</li>
+ <li>Dynamic exception specifications are forbidden.</li>
+ </ul>
+ <p>HotSpot is built with exceptions disabled, e.g. compile with
+ <code>-fno-exceptions</code> (gcc, clang) or no <code>/EH</code> option
+ (MSVC++). So why do we need to consider <code>noexcept</code> at all?
+ It's because <code>noexcept</code> exception specifications serve two
+ distinct purposes.</p>
+ <p>The first is to allow the compiler to avoid generating code or data
+ in support of exceptions being thrown by a function. But this is
+ unnecessary, because exceptions are disabled.</p>
+ <p>The second is to allow the compiler and library code to choose
+ different algorithms, depending on whether some function may throw
+ exceptions. This is only relevant to a certain set of functions.</p>
+ <ul>
+ <li><p>Some allocation functions (<code>operator new</code> and
+ <code>operator new[]</code>) return <code>nullptr</code> to indicate
+ allocation failure. If a <code>new</code> expression calls such an
+ allocation function, it must check for and handle that possibility.
+ Declaring such a function <code>noexcept</code> informs the compiler
+ that <code>nullptr</code> is a possible result. If an allocation
+ function is not declared <code>noexcept</code> then the compiler may
+ elide that checking and handling for a <code>new</code> expression
+ calling that function.</p></li>
+ <li><p>Certain Standard Library facilities (notably containers) provide
+ different guarantees for some operations (and may choose different
+ algorithms to implement those operations), depending on whether certain
+ functions (constructors, copy/move operations, swap) are nothrow or not.
+ They detect this using type traits that test whether a function is
+ declared <code>noexcept</code>. This can have a significant performance
+ impact if, for example, copying is chosen over a potentially throwing
+ move. But this isn't relevant, since HotSpot forbids the use of most
+ Standard Library facilities.</p></li>
+ </ul>
+ <p>HotSpot code can assume no exceptions will ever be thrown, even from
+ functions not declared <code>noexcept</code>. So HotSpot code doesn't
+ ever need to check, either with conditional exception specifications or
+ with <code>noexcept</code> expressions.</p>
+ <p>Dynamic exception specifications were deprecated in C++11. C++17
+ removed all but <code>throw()</code>, with that remaining a deprecated
+ equivalent to <code>noexcept</code>.</p>
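To make the allocation-function point concrete, here is a minimal stand-alone sketch (not HotSpot code; HotSpot's own `CHeapObj`-style allocators differ in detail) of why the `noexcept` declaration matters:

```cpp
#include <cstddef>
#include <cstdlib>

struct Buffer {
  // May return nullptr on failure. Because this allocation function is
  // noexcept, the compiler knows a null result is possible and inserts a
  // null check into every `new Buffer(...)` expression before the
  // constructor runs.
  void* operator new(size_t size) noexcept { return std::malloc(size); }
  void operator delete(void* p) noexcept { std::free(p); }
  int payload = 0;
};

bool make_buffer() {
  Buffer* b = new Buffer();  // evaluates to nullptr (ctor skipped) on failure
  if (b == nullptr) {
    return false;            // caller handles allocation failure
  }
  delete b;
  return true;
}
```

Without the `noexcept` on `operator new`, the compiler is entitled to assume the allocation either succeeds or throws, and may elide the null check.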
<h3 id="additional-permitted-features">Additional Permitted
Features</h3>
<ul>
@@ -1198,12 +1281,6 @@ Library names.</p></li>
href="http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2179.html">n2179</a>)
— HotSpot does not permit the use of exceptions, so this feature isn't
useful.</p></li>
- <li><p>Avoid non-local variables with non-constexpr initialization. In
- particular, avoid variables with types requiring non-trivial
- initialization or destruction. Initialization order problems can be
- difficult to deal with and lead to surprises, as can destruction
- ordering. HotSpot doesn't generally try to cleanup on exit, and running
- destructors at exit can also lead to problems.</p></li>
<li><p>Avoid most operator overloading, preferring named functions. When
operator overloading is used, ensure the semantics conform to the normal
expected behavior of the operation.</p></li>
@@ -770,6 +770,32 @@ ordering, which may differ from (may be stronger than) sequentially
consistent. There are algorithms in HotSpot that are believed to rely
on that ordering.

+ ### Initializing variables with static storage duration
+
+ Variables with static storage duration and _dynamic initialization_
+ ([C++14 3.6.2](https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2014/n4296.pdf))
+ should be avoided, unless an implementation is permitted to perform the
+ initialization as a static initialization. The order in which dynamic
+ initializations occur is incompletely specified. Initialization order
+ problems can be difficult to deal with and lead to surprises.
+
+ Variables with static storage duration and non-trivial destructors should be
+ avoided. HotSpot doesn't generally try to cleanup on exit, and running
+ destructors at exit can lead to problems.
+
+ Some of the approaches used in HotSpot to avoid dynamic initialization
+ include:
+
+ * Use the `Deferred<T>` class template. Add a call to its initialization
+ function at an appropriate place during VM initialization. The underlying
+ object is never destroyed.
+
+ * For objects of class type, use a variable whose value is a pointer to the
+ class, initialized to `nullptr`. Provide an initialization function that sets
+ the variable to a dynamically allocated object. Add a call to that function at
+ an appropriate place during VM initialization. Such objects are usually never
+ destroyed.
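A minimal sketch of the second approach in the list above (illustrative only; the names are hypothetical, not HotSpot's):

```cpp
// No dynamic initialization: a pointer with static storage duration is
// zero-initialized before any code runs.
class CodeCacheStats;
static CodeCacheStats* _stats = nullptr;

class CodeCacheStats {
public:
  int reserved_kb = 0;
  // Called once, from a well-defined point during VM initialization.
  static void initialize() { _stats = new CodeCacheStats(); }
};

void vm_init_globals() {
  CodeCacheStats::initialize();  // the object is never destroyed on exit
}
```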
### Uniform Initialization

The use of _uniform initialization_
@@ -1104,6 +1130,57 @@ The following attributes are expressly forbidden:
* `[[carries_dependency]]` - Related to `memory_order_consume`.
* `[[deprecated]]` - Not relevant in HotSpot code.

+ ### noexcept
+
+ Use of `noexcept` exception specifications
+ ([n3050](http://wg21.link/n3050))
+ are permitted with restrictions described below.
+
+ * Only the argument-less form of `noexcept` exception specifications are
+ permitted.
+ * Allocation functions that may return `nullptr` to indicate allocation
+ failure must be declared `noexcept`.
+ * All other uses of `noexcept` exception specifications are forbidden.
+ * `noexcept` expressions are forbidden.
+ * Dynamic exception specifications are forbidden.
+
+ HotSpot is built with exceptions disabled, e.g. compile with `-fno-exceptions`
+ (gcc, clang) or no `/EH` option (MSVC++). So why do we need to consider
+ `noexcept` at all? It's because `noexcept` exception specifications serve two
+ distinct purposes.
+
+ The first is to allow the compiler to avoid generating code or data in support
+ of exceptions being thrown by a function. But this is unnecessary, because
+ exceptions are disabled.
+
+ The second is to allow the compiler and library code to choose different
+ algorithms, depending on whether some function may throw exceptions. This is
+ only relevant to a certain set of functions.
+
+ * Some allocation functions (`operator new` and `operator new[]`) return
+ `nullptr` to indicate allocation failure. If a `new` expression calls such an
+ allocation function, it must check for and handle that possibility. Declaring
+ such a function `noexcept` informs the compiler that `nullptr` is a possible
+ result. If an allocation function is not declared `noexcept` then the compiler
+ may elide that checking and handling for a `new` expression calling that
+ function.
+
+ * Certain Standard Library facilities (notably containers) provide different
+ guarantees for some operations (and may choose different algorithms to
+ implement those operations), depending on whether certain functions
+ (constructors, copy/move operations, swap) are nothrow or not. They detect
+ this using type traits that test whether a function is declared `noexcept`.
+ This can have a significant performance impact if, for example, copying is
+ chosen over a potentially throwing move. But this isn't relevant, since
+ HotSpot forbids the use of most Standard Library facilities.
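For background on the second bullet, the Standard Library mechanism looks roughly like this (ordinary ISO C++, not HotSpot code):

```cpp
#include <type_traits>
#include <utility>

struct Widget {
  Widget() = default;
  Widget(Widget&&) noexcept {}  // nothrow move: containers move on reallocation
  Widget(const Widget&) {}      // copying is chosen if the move could throw
};

static_assert(std::is_nothrow_move_constructible<Widget>::value,
              "detected via a type trait, exactly as containers do");

// std::move_if_noexcept yields an rvalue only when moving cannot throw;
// otherwise it falls back to a const lvalue (copy), preserving the strong
// exception-safety guarantee.
Widget relocate(Widget& w) { return std::move_if_noexcept(w); }
```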
+ HotSpot code can assume no exceptions will ever be thrown, even from functions
+ not declared `noexcept`. So HotSpot code doesn't ever need to check, either
+ with conditional exception specifications or with `noexcept` expressions.
+
+ Dynamic exception specifications were deprecated in C++11. C++17 removed all
+ but `throw()`, with that remaining a deprecated equivalent to `noexcept`.
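Read together, the restrictions amount to the following schematic sketch (`NoThrowTag` is a hypothetical placement-style tag, not a HotSpot name):

```cpp
#include <cstddef>

struct NoThrowTag {};

// Permitted: the argument-less noexcept form, on an allocation function
// that can return nullptr to signal failure.
void* operator new(std::size_t size, const NoThrowTag&) noexcept;

// Forbidden under the rules above (shown only for contrast):
// void f() noexcept(sizeof(void*) == 8);  // conditional noexcept specification
// bool probe = noexcept(1 + 1);           // noexcept expression
// void g() throw();                       // dynamic exception specification
```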
### Additional Permitted Features

* `alignof`
@@ -1199,13 +1276,6 @@ namespace std;` to avoid needing to qualify Standard Library names.
([n2179](http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2179.html)) —
HotSpot does not permit the use of exceptions, so this feature isn't useful.

- * Avoid non-local variables with non-constexpr initialization.
- In particular, avoid variables with types requiring non-trivial
- initialization or destruction. Initialization order problems can be
- difficult to deal with and lead to surprises, as can destruction
- ordering. HotSpot doesn't generally try to cleanup on exit, and
- running destructors at exit can also lead to problems.
-
* Avoid most operator overloading, preferring named functions. When
operator overloading is used, ensure the semantics conform to the
normal expected behavior of the operation.
make/autoconf/configure (4 changes, vendored)
@@ -1,6 +1,6 @@
#!/bin/bash
#
- # Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
+ # Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -366,7 +366,7 @@ EOT

# Print additional help, e.g. a list of toolchains and JVM features.
# This must be done by the autoconf script.
- ( CONFIGURE_PRINT_ADDITIONAL_HELP=true . $generated_script PRINTF=printf )
+ ( CONFIGURE_PRINT_ADDITIONAL_HELP=true . $generated_script PRINTF=printf ECHO=echo )

cat <<EOT
@@ -28,7 +28,7 @@
################################################################################

# Minimum supported versions
- JTREG_MINIMUM_VERSION=7.5.1
+ JTREG_MINIMUM_VERSION=7.5.2
GTEST_MINIMUM_VERSION=1.14.0

################################################################################
@@ -26,7 +26,7 @@
# Versions and download locations for dependencies used by GitHub Actions (GHA)

GTEST_VERSION=1.14.0
- JTREG_VERSION=7.5.1+1
+ JTREG_VERSION=7.5.2+1

LINUX_X64_BOOT_JDK_EXT=tar.gz
LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk24/1f9ff9062db4449d8ca828c504ffae90/36/GPL/openjdk-24_linux-x64_bin.tar.gz
@@ -1174,9 +1174,9 @@ var getJibProfilesDependencies = function (input, common) {
jtreg: {
server: "jpg",
product: "jtreg",
- version: "7.5.1",
+ version: "7.5.2",
build_number: "1",
- file: "bundles/jtreg-7.5.1+1.zip",
+ file: "bundles/jtreg-7.5.2+1.zip",
environment_name: "JT_HOME",
environment_path: input.get("jtreg", "home_path") + "/bin",
configure_args: "--with-jtreg=" + input.get("jtreg", "home_path"),
@@ -1192,8 +1192,8 @@ var getJibProfilesDependencies = function (input, common) {
server: "jpg",
product: "jcov",
version: "3.0",
- build_number: "1",
- file: "bundles/jcov-3.0+1.zip",
+ build_number: "3",
+ file: "bundles/jcov-3.0+3.zip",
environment_name: "JCOV_HOME",
},
@@ -67,6 +67,8 @@ ATTRIBUTE_DEFAULT_VISIBILITY ATTRIBUTE_USED const char* CDECL __asan_default_opt
#endif
"print_suppressions=0,"
"handle_segv=0,"
+ // A lot of libjsig related tests fail because of the link order check; so better avoid it
+ "verify_asan_link_order=0,"
// See https://github.com/google/sanitizers/issues/1322. Hopefully this is resolved
// at some point and we can remove this option.
"intercept_tls_get_addr=0";
@@ -62,5 +62,8 @@
// thread so it is easier to track down. You can override these options by setting the environment
// variable UBSAN_OPTIONS.
ATTRIBUTE_DEFAULT_VISIBILITY ATTRIBUTE_USED const char* __ubsan_default_options() {
- return "halt_on_error=1,print_stacktrace=1" _LLVM_SYMBOLIZER(LLVM_SYMBOLIZER);
+ return "halt_on_error=1,"
+ "handle_segv=0,"
+ "handle_sigbus=0,"
+ "print_stacktrace=1" _LLVM_SYMBOLIZER(LLVM_SYMBOLIZER);
}
@@ -64,6 +64,30 @@ IS_WSL=`echo $UNAME_RELEASE | grep Microsoft`
IS_MSYS=`echo $UNAME_OS | grep -i Msys`
MSYS2_ARG_CONV_EXCL="*" # make "cmd.exe /c" work for msys2
CMD_EXE="cmd.exe /c"

+ # Detect host architecture to determine devkit platform support
+ # Note: The devkit always includes x86, x64, and aarch64 libraries and tools
+ # The difference is in toolchain capabilities:
+ # - On x64|AMD64 hosts: aarch64 tools are cross-compilation tools (Hostx64/arm64)
+ # - On aarch64|ARMv8 hosts: aarch64 tools are native tools (Hostarm64/arm64)
+ HOST_ARCH=`echo $PROCESSOR_IDENTIFIER`
+ case $HOST_ARCH in
+ AMD64)
+ echo "Running on x64 host - generating devkit with native x86/x64 tools and cross-compiled aarch64 tools."
+ echo "For native aarch64 compilation tools, run this script on a Windows/aarch64 machine."
+ SUPPORTED_PLATFORMS="x86, x64 (native) and aarch64 (cross-compiled)"
+ ;;
+ ARMv8)
+ echo "Running on aarch64 host - generating devkit with native tools for all platforms (x86, x64, aarch64)."
+ SUPPORTED_PLATFORMS="x86, x64, and aarch64 (all native)"
+ ;;
+ *)
+ echo "Unknown host architecture: $HOST_ARCH"
+ echo "Proceeding with devkit generation - toolchain capabilities may vary."
+ SUPPORTED_PLATFORMS="x86, x64, and aarch64"
+ ;;
+ esac
+
if test "x$IS_CYGWIN" != "x"; then
BUILD_ENV="cygwin"
elif test "x$IS_MSYS" != "x"; then
@@ -139,6 +163,7 @@ DEVKIT_ROOT="${BUILD_DIR}/VS${VS_VERSION}-${VS_VERSION_SP}-devkit"
DEVKIT_BUNDLE="${DEVKIT_ROOT}.tar.gz"

echo "Creating devkit in $DEVKIT_ROOT"
+ echo "Platform support: $SUPPORTED_PLATFORMS"

MSVCR_DLL=${MSVC_CRT_DIR}/vcruntime${VS_DLL_VERSION}.dll
VCRUNTIME_1_DLL=${MSVC_CRT_DIR}/vcruntime${VS_DLL_VERSION}_1.dll
@@ -156,7 +181,11 @@ REDIST_SUBDIR="VC/Redist/MSVC/$REDIST_VERSION"
echo "Copying VC..."
rm -rf $DEVKIT_ROOT/VC
mkdir -p $DEVKIT_ROOT/VC/bin
- cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/bin/Hostx64/arm64" $DEVKIT_ROOT/VC/bin/
+ if [ -d "$VS_INSTALL_DIR/${VC_SUBDIR}/bin/Hostarm64/arm64" ]; then
+ cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/bin/Hostarm64/arm64" $DEVKIT_ROOT/VC/bin/
+ else
+ cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/bin/Hostx64/arm64" $DEVKIT_ROOT/VC/bin/
+ fi
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/bin/Hostx64/x64" $DEVKIT_ROOT/VC/bin/
cp -r "$VS_INSTALL_DIR/${VC_SUBDIR}/bin/Hostx86/x86" $DEVKIT_ROOT/VC/bin/
mkdir -p $DEVKIT_ROOT/VC/lib
@@ -542,10 +542,10 @@ class Bundle {
if (pattern != null) {
// Perform date-time format pattern conversion which is
// applicable to both SimpleDateFormat and j.t.f.DateTimeFormatter.
- String transPattern = translateDateFormatLetters(calendarType, pattern, this::convertDateTimePatternLetter);
+ String transPattern = translateDateFormatLetters(calendarType, key, pattern, this::convertDateTimePatternLetter);
dateTimePatterns.add(i, transPattern);
// Additionally, perform SDF specific date-time format pattern conversion
- sdfPatterns.add(i, translateDateFormatLetters(calendarType, transPattern, this::convertSDFLetter));
+ sdfPatterns.add(i, translateDateFormatLetters(calendarType, key, transPattern, this::convertSDFLetter));
} else {
dateTimePatterns.add(i, null);
sdfPatterns.add(i, null);
@@ -568,7 +568,7 @@ class Bundle {
}
}

- private String translateDateFormatLetters(CalendarType calendarType, String cldrFormat, ConvertDateTimeLetters converter) {
+ private String translateDateFormatLetters(CalendarType calendarType, String patternKey, String cldrFormat, ConvertDateTimeLetters converter) {
String pattern = cldrFormat;
int length = pattern.length();
boolean inQuote = false;
@@ -587,7 +587,7 @@ class Bundle {
if (nextc == '\'') {
i++;
if (count != 0) {
- converter.convert(calendarType, lastLetter, count, jrePattern);
+ converter.convert(calendarType, patternKey, lastLetter, count, jrePattern);
lastLetter = 0;
count = 0;
}
@@ -597,7 +597,7 @@ class Bundle {
}
if (!inQuote) {
if (count != 0) {
- converter.convert(calendarType, lastLetter, count, jrePattern);
+ converter.convert(calendarType, patternKey, lastLetter, count, jrePattern);
lastLetter = 0;
count = 0;
}
@@ -614,7 +614,7 @@ class Bundle {
}
if (!(c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z')) {
if (count != 0) {
- converter.convert(calendarType, lastLetter, count, jrePattern);
+ converter.convert(calendarType, patternKey, lastLetter, count, jrePattern);
lastLetter = 0;
count = 0;
}
@@ -627,7 +627,7 @@ class Bundle {
count++;
continue;
}
- converter.convert(calendarType, lastLetter, count, jrePattern);
+ converter.convert(calendarType, patternKey, lastLetter, count, jrePattern);
lastLetter = c;
count = 1;
}
@@ -637,7 +637,7 @@ class Bundle {
}

if (count != 0) {
- converter.convert(calendarType, lastLetter, count, jrePattern);
+ converter.convert(calendarType, patternKey, lastLetter, count, jrePattern);
}
if (cldrFormat.contentEquals(jrePattern)) {
return cldrFormat;
@@ -661,7 +661,7 @@ class Bundle {
* on the support given by the SimpleDateFormat and the j.t.f.DateTimeFormatter
* for date-time formatting.
*/
- private void convertDateTimePatternLetter(CalendarType calendarType, char cldrLetter, int count, StringBuilder sb) {
+ private void convertDateTimePatternLetter(CalendarType calendarType, String patternKey, char cldrLetter, int count, StringBuilder sb) {
switch (cldrLetter) {
case 'u':
case 'U':
@@ -683,7 +683,7 @@ class Bundle {
* Perform a conversion of CLDR date-time format pattern letter which is
* specific to the SimpleDateFormat.
*/
- private void convertSDFLetter(CalendarType calendarType, char cldrLetter, int count, StringBuilder sb) {
+ private void convertSDFLetter(CalendarType calendarType, String patternKey, char cldrLetter, int count, StringBuilder sb) {
switch (cldrLetter) {
case 'G':
if (calendarType != CalendarType.GREGORIAN) {
@@ -722,6 +722,17 @@ class Bundle {
appendN('z', count, sb);
break;

+ case 'y':
+ // If the style is FULL/LONG for a Japanese Calendar, make the
+ // count == 4 for Gan-nen
+ if (calendarType == CalendarType.JAPANESE &&
+ (patternKey.contains("full-") ||
+ patternKey.contains("long-"))) {
+ count = 4;
+ }
+ appendN(cldrLetter, count, sb);
+ break;
+
case 'Z':
if (count == 4 || count == 5) {
sb.append("XXX");
@@ -767,6 +778,7 @@ class Bundle {
.collect(Collectors.toMap(
e -> calendarPrefix + e.getKey(),
e -> translateDateFormatLetters(calendarType,
+ e.getKey(),
(String)e.getValue(),
this::convertDateTimePatternLetter)
))
@@ -775,7 +787,7 @@ class Bundle {

@FunctionalInterface
private interface ConvertDateTimeLetters {
- void convert(CalendarType calendarType, char cldrLetter, int count, StringBuilder sb);
+ void convert(CalendarType calendarType, String patternKey, char cldrLetter, int count, StringBuilder sb);
}

/**
@@ -46,6 +46,8 @@ CLDR_GEN_DONE := $(GENSRC_DIR)/_cldr-gensrc.marker
TZ_DATA_DIR := $(MODULE_SRC)/share/data/tzdata
ZONENAME_TEMPLATE := $(MODULE_SRC)/share/classes/java/time/format/ZoneName.java.template

+ # The `-utf8` option is used even for US English, as some names
+ # may contain non-ASCII characters, such as “Türkiye”.
$(CLDR_GEN_DONE): $(wildcard $(CLDR_DATA_DIR)/dtd/*.dtd) \
$(wildcard $(CLDR_DATA_DIR)/main/en*.xml) \
$(wildcard $(CLDR_DATA_DIR)/supplemental/*.xml) \
@@ -61,7 +63,8 @@ $(CLDR_GEN_DONE): $(wildcard $(CLDR_DATA_DIR)/dtd/*.dtd) \
-basemodule \
-year $(COPYRIGHT_YEAR) \
-zntempfile $(ZONENAME_TEMPLATE) \
- -tzdatadir $(TZ_DATA_DIR))
+ -tzdatadir $(TZ_DATA_DIR) \
+ -utf8)
$(TOUCH) $@

TARGETS += $(CLDR_GEN_DONE)
@@ -45,7 +45,8 @@ $(CLDR_GEN_DONE): $(wildcard $(CLDR_DATA_DIR)/dtd/*.dtd) \
-baselocales "en-US" \
-year $(COPYRIGHT_YEAR) \
-o $(GENSRC_DIR) \
- -tzdatadir $(TZ_DATA_DIR))
+ -tzdatadir $(TZ_DATA_DIR) \
+ -utf8)
$(TOUCH) $@

TARGETS += $(CLDR_GEN_DONE)
@@ -62,7 +62,7 @@ BUILD_JDK_JTREG_LIBRARIES_JDK_LIBS_libGetXSpace := java.base:libjava
ifeq ($(call isTargetOs, windows), true)
BUILD_JDK_JTREG_EXCLUDE += libDirectIO.c libInheritedChannel.c \
libExplicitAttach.c libImplicitAttach.c \
- exelauncher.c
+ exelauncher.c libFDLeaker.c exeFDLeakTester.c

BUILD_JDK_JTREG_EXECUTABLES_LIBS_exeNullCallerTest := $(LIBCXX)
BUILD_JDK_JTREG_EXECUTABLES_LIBS_exerevokeall := advapi32.lib
@@ -187,22 +187,18 @@ public class HelloWorld {
new Run("none", "Hello from Cupertino")
}),
new Paragraph("title", new Run[] {
- new Run("none", "\u53F0\u5317\u554F\u5019\u60A8\u0021")
+ new Run("none", "台北問候您!")
}),
new Paragraph("title", new Run[] {
- new Run("none", "\u0391\u03B8\u03B7\u03BD\u03B1\u03B9\u0020" // Greek
- + "\u03B1\u03C3\u03C0\u03B1\u03B6\u03BF\u03BD"
- + "\u03C4\u03B1\u03B9\u0020\u03C5\u03BC\u03B1"
- + "\u03C2\u0021")
+ new Run("none", "Αθηναι ασπαζονται υμας!") // Greek
}),
new Paragraph("title", new Run[] {
- new Run("none", "\u6771\u4eac\u304b\u3089\u4eca\u65e5\u306f")
+ new Run("none", "東京から今日は")
}),
new Paragraph("title", new Run[] {
- new Run("none", "\u05e9\u05dc\u05d5\u05dd \u05de\u05d9\u05e8\u05d5"
- + "\u05e9\u05dc\u05d9\u05dd")
+ new Run("none", "שלום מירושלים")
}),
new Paragraph("title", new Run[] {
- new Run("none", "\u0633\u0644\u0627\u0645")
+ new Run("none", "سلام")
}), };
}
@@ -456,13 +456,13 @@ SliderDemo.horizontal=Horizontal
SliderDemo.vertical=Vertikal
SliderDemo.plain=Einfach
SliderDemo.a_plain_slider=Ein einfacher Schieberegler
- SliderDemo.majorticks=Grobteilungen
- SliderDemo.majorticksdescription=Ein Schieberegler mit Grobteilungsmarkierungen
- SliderDemo.ticks=Feinteilungen, Teilungen zum Einrasten und Labels
- SliderDemo.minorticks=Feinteilungen
- SliderDemo.minorticksdescription=Ein Schieberegler mit Grob- und Feinteilungen, mit Teilungen, in die der Schieberegler einrastet, wobei einige Teilungen mit einem sichtbaren Label versehen sind
+ SliderDemo.majorticks=Hauptteilstriche
+ SliderDemo.majorticksdescription=Ein Schieberegler mit Hauptteilstrichen
+ SliderDemo.ticks=Hilfsteilstriche, zum Einrasten und Beschriften
+ SliderDemo.minorticks=Hilfsteilstriche
+ SliderDemo.minorticksdescription=Ein Schieberegler mit Haupt- und Hilfsteilstrichen, in die der Schieberegler einrastet, wobei einige Teilstriche mit einer sichtbaren Beschriftung versehen sind
SliderDemo.disabled=Deaktiviert
- SliderDemo.disableddescription=Ein Schieberegler mit Grob- und Feinteilungen, der nicht aktiviert ist (kann nicht bearbeitet werden)
+ SliderDemo.disableddescription=Ein Schieberegler mit Haupt- und Hilfsteilstrichen, der nicht aktiviert ist (kann nicht bearbeitet werden)

### SplitPane Demo ###
@@ -1765,10 +1765,6 @@ void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
// n.b. frame size includes space for return pc and rfp
const int framesize = C->output()->frame_size_in_bytes();

- // insert a nop at the start of the prolog so we can patch in a
- // branch if we need to invalidate the method later
- __ nop();
-
if (C->clinit_barrier_on_entry()) {
assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

@@ -1888,7 +1884,7 @@ void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
code_stub = &stub->entry();
}
__ relocate(relocInfo::poll_return_type);
- __ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
+ __ safepoint_poll(*code_stub, true /* at_return */, true /* in_nmethod */);
}
}

@@ -3921,6 +3917,10 @@ ins_attrib ins_alignment(4); // Required alignment attribute (must
// compute_padding() function must be
// provided for the instruction

+ // Whether this node is expanded during code emission into a sequence of
+ // instructions and the first instruction can perform an implicit null check.
+ ins_attrib ins_is_late_expanded_null_check_candidate(false);
+
//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in
@@ -1136,6 +1136,10 @@ public:
system(0b00, 0b011, 0b00011, SY, 0b110);
}

+ void sb() {
+ system(0b00, 0b011, 0b00011, 0b0000, 0b111);
+ }
+
void sys(int op1, int CRn, int CRm, int op2,
Register rt = as_Register(0b11111)) {
system(0b01, op1, CRn, CRm, op2, rt);
@@ -483,7 +483,7 @@ void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {

code_stub->set_safepoint_offset(__ offset());
__ relocate(relocInfo::poll_return_type);
- __ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
+ __ safepoint_poll(*code_stub->entry(), true /* at_return */, true /* in_nmethod */);
__ ret(lr);
}
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -43,15 +43,15 @@ define_pd_global(intx, CompileThreshold, 1500 );

define_pd_global(intx, OnStackReplacePercentage, 933 );
define_pd_global(intx, NewSizeThreadIncrease, 4*K );
- define_pd_global(intx, InitialCodeCacheSize, 160*K);
- define_pd_global(intx, ReservedCodeCacheSize, 32*M );
- define_pd_global(intx, NonProfiledCodeHeapSize, 13*M );
- define_pd_global(intx, ProfiledCodeHeapSize, 14*M );
- define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
+ define_pd_global(size_t, InitialCodeCacheSize, 160*K);
+ define_pd_global(size_t, ReservedCodeCacheSize, 32*M );
+ define_pd_global(size_t, NonProfiledCodeHeapSize, 13*M );
+ define_pd_global(size_t, ProfiledCodeHeapSize, 14*M );
+ define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M );
define_pd_global(bool, ProfileInterpreter, false);
- define_pd_global(intx, CodeCacheExpansionSize, 32*K );
- define_pd_global(uintx, CodeCacheMinBlockLength, 1);
- define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
+ define_pd_global(size_t, CodeCacheExpansionSize, 32*K );
+ define_pd_global(size_t, CodeCacheMinBlockLength, 1);
+ define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(bool, NeverActAsServerClassMachine, true );
define_pd_global(uint64_t,MaxRAM, 1ULL*G);
define_pd_global(bool, CICompileOSR, true );
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -51,8 +51,8 @@ define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
define_pd_global(intx, LoopUnrollLimit, 60);
define_pd_global(intx, LoopPercentProfileLimit, 10);
// InitialCodeCacheSize derived from specjbb2000 run.
- define_pd_global(intx, InitialCodeCacheSize, 2496*K); // Integral multiple of CodeCacheExpansionSize
- define_pd_global(intx, CodeCacheExpansionSize, 64*K);
+ define_pd_global(size_t, InitialCodeCacheSize, 2496*K); // Integral multiple of CodeCacheExpansionSize
+ define_pd_global(size_t, CodeCacheExpansionSize, 64*K);

// Ergonomics related flags
define_pd_global(uint64_t,MaxRAM, 128ULL*G);
@@ -69,12 +69,12 @@ define_pd_global(bool, SuperWordLoopUnrollAnalysis, true);
define_pd_global(uint, SuperWordStoreToLoadForwardingFailureDetection, 8);
define_pd_global(bool, IdealizeClearArrayNode, true);

- define_pd_global(intx, ReservedCodeCacheSize, 48*M);
- define_pd_global(intx, NonProfiledCodeHeapSize, 21*M);
- define_pd_global(intx, ProfiledCodeHeapSize, 22*M);
- define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
- define_pd_global(uintx, CodeCacheMinBlockLength, 6);
- define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
+ define_pd_global(size_t, ReservedCodeCacheSize, 48*M);
+ define_pd_global(size_t, NonProfiledCodeHeapSize, 21*M);
+ define_pd_global(size_t, ProfiledCodeHeapSize, 22*M);
+ define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M );
+ define_pd_global(size_t, CodeCacheMinBlockLength, 6);
+ define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);

// Ergonomics related flags
define_pd_global(bool, NeverActAsServerClassMachine, false);
@@ -90,13 +90,15 @@ void CompiledDirectCall::set_to_interpreted(const methodHandle& callee, address
= nativeMovConstReg_at(stub + NativeInstruction::instruction_size);

#ifdef ASSERT
- NativeGeneralJump* jump = nativeGeneralJump_at(method_holder->next_instruction_address());
+ NativeJump* jump = MacroAssembler::codestub_branch_needs_far_jump()
+ ? nativeGeneralJump_at(method_holder->next_instruction_address())
+ : nativeJump_at(method_holder->next_instruction_address());
verify_mt_safe(callee, entry, method_holder, jump);
#endif

// Update stub.
method_holder->set_data((intptr_t)callee());
- NativeGeneralJump::insert_unconditional(method_holder->next_instruction_address(), entry);
+ MacroAssembler::pd_patch_instruction(method_holder->next_instruction_address(), entry);
ICache::invalidate_range(stub, to_interp_stub_size());
// Update jump to call.
set_destination_mt_safe(stub);
@@ -289,7 +289,7 @@ void DowncallLinker::StubGenerator::generate() {

__ verify_sve_vector_length(tmp1);

- __ safepoint_poll(L_safepoint_poll_slow_path, true /* at_return */, true /* acquire */, false /* in_nmethod */, tmp1);
+ __ safepoint_poll(L_safepoint_poll_slow_path, true /* at_return */, false /* in_nmethod */, tmp1);

__ ldrw(tmp1, Address(rthread, JavaThread::suspend_flags_offset()));
__ cbnzw(tmp1, L_safepoint_poll_slow_path);
@@ -106,6 +106,13 @@ instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
match(Set dst (LoadP mem));
predicate(UseZGC && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
effect(TEMP dst, KILL cr);
+ // The main load is a candidate to implement implicit null checks, as long as
+ // legitimize_address() does not require a preceding lea instruction to
+ // materialize the memory operand. The absence of a preceding lea instruction
+ // is guaranteed for immLoffset8 memory operands, because these do not lead to
+ // out-of-range offsets (see definition of immLoffset8). Fortunately,
+ // immLoffset8 memory operands are the most common ones in practice.
+ ins_is_late_expanded_null_check_candidate(opnd_array(1)->opcode() == INDOFFL8);

ins_cost(4 * INSN_COST);

@@ -117,7 +124,11 @@ instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
// Fix up any out-of-range offsets.
assert_different_registers(rscratch2, as_Register($mem$$base));
assert_different_registers(rscratch2, $dst$$Register);
- ref_addr = __ legitimize_address(ref_addr, 8, rscratch2);
+ int size = 8;
+ assert(!this->is_late_expanded_null_check_candidate() ||
+ !MacroAssembler::legitimize_address_requires_lea(ref_addr, size),
+ "an instruction that can be used for implicit null checking should emit the candidate memory access first");
+ ref_addr = __ legitimize_address(ref_addr, size, rscratch2);
}
__ ldr($dst$$Register, ref_addr);
z_load_barrier(masm, this, ref_addr, $dst$$Register, rscratch1);
@@ -38,7 +38,7 @@ define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap nulls

define_pd_global(bool, DelayCompilerStubsGeneration, COMPILER2_OR_JVMCI);

- define_pd_global(uintx, CodeCacheSegmentSize, 64);
+ define_pd_global(size_t, CodeCacheSegmentSize, 64);
define_pd_global(intx, CodeEntryAlignment, 64);
define_pd_global(intx, OptoLoopAlignment, 16);
@@ -117,7 +117,7 @@ define_pd_global(intx, InlineSmallCode, 1000);
product(ccstr, OnSpinWaitInst, "yield", DIAGNOSTIC, \
"The instruction to use to implement " \
"java.lang.Thread.onSpinWait()." \
- "Options: none, nop, isb, yield.") \
+ "Options: none, nop, isb, yield, sb.") \
product(uint, OnSpinWaitInstCount, 1, DIAGNOSTIC, \
"The number of OnSpinWaitInst instructions to generate." \
"It cannot be used with OnSpinWaitInst=none.") \
@@ -603,7 +603,7 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
// the stack, will call InterpreterRuntime::at_unwind.
Label slow_path;
Label fast_path;
- safepoint_poll(slow_path, true /* at_return */, false /* acquire */, false /* in_nmethod */);
+ safepoint_poll(slow_path, true /* at_return */, false /* in_nmethod */);
br(Assembler::AL, fast_path);
bind(slow_path);
push(state);
@@ -1016,31 +1016,16 @@ void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
}


- void InterpreterMacroAssembler::profile_taken_branch(Register mdp,
- Register bumped_count) {
+ void InterpreterMacroAssembler::profile_taken_branch(Register mdp) {
if (ProfileInterpreter) {
Label profile_continue;

// If no method data exists, go to profile_continue.
// Otherwise, assign to mdp
test_method_data_pointer(mdp, profile_continue);

// We are taking a branch. Increment the taken count.
- // We inline increment_mdp_data_at to return bumped_count in a register
- //increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
- Address data(mdp, in_bytes(JumpData::taken_offset()));
- ldr(bumped_count, data);
- assert(DataLayout::counter_increment == 1,
- "flow-free idiom only works with 1");
- // Intel does this to catch overflow
- // addptr(bumped_count, DataLayout::counter_increment);
- // sbbptr(bumped_count, 0);
- // so we do this
- adds(bumped_count, bumped_count, DataLayout::counter_increment);
- Label L;
- br(Assembler::CS, L); // skip store if counter overflow
- str(bumped_count, data);
- bind(L);
+ increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));

// The method data pointer needs to be updated to reflect the new target.
update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
bind(profile_continue);
@@ -1055,7 +1040,7 @@ void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
// If no method data exists, go to profile_continue.
test_method_data_pointer(mdp, profile_continue);

- // We are taking a branch. Increment the not taken count.
+ // We are not taking a branch. Increment the not taken count.
increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));

// The method data pointer needs to be updated to correspond to
@@ -276,7 +276,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
// narrow int return value
void narrow(Register result);

- void profile_taken_branch(Register mdp, Register bumped_count);
+ void profile_taken_branch(Register mdp);
void profile_not_taken_branch(Register mdp);
void profile_call(Register mdp);
void profile_final_call(Register mdp);
@@ -553,13 +553,8 @@ address MacroAssembler::target_addr_for_insn_or_null(address insn_addr, unsigned
return MacroAssembler::target_addr_for_insn(insn_addr, insn);
}

- void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp) {
- if (acquire) {
- lea(tmp, Address(rthread, JavaThread::polling_word_offset()));
- ldar(tmp, tmp);
- } else {
- ldr(tmp, Address(rthread, JavaThread::polling_word_offset()));
- }
+ void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod, Register tmp) {
+ ldr(tmp, Address(rthread, JavaThread::polling_word_offset()));
if (at_return) {
// Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
// we may safely use the sp instead to perform the stack watermark check.
@@ -989,11 +984,19 @@ void MacroAssembler::emit_static_call_stub() {
mov_metadata(rmethod, nullptr);

// Jump to the entry point of the c2i stub.
- movptr(rscratch1, 0);
- br(rscratch1);
+ if (codestub_branch_needs_far_jump()) {
+ movptr(rscratch1, 0);
+ br(rscratch1);
+ } else {
+ b(pc());
+ }
}

int MacroAssembler::static_call_stub_size() {
+ if (!codestub_branch_needs_far_jump()) {
+ // isb; movk; movz; movz; b
+ return 5 * NativeInstruction::instruction_size;
+ }
// isb; movk; movz; movz; movk; movz; movz; br
return 8 * NativeInstruction::instruction_size;
}
@@ -6811,6 +6814,9 @@ void MacroAssembler::spin_wait() {
case SpinWait::YIELD:
yield();
break;
+ case SpinWait::SB:
+ sb();
+ break;
default:
ShouldNotReachHere();
}
@@ -120,7 +120,7 @@ class MacroAssembler: public Assembler {
virtual void check_and_handle_popframe(Register java_thread);
virtual void check_and_handle_earlyret(Register java_thread);

- void safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp = rscratch1);
+ void safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod, Register tmp = rscratch1);
void rt_call(address dest, Register tmp = rscratch1);

// Load Effective Address
@@ -129,16 +129,21 @@ class MacroAssembler: public Assembler {
a.lea(this, r);
}

+ // Whether materializing the given address for a LDR/STR requires an
+ // additional lea instruction.
+ static bool legitimize_address_requires_lea(const Address &a, int size) {
+ return a.getMode() == Address::base_plus_offset &&
+ !Address::offset_ok_for_immed(a.offset(), exact_log2(size));
+ }
+
/* Sometimes we get misaligned loads and stores, usually from Unsafe
accesses, and these can exceed the offset range. */
Address legitimize_address(const Address &a, int size, Register scratch) {
- if (a.getMode() == Address::base_plus_offset) {
- if (! Address::offset_ok_for_immed(a.offset(), exact_log2(size))) {
- block_comment("legitimize_address {");
- lea(scratch, a);
- block_comment("} legitimize_address");
- return Address(scratch);
- }
+ if (legitimize_address_requires_lea(a, size)) {
+ block_comment("legitimize_address {");
+ lea(scratch, a);
+ block_comment("} legitimize_address");
+ return Address(scratch);
+ }
return a;
}
@@ -543,7 +543,7 @@ void MacroAssembler::generate__ieee754_rem_pio2(address npio2_hw,
// }
//
// /* compute n */
- // z = scalbnA(z,q0); /* actual value of z */
+ // z = scalbn(z,q0); /* actual value of z */
// z -= 8.0*floor(z*0.125); /* trim off integer >= 8 */
// n = (int) z;
// z -= (double)n;
@@ -576,7 +576,7 @@ void MacroAssembler::generate__ieee754_rem_pio2(address npio2_hw,
// }
// if(ih==2) {
// z = one - z;
- // if(carry!=0) z -= scalbnA(one,q0);
+ // if(carry!=0) z -= scalbn(one,q0);
// }
// }
//
@@ -602,7 +602,7 @@ void MacroAssembler::generate__ieee754_rem_pio2(address npio2_hw,
// jz -= 1; q0 -= 24;
// while(iq[jz]==0) { jz--; q0-=24;}
// } else { /* break z into 24-bit if necessary */
- // z = scalbnA(z,-q0);
+ // z = scalbn(z,-q0);
// if(z>=two24B) {
// fw = (double)((int)(twon24*z));
// iq[jz] = (int)(z-two24B*fw);
@@ -612,7 +612,7 @@ void MacroAssembler::generate__ieee754_rem_pio2(address npio2_hw,
// }
//
// /* convert integer "bit" chunk to floating-point value */
- // fw = scalbnA(one,q0);
+ // fw = scalbn(one,q0);
// for(i=jz;i>=0;i--) {
// q[i] = fw*(double)iq[i]; fw*=twon24;
// }
@@ -925,7 +925,7 @@ void MacroAssembler::generate__kernel_rem_pio2(address two_over_pi, address pio2
fmovd(v25, 1.0);
fsubd(v18, v25, v18); // z = one - z;
cbzw(rscratch2, IH_HANDLED);
- fsubd(v18, v18, v30); // z -= scalbnA(one,q0);
+ fsubd(v18, v18, v30); // z -= scalbn(one,q0);
}
}
bind(IH_HANDLED);
@@ -1026,7 +1026,7 @@ void MacroAssembler::generate__kernel_rem_pio2(address two_over_pi, address pio2
bind(Z_ZERO_CHECK_DONE);
// convert integer "bit" chunk to floating-point value
// v17 = twon24
- // update v30, which was scalbnA(1.0, <old q0>);
+ // update v30, which was scalbn(1.0, <old q0>);
addw(tmp2, rscratch1, 1023); // biased exponent
lsl(tmp2, tmp2, 52); // put at correct position
mov(i, jz);
@@ -212,11 +212,6 @@ void NativeMovRegMem::verify() {

void NativeJump::verify() { ; }

-
- void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
- }
-

address NativeJump::jump_destination() const {
address dest = MacroAssembler::target_addr_for_insn_or_null(instruction_address());
@@ -345,10 +340,6 @@ bool NativeInstruction::is_movk() {
return Instruction_aarch64::extract(int_at(0), 30, 23) == 0b11100101;
}

- bool NativeInstruction::is_sigill_not_entrant() {
- return uint_at(0) == 0xd4bbd5a1; // dcps1 #0xdead
- }
-
void NativeIllegalInstruction::insert(address code_pos) {
*(juint*)code_pos = 0xd4bbd5a1; // dcps1 #0xdead
}
@@ -359,45 +350,8 @@ bool NativeInstruction::is_stop() {

//-------------------------------------------------------------------

- // MT-safe inserting of a jump over a jump or a nop (used by
- // nmethod::make_not_entrant)
-
- void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
-
- assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");
- assert(nativeInstruction_at(verified_entry)->is_jump_or_nop()
- || nativeInstruction_at(verified_entry)->is_sigill_not_entrant(),
- "Aarch64 cannot replace non-jump with jump");
-
- // Patch this nmethod atomically.
- if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
- ptrdiff_t disp = dest - verified_entry;
- guarantee(disp < 1 << 27 && disp > - (1 << 27), "branch overflow");
-
- unsigned int insn = (0b000101 << 26) | ((disp >> 2) & 0x3ffffff);
- *(unsigned int*)verified_entry = insn;
- } else {
- // We use an illegal instruction for marking a method as not_entrant.
- NativeIllegalInstruction::insert(verified_entry);
- }
-
- ICache::invalidate_range(verified_entry, instruction_size);
- }
-
void NativeGeneralJump::verify() { }

void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
NativeGeneralJump* n_jump = (NativeGeneralJump*)code_pos;

CodeBuffer cb(code_pos, instruction_size);
MacroAssembler a(&cb);

a.movptr(rscratch1, (uintptr_t)entry);
a.br(rscratch1);

ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a long jump instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
ShouldNotCallThis();
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2108, Red Hat Inc. All rights reserved.
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2025, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -83,7 +83,6 @@ public:
bool is_safepoint_poll();
bool is_movz();
bool is_movk();
bool is_sigill_not_entrant();
bool is_stop();

protected:
@@ -360,9 +359,6 @@ public:

// Insertion of native jump instruction
static void insert(address code_pos, address entry);
// MT-safe insertion of native jump at verified method entry
static void check_verified_entry_alignment(address entry, address verified_entry);
static void patch_verified_entry(address entry, address verified_entry, address dest);
};

inline NativeJump* nativeJump_at(address address) {
@@ -383,7 +379,6 @@ public:
address jump_destination() const;
void set_jump_destination(address dest);

static void insert_unconditional(address code_pos, address entry);
static void replace_mt_safe(address instr_addr, address code_buffer);
static void verify();
};
@@ -1877,7 +1877,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Check for safepoint operation in progress and/or pending suspend requests.
{
// No need for acquire as Java threads always disarm themselves.
__ safepoint_poll(safepoint_in_progress, true /* at_return */, false /* acquire */, false /* in_nmethod */);
__ safepoint_poll(safepoint_in_progress, true /* at_return */, false /* in_nmethod */);
__ ldrw(rscratch1, Address(rthread, JavaThread::suspend_flags_offset()));
__ cbnzw(rscratch1, safepoint_in_progress);
__ bind(safepoint_in_progress_done);
@@ -31,7 +31,8 @@ public:
NONE = -1,
NOP,
ISB,
YIELD
YIELD,
SB
};

private:
@@ -26,6 +26,13 @@
#ifndef CPU_AARCH64_STUBDECLARATIONS_HPP
#define CPU_AARCH64_STUBDECLARATIONS_HPP

#define STUBGEN_PREUNIVERSE_BLOBS_ARCH_DO(do_stub, \
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_blob(preuniverse, 0) \

#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \
do_arch_blob, \
do_arch_entry, \

@@ -11653,6 +11653,10 @@ class StubGenerator: public StubCodeGenerator {
};

// Initialization
void generate_preuniverse_stubs() {
// preuniverse stubs are not needed for aarch64
}

void generate_initial_stubs() {
// Generate initial stubs and initializes the entry points

@@ -11898,6 +11902,9 @@ class StubGenerator: public StubCodeGenerator {
public:
StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) {
switch(blob_id) {
case preuniverse_id:
generate_preuniverse_stubs();
break;
case initial_id:
generate_initial_stubs();
break;
@@ -1018,7 +1018,7 @@ address TemplateInterpreterGenerator::generate_CRC32_update_entry() {

Label slow_path;
// If we need a safepoint check, generate full interpreter entry.
__ safepoint_poll(slow_path, false /* at_return */, false /* acquire */, false /* in_nmethod */);
__ safepoint_poll(slow_path, false /* at_return */, false /* in_nmethod */);

// We don't generate local frame and don't align stack because
// we call stub code and there is no safepoint on this path.
@@ -1065,7 +1065,7 @@ address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractI

Label slow_path;
// If we need a safepoint check, generate full interpreter entry.
__ safepoint_poll(slow_path, false /* at_return */, false /* acquire */, false /* in_nmethod */);
__ safepoint_poll(slow_path, false /* at_return */, false /* in_nmethod */);

// We don't generate local frame and don't align stack because
// we call stub code and there is no safepoint on this path.
@@ -1455,7 +1455,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
Label L, Continue;

// No need for acquire as Java threads always disarm themselves.
__ safepoint_poll(L, true /* at_return */, false /* acquire */, false /* in_nmethod */);
__ safepoint_poll(L, true /* at_return */, false /* in_nmethod */);
__ ldrw(rscratch2, Address(rthread, JavaThread::suspend_flags_offset()));
__ cbz(rscratch2, Continue);
__ bind(L);
@@ -1608,7 +1608,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {

Label slow_path;
Label fast_path;
__ safepoint_poll(slow_path, true /* at_return */, false /* acquire */, false /* in_nmethod */);
__ safepoint_poll(slow_path, true /* at_return */, false /* in_nmethod */);
__ br(Assembler::AL, fast_path);
__ bind(slow_path);
__ push(dtos);

@@ -1759,7 +1759,7 @@ void TemplateTable::float_cmp(bool is_float, int unordered_result)

void TemplateTable::branch(bool is_jsr, bool is_wide)
{
__ profile_taken_branch(r0, r1);
__ profile_taken_branch(r0);
const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
InvocationCounter::counter_offset();
const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
@@ -1809,7 +1809,6 @@ void TemplateTable::branch(bool is_jsr, bool is_wide)
if (UseLoopCounter) {
// increment backedge counter for backward branches
// r0: MDO
// w1: MDO bumped taken-count
// r2: target offset
__ cmp(r2, zr);
__ br(Assembler::GT, dispatch); // count only if backward branch
@@ -1820,12 +1819,10 @@ void TemplateTable::branch(bool is_jsr, bool is_wide)
__ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
__ cbnz(rscratch1, has_counters);
__ push(r0);
__ push(r1);
__ push(r2);
__ call_VM(noreg, CAST_FROM_FN_PTR(address,
InterpreterRuntime::build_method_counters), rmethod);
__ pop(r2);
__ pop(r1);
__ pop(r0);
__ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
__ cbz(rscratch1, dispatch); // No MethodCounters allocated, OutOfMemory
@@ -57,8 +57,13 @@ static SpinWait get_spin_wait_desc() {
return SpinWait(SpinWait::ISB, OnSpinWaitInstCount);
} else if (strcmp(OnSpinWaitInst, "yield") == 0) {
return SpinWait(SpinWait::YIELD, OnSpinWaitInstCount);
} else if (strcmp(OnSpinWaitInst, "sb") == 0) {
if (!VM_Version::supports_sb()) {
vm_exit_during_initialization("OnSpinWaitInst is SB but current CPU does not support SB instruction");
}
return SpinWait(SpinWait::SB, OnSpinWaitInstCount);
} else if (strcmp(OnSpinWaitInst, "none") != 0) {
vm_exit_during_initialization("The options for OnSpinWaitInst are nop, isb, yield, and none", OnSpinWaitInst);
vm_exit_during_initialization("The options for OnSpinWaitInst are nop, isb, yield, sb, and none", OnSpinWaitInst);
}

if (!FLAG_IS_DEFAULT(OnSpinWaitInstCount) && OnSpinWaitInstCount > 0) {

@@ -131,6 +131,7 @@ enum Ampere_CPU_Model {
decl(SHA3, sha3, 17) \
decl(SHA512, sha512, 21) \
decl(SVE, sve, 22) \
decl(SB, sb, 29) \
decl(PACA, paca, 30) \
/* flags above must follow Linux HWCAP */ \
decl(SVEBITPERM, svebitperm, 27) \
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,7 +51,7 @@ define_pd_global(size_t, ProfiledCodeHeapSize, 14*M );
define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M );
define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(size_t, CodeCacheExpansionSize, 32*K );
define_pd_global(uintx, CodeCacheMinBlockLength, 1);
define_pd_global(size_t, CodeCacheMinBlockLength, 1);
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(bool, NeverActAsServerClassMachine, true);
define_pd_global(uint64_t, MaxRAM, 1ULL*G);

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -94,7 +94,7 @@ define_pd_global(size_t, CodeCacheExpansionSize, 32*K);
// Ergonomics related flags
define_pd_global(uint64_t, MaxRAM, 4ULL*G);
#endif
define_pd_global(uintx, CodeCacheMinBlockLength, 6);
define_pd_global(size_t, CodeCacheMinBlockLength, 6);
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);

define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed
@@ -193,10 +193,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
__ bind(guard);

// nmethod guard value. Skipped over in common case.
//
// Put a debug value to make any offsets skew
// clearly visible in coredump
__ emit_int32(0xDEADBEAF);
__ emit_int32(0); // initial armed value, will be reset later

__ bind(skip);
__ block_comment("nmethod_barrier end");

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,9 +36,9 @@ define_pd_global(bool, TrapBasedNullChecks, false); // Not needed

define_pd_global(bool, DelayCompilerStubsGeneration, false); // No need - only few compiler's stubs

define_pd_global(uintx, CodeCacheSegmentSize, 64);
define_pd_global(intx, CodeEntryAlignment, 16);
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(size_t, CodeCacheSegmentSize, 64);
define_pd_global(intx, CodeEntryAlignment, 16);
define_pd_global(intx, OptoLoopAlignment, 16);

#define DEFAULT_STACK_YELLOW_PAGES (2)
#define DEFAULT_STACK_RED_PAGES (1)
@@ -282,16 +282,6 @@ void NativeMovConstReg::set_pc_relative_offset(address addr, address pc) {
}
}

void RawNativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
}

void RawNativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "should be");
int *a = (int *)verified_entry;
a[0] = not_entrant_illegal_instruction; // always illegal
ICache::invalidate_range((address)&a[0], sizeof a[0]);
}

void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
int offset = (int)(entry - code_pos - 8);
assert(offset < 0x2000000 && offset > -0x2000000, "encoding constraint");

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -61,10 +61,6 @@ class RawNativeInstruction {
instr_fld_fst = 0xd0
};

// illegal instruction used by NativeJump::patch_verified_entry
// permanently undefined (UDF): 0xe << 28 | 0b1111111 << 20 | 0b1111 << 4
static const int not_entrant_illegal_instruction = 0xe7f000f0;

static int decode_rotated_imm12(int encoding) {
int base = encoding & 0xff;
int right_rotation = (encoding & 0xf00) >> 7;
@@ -274,10 +270,6 @@ class RawNativeJump: public NativeInstruction {
}
}

static void check_verified_entry_alignment(address entry, address verified_entry);

static void patch_verified_entry(address entry, address verified_entry, address dest);

};

inline RawNativeJump* rawNativeJump_at(address address) {

@@ -26,6 +26,13 @@
#ifndef CPU_ARM_STUBDECLARATIONS_HPP
#define CPU_ARM_STUBDECLARATIONS_HPP

#define STUBGEN_PREUNIVERSE_BLOBS_ARCH_DO(do_stub, \
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_blob(preuniverse, 0) \

#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \
do_arch_blob, \
do_arch_entry, \

@@ -3126,6 +3126,10 @@ class StubGenerator: public StubCodeGenerator {
//---------------------------------------------------------------------------
// Initialization

void generate_preuniverse_stubs() {
// preuniverse stubs are not needed for arm
}

void generate_initial_stubs() {
// Generates all stubs and initializes the entry points

@@ -3201,6 +3205,9 @@ class StubGenerator: public StubCodeGenerator {
public:
StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) {
switch(blob_id) {
case preuniverse_id:
generate_preuniverse_stubs();
break;
case initial_id:
generate_initial_stubs();
break;
@@ -46,7 +46,6 @@ void C1_MacroAssembler::explicit_null_check(Register base) {

void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
// Avoid stack bang as first instruction. It may get overwritten by patch_verified_entry.
const Register return_pc = R20;
mflr(return_pc);

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -44,17 +44,17 @@ define_pd_global(intx, CompileThreshold, 1000);

define_pd_global(intx, OnStackReplacePercentage, 1400);
define_pd_global(bool, ProfileInterpreter, false);
define_pd_global(uintx, ReservedCodeCacheSize, 32*M);
define_pd_global(uintx, NonProfiledCodeHeapSize, 13*M );
define_pd_global(uintx, ProfiledCodeHeapSize, 14*M );
define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M );
define_pd_global(uintx, CodeCacheExpansionSize, 32*K);
define_pd_global(uintx, CodeCacheMinBlockLength, 1);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(size_t, ReservedCodeCacheSize, 32*M);
define_pd_global(size_t, NonProfiledCodeHeapSize, 13*M );
define_pd_global(size_t, ProfiledCodeHeapSize, 14*M );
define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M );
define_pd_global(size_t, CodeCacheExpansionSize, 32*K);
define_pd_global(size_t, CodeCacheMinBlockLength, 1);
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(bool, NeverActAsServerClassMachine, true);
define_pd_global(size_t, NewSizeThreadIncrease, 16*K);
define_pd_global(uint64_t, MaxRAM, 1ULL*G);
define_pd_global(uintx, InitialCodeCacheSize, 160*K);
define_pd_global(size_t, InitialCodeCacheSize, 160*K);
#endif // !COMPILER2

define_pd_global(bool, UseTypeProfile, false);
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -78,17 +78,17 @@ define_pd_global(uint, SuperWordStoreToLoadForwardingFailureDetection, 16);
define_pd_global(bool, OptoScheduling, false);
define_pd_global(bool, IdealizeClearArrayNode, true);

define_pd_global(uintx, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(uintx, ReservedCodeCacheSize, 48*M);
define_pd_global(uintx, NonProfiledCodeHeapSize, 21*M);
define_pd_global(uintx, ProfiledCodeHeapSize, 22*M);
define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M );
define_pd_global(uintx, CodeCacheExpansionSize, 64*K);
define_pd_global(size_t, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
define_pd_global(size_t, ReservedCodeCacheSize, 48*M);
define_pd_global(size_t, NonProfiledCodeHeapSize, 21*M);
define_pd_global(size_t, ProfiledCodeHeapSize, 22*M);
define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M );
define_pd_global(size_t, CodeCacheExpansionSize, 64*K);

// Ergonomics related flags
define_pd_global(uint64_t, MaxRAM, 128ULL*G);
define_pd_global(uintx, CodeCacheMinBlockLength, 6);
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
define_pd_global(size_t, CodeCacheMinBlockLength, 6);
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);

define_pd_global(bool, TrapBasedRangeChecks, true);
@@ -333,9 +333,9 @@ int SaveLiveRegisters::iterate_over_register_mask(IterationAction action, int of
}
} else if (vm_reg->is_ConditionRegister()) {
// NOP. Conditions registers are covered by save_LR_CR
} else if (vm_reg->is_VectorSRegister()) {
} else if (vm_reg->is_VectorRegister()) {
assert(SuperwordUseVSX, "or should not reach here");
VectorSRegister vs_reg = vm_reg->as_VectorSRegister();
VectorSRegister vs_reg = (vm_reg->as_VectorRegister()).to_vsr();
if (vs_reg->encoding() >= VSR32->encoding() && vs_reg->encoding() <= VSR51->encoding()) {
reg_save_index += (2 + (reg_save_index & 1)); // 2 slots + alignment if needed

@@ -141,6 +141,7 @@ instruct zLoadP(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0)
%{
match(Set dst (LoadP mem));
effect(TEMP_DEF dst, KILL cr0);
ins_is_late_expanded_null_check_candidate(true);
ins_cost(MEMORY_REF_COST);

predicate((UseZGC && n->as_Load()->barrier_data() != 0)
@@ -160,6 +161,7 @@ instruct zLoadP_acq(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0)
%{
match(Set dst (LoadP mem));
effect(TEMP_DEF dst, KILL cr0);
ins_is_late_expanded_null_check_candidate(true);
ins_cost(3 * MEMORY_REF_COST);

// Predicate on instruction order is implicitly present due to the predicate of the cheaper zLoadP operation
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -59,10 +59,10 @@ define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES);
define_pd_global(bool, VMContinuations, true);

// Use large code-entry alignment.
define_pd_global(uintx, CodeCacheSegmentSize, 128);
define_pd_global(intx, CodeEntryAlignment, 64);
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, InlineSmallCode, 1500);
define_pd_global(size_t, CodeCacheSegmentSize, 128);
define_pd_global(intx, CodeEntryAlignment, 64);
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, InlineSmallCode, 1500);

// Flags for template interpreter.
define_pd_global(bool, RewriteBytecodes, true);

@@ -3928,8 +3928,10 @@ void MacroAssembler::kernel_crc32_vpmsum_aligned(Register crc, Register buf, Reg
Label L_outer_loop, L_inner_loop, L_last;

// Set DSCR pre-fetch to deepest.
load_const_optimized(t0, VM_Version::_dscr_val | 7);
mtdscr(t0);
if (VM_Version::has_mfdscr()) {
load_const_optimized(t0, VM_Version::_dscr_val | 7);
mtdscr(t0);
}

mtvrwz(VCRC, crc); // crc lives in VCRC, now

@@ -4073,8 +4075,10 @@ void MacroAssembler::kernel_crc32_vpmsum_aligned(Register crc, Register buf, Reg
// ********** Main loop end **********

// Restore DSCR pre-fetch value.
load_const_optimized(t0, VM_Version::_dscr_val);
mtdscr(t0);
if (VM_Version::has_mfdscr()) {
load_const_optimized(t0, VM_Version::_dscr_val);
mtdscr(t0);
}

// ********** Simple loop for remaining 16 byte blocks **********
{
@@ -39,18 +39,6 @@
#include "c1/c1_Runtime1.hpp"
#endif

// We use an illtrap for marking a method as not_entrant
// Work around a C++ compiler bug which changes 'this'
bool NativeInstruction::is_sigill_not_entrant_at(address addr) {
if (!Assembler::is_illtrap(addr)) return false;
CodeBlob* cb = CodeCache::find_blob(addr);
if (cb == nullptr || !cb->is_nmethod()) return false;
nmethod *nm = (nmethod *)cb;
// This method is not_entrant iff the illtrap instruction is
// located at the verified entry point.
return nm->verified_entry_point() == addr;
}

#ifdef ASSERT
void NativeInstruction::verify() {
// Make sure code pattern is actually an instruction address.
@@ -331,25 +319,6 @@ void NativeMovConstReg::verify() {
}
#endif // ASSERT

void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
ResourceMark rm;
int code_size = 1 * BytesPerInstWord;
CodeBuffer cb(verified_entry, code_size + 1);
MacroAssembler* a = new MacroAssembler(&cb);
#ifdef COMPILER2
assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");
#endif
// Patch this nmethod atomically. Always use illtrap/trap in debug build.
if (DEBUG_ONLY(false &&) a->is_within_range_of_b(dest, a->pc())) {
a->b(dest);
} else {
// The signal handler will continue at dest=OptoRuntime::handle_wrong_method_stub().
// We use an illtrap for marking a method as not_entrant.
a->illtrap();
}
ICache::ppc64_flush_icache_bytes(verified_entry, code_size);
}

#ifdef ASSERT
void NativeJump::verify() {
address addr = addr_at(0);
@@ -462,9 +431,7 @@ bool NativeDeoptInstruction::is_deopt_at(address code_pos) {
if (!Assembler::is_illtrap(code_pos)) return false;
CodeBlob* cb = CodeCache::find_blob(code_pos);
if (cb == nullptr || !cb->is_nmethod()) return false;
nmethod *nm = (nmethod *)cb;
// see NativeInstruction::is_sigill_not_entrant_at()
return nm->verified_entry_point() != code_pos;
return true;
}

// Inserts an instruction which is specified to cause a SIGILL at a given pc
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -69,13 +69,6 @@ class NativeInstruction {
return MacroAssembler::tdi_get_si16(long_at(0), Assembler::traptoUnconditional, 0);
}

// We use an illtrap for marking a method as not_entrant.
bool is_sigill_not_entrant() {
// Work around a C++ compiler bug which changes 'this'.
return NativeInstruction::is_sigill_not_entrant_at(addr_at(0));
}
static bool is_sigill_not_entrant_at(address addr);

#ifdef COMPILER2
// SIGTRAP-based implicit range checks
bool is_sigtrap_range_check() {
@@ -328,15 +321,7 @@ class NativeJump: public NativeInstruction {
}
}

// MT-safe insertion of native jump at verified method entry
static void patch_verified_entry(address entry, address verified_entry, address dest);

void verify() NOT_DEBUG_RETURN;

static void check_verified_entry_alignment(address entry, address verified_entry) {
// We just patch one instruction on ppc64, so the jump doesn't have to
// be aligned. Nothing to do here.
}
};

// Instantiates a NativeJump object starting at the given instruction

File diff suppressed because it is too large
@@ -321,6 +321,7 @@ class VectorRegister {

// accessors
constexpr int encoding() const { assert(is_valid(), "invalid register"); return _encoding; }
inline VMReg as_VMReg() const;

// testers
constexpr bool is_valid() const { return (0 <= _encoding && _encoding < number_of_registers); }
@@ -392,7 +393,6 @@ class VectorSRegister {

// accessors
constexpr int encoding() const { assert(is_valid(), "invalid register"); return _encoding; }
inline VMReg as_VMReg() const;
VectorSRegister successor() const { return VectorSRegister(encoding() + 1); }

// testers
@@ -484,8 +484,8 @@ class ConcreteRegisterImpl : public AbstractRegisterImpl {
enum {
max_gpr = Register::number_of_registers * 2,
max_fpr = max_gpr + FloatRegister::number_of_registers * 2,
max_vsr = max_fpr + VectorSRegister::number_of_registers * 4,
max_cnd = max_vsr + ConditionRegister::number_of_registers,
max_vr = max_fpr + VectorRegister::number_of_registers * 4,
max_cnd = max_vr + ConditionRegister::number_of_registers,
max_spr = max_cnd + SpecialRegister::number_of_registers,
// This number must be large enough to cover REG_COUNT (defined by c2) registers.
// There is no requirement that any ordering here matches any ordering c2 gives

@@ -111,13 +111,13 @@ class RegisterSaver {
int_reg,
float_reg,
special_reg,
vs_reg
vec_reg
} RegisterType;

typedef enum {
reg_size = 8,
half_reg_size = reg_size / 2,
vs_reg_size = 16
vec_reg_size = 16
} RegisterConstants;

typedef struct {
@@ -137,8 +137,8 @@ class RegisterSaver {
#define RegisterSaver_LiveSpecialReg(regname) \
{ RegisterSaver::special_reg, regname->encoding(), regname->as_VMReg() }

#define RegisterSaver_LiveVSReg(regname) \
{ RegisterSaver::vs_reg, regname->encoding(), regname->as_VMReg() }
#define RegisterSaver_LiveVecReg(regname) \
{ RegisterSaver::vec_reg, regname->encoding(), regname->as_VMReg() }

static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = {
// Live registers which get spilled to the stack. Register
@@ -220,42 +220,42 @@ static const RegisterSaver::LiveRegType RegisterSaver_LiveRegs[] = {
RegisterSaver_LiveIntReg( R31 ) // must be the last register (see save/restore functions below)
};

static const RegisterSaver::LiveRegType RegisterSaver_LiveVSRegs[] = {
static const RegisterSaver::LiveRegType RegisterSaver_LiveVecRegs[] = {
//
// live vector scalar registers (optional, only these ones are used by C2):
// live vector registers (optional, only these ones are used by C2):
//
RegisterSaver_LiveVSReg( VSR32 ),
RegisterSaver_LiveVSReg( VSR33 ),
RegisterSaver_LiveVSReg( VSR34 ),
RegisterSaver_LiveVSReg( VSR35 ),
RegisterSaver_LiveVSReg( VSR36 ),
RegisterSaver_LiveVSReg( VSR37 ),
RegisterSaver_LiveVSReg( VSR38 ),
RegisterSaver_LiveVSReg( VSR39 ),
RegisterSaver_LiveVSReg( VSR40 ),
RegisterSaver_LiveVSReg( VSR41 ),
RegisterSaver_LiveVSReg( VSR42 ),
RegisterSaver_LiveVSReg( VSR43 ),
RegisterSaver_LiveVSReg( VSR44 ),
RegisterSaver_LiveVSReg( VSR45 ),
RegisterSaver_LiveVSReg( VSR46 ),
RegisterSaver_LiveVSReg( VSR47 ),
RegisterSaver_LiveVSReg( VSR48 ),
RegisterSaver_LiveVSReg( VSR49 ),
RegisterSaver_LiveVSReg( VSR50 ),
RegisterSaver_LiveVSReg( VSR51 ),
RegisterSaver_LiveVSReg( VSR52 ),
RegisterSaver_LiveVSReg( VSR53 ),
RegisterSaver_LiveVSReg( VSR54 ),
RegisterSaver_LiveVSReg( VSR55 ),
RegisterSaver_LiveVSReg( VSR56 ),
RegisterSaver_LiveVSReg( VSR57 ),
RegisterSaver_LiveVSReg( VSR58 ),
RegisterSaver_LiveVSReg( VSR59 ),
RegisterSaver_LiveVSReg( VSR60 ),
RegisterSaver_LiveVSReg( VSR61 ),
RegisterSaver_LiveVSReg( VSR62 ),
RegisterSaver_LiveVSReg( VSR63 )
RegisterSaver_LiveVecReg( VR0 ),
RegisterSaver_LiveVecReg( VR1 ),
RegisterSaver_LiveVecReg( VR2 ),
RegisterSaver_LiveVecReg( VR3 ),
RegisterSaver_LiveVecReg( VR4 ),
RegisterSaver_LiveVecReg( VR5 ),
RegisterSaver_LiveVecReg( VR6 ),
RegisterSaver_LiveVecReg( VR7 ),
RegisterSaver_LiveVecReg( VR8 ),
RegisterSaver_LiveVecReg( VR9 ),
RegisterSaver_LiveVecReg( VR10 ),
RegisterSaver_LiveVecReg( VR11 ),
RegisterSaver_LiveVecReg( VR12 ),
RegisterSaver_LiveVecReg( VR13 ),
RegisterSaver_LiveVecReg( VR14 ),
RegisterSaver_LiveVecReg( VR15 ),
RegisterSaver_LiveVecReg( VR16 ),
RegisterSaver_LiveVecReg( VR17 ),
RegisterSaver_LiveVecReg( VR18 ),
RegisterSaver_LiveVecReg( VR19 ),
RegisterSaver_LiveVecReg( VR20 ),
RegisterSaver_LiveVecReg( VR21 ),
RegisterSaver_LiveVecReg( VR22 ),
RegisterSaver_LiveVecReg( VR23 ),
RegisterSaver_LiveVecReg( VR24 ),
RegisterSaver_LiveVecReg( VR25 ),
RegisterSaver_LiveVecReg( VR26 ),
RegisterSaver_LiveVecReg( VR27 ),
RegisterSaver_LiveVecReg( VR28 ),
RegisterSaver_LiveVecReg( VR29 ),
RegisterSaver_LiveVecReg( VR30 ),
RegisterSaver_LiveVecReg( VR31 )
};
@@ -277,10 +277,10 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble
// calculate frame size
const int regstosave_num = sizeof(RegisterSaver_LiveRegs) /
sizeof(RegisterSaver::LiveRegType);
const int vsregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVSRegs) /
const int vecregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVecRegs) /
sizeof(RegisterSaver::LiveRegType))
: 0;
const int register_save_size = regstosave_num * reg_size + vsregstosave_num * vs_reg_size;
const int register_save_size = regstosave_num * reg_size + vecregstosave_num * vec_reg_size;
const int frame_size_in_bytes = align_up(register_save_size, frame::alignment_in_bytes)
+ frame::native_abi_reg_args_size;

@@ -298,8 +298,8 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble

// Save some registers in the last (non-vector) slots of the new frame so we
// can use them as scratch regs or to determine the return pc.
__ std(R31, frame_size_in_bytes - reg_size - vsregstosave_num * vs_reg_size, R1_SP);
__ std(R30, frame_size_in_bytes - 2*reg_size - vsregstosave_num * vs_reg_size, R1_SP);
__ std(R31, frame_size_in_bytes - reg_size - vecregstosave_num * vec_reg_size, R1_SP);
__ std(R30, frame_size_in_bytes - 2*reg_size - vecregstosave_num * vec_reg_size, R1_SP);

// save the flags
// Do the save_LR by hand and adjust the return pc if requested.
@@ -360,37 +360,37 @@ OopMap* RegisterSaver::push_frame_reg_args_and_save_live_registers(MacroAssemble
// the utilized instructions (PowerArchitecturePPC64).
assert(is_aligned(offset, StackAlignmentInBytes), "should be");
if (PowerArchitecturePPC64 >= 10) {
assert(is_even(vsregstosave_num), "expectation");
for (int i = 0; i < vsregstosave_num; i += 2) {
int reg_num = RegisterSaver_LiveVSRegs[i].reg_num;
assert(RegisterSaver_LiveVSRegs[i + 1].reg_num == reg_num + 1, "or use other instructions!");
assert(is_even(vecregstosave_num), "expectation");
for (int i = 0; i < vecregstosave_num; i += 2) {
int reg_num = RegisterSaver_LiveVecRegs[i].reg_num;
assert(RegisterSaver_LiveVecRegs[i + 1].reg_num == reg_num + 1, "or use other instructions!");

__ stxvp(as_VectorSRegister(reg_num), offset, R1_SP);
__ stxvp(as_VectorRegister(reg_num).to_vsr(), offset, R1_SP);
// Note: The contents were read in the same order (see loadV16_Power9 node in ppc.ad).
if (generate_oop_map) {
map->set_callee_saved(VMRegImpl::stack2reg(offset >> 2),
RegisterSaver_LiveVSRegs[i LITTLE_ENDIAN_ONLY(+1) ].vmreg);
map->set_callee_saved(VMRegImpl::stack2reg((offset + vs_reg_size) >> 2),
RegisterSaver_LiveVSRegs[i BIG_ENDIAN_ONLY(+1) ].vmreg);
RegisterSaver_LiveVecRegs[i LITTLE_ENDIAN_ONLY(+1) ].vmreg);
map->set_callee_saved(VMRegImpl::stack2reg((offset + vec_reg_size) >> 2),
RegisterSaver_LiveVecRegs[i BIG_ENDIAN_ONLY(+1) ].vmreg);
}
offset += (2 * vs_reg_size);
offset += (2 * vec_reg_size);
}
} else {
for (int i = 0; i < vsregstosave_num; i++) {
int reg_num = RegisterSaver_LiveVSRegs[i].reg_num;
for (int i = 0; i < vecregstosave_num; i++) {
int reg_num = RegisterSaver_LiveVecRegs[i].reg_num;

if (PowerArchitecturePPC64 >= 9) {
__ stxv(as_VectorSRegister(reg_num), offset, R1_SP);
__ stxv(as_VectorRegister(reg_num)->to_vsr(), offset, R1_SP);
} else {
__ li(R31, offset);
__ stxvd2x(as_VectorSRegister(reg_num), R31, R1_SP);
__ stxvd2x(as_VectorRegister(reg_num)->to_vsr(), R31, R1_SP);
}
// Note: The contents were read in the same order (see loadV16_Power8 / loadV16_Power9 node in ppc.ad).
if (generate_oop_map) {
VMReg vsr = RegisterSaver_LiveVSRegs[i].vmreg;
VMReg vsr = RegisterSaver_LiveVecRegs[i].vmreg;
map->set_callee_saved(VMRegImpl::stack2reg(offset >> 2), vsr);
}
offset += vs_reg_size;
offset += vec_reg_size;
}
}

@@ -411,10 +411,10 @@ void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm,
bool save_vectors) {
const int regstosave_num = sizeof(RegisterSaver_LiveRegs) /
sizeof(RegisterSaver::LiveRegType);
const int vsregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVSRegs) /
const int vecregstosave_num = save_vectors ? (sizeof(RegisterSaver_LiveVecRegs) /
sizeof(RegisterSaver::LiveRegType))
: 0;
const int register_save_size = regstosave_num * reg_size + vsregstosave_num * vs_reg_size;
const int register_save_size = regstosave_num * reg_size + vecregstosave_num * vec_reg_size;

const int register_save_offset = frame_size_in_bytes - register_save_size;

@@ -456,26 +456,26 @@ void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm,

assert(is_aligned(offset, StackAlignmentInBytes), "should be");
if (PowerArchitecturePPC64 >= 10) {
for (int i = 0; i < vsregstosave_num; i += 2) {
int reg_num = RegisterSaver_LiveVSRegs[i].reg_num;
assert(RegisterSaver_LiveVSRegs[i + 1].reg_num == reg_num + 1, "or use other instructions!");
for (int i = 0; i < vecregstosave_num; i += 2) {
int reg_num = RegisterSaver_LiveVecRegs[i].reg_num;
assert(RegisterSaver_LiveVecRegs[i + 1].reg_num == reg_num + 1, "or use other instructions!");

__ lxvp(as_VectorSRegister(reg_num), offset, R1_SP);
__ lxvp(as_VectorRegister(reg_num).to_vsr(), offset, R1_SP);

offset += (2 * vs_reg_size);
offset += (2 * vec_reg_size);
}
} else {
for (int i = 0; i < vsregstosave_num; i++) {
int reg_num = RegisterSaver_LiveVSRegs[i].reg_num;
for (int i = 0; i < vecregstosave_num; i++) {
int reg_num = RegisterSaver_LiveVecRegs[i].reg_num;

if (PowerArchitecturePPC64 >= 9) {
__ lxv(as_VectorSRegister(reg_num), offset, R1_SP);
__ lxv(as_VectorRegister(reg_num).to_vsr(), offset, R1_SP);
} else {
__ li(R31, offset);
__ lxvd2x(as_VectorSRegister(reg_num), R31, R1_SP);
__ lxvd2x(as_VectorRegister(reg_num).to_vsr(), R31, R1_SP);
}

offset += vs_reg_size;
offset += vec_reg_size;
}
}

@@ -486,7 +486,7 @@ void RegisterSaver::restore_live_registers_and_pop_frame(MacroAssembler* masm,
__ mtlr(R31);

// restore scratch register's value
__ ld(R31, frame_size_in_bytes - reg_size - vsregstosave_num * vs_reg_size, R1_SP);
__ ld(R31, frame_size_in_bytes - reg_size - vecregstosave_num * vec_reg_size, R1_SP);

// pop the frame
__ addi(R1_SP, R1_SP, frame_size_in_bytes);
@@ -26,6 +26,13 @@
#ifndef CPU_PPC_STUBDECLARATIONS_HPP
#define CPU_PPC_STUBDECLARATIONS_HPP

#define STUBGEN_PREUNIVERSE_BLOBS_ARCH_DO(do_stub, \
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_blob(preuniverse, 0) \

#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \
do_arch_blob, \
do_arch_entry, \

@@ -952,8 +952,10 @@ class StubGenerator: public StubCodeGenerator {
address start_pc = __ pc();
Register tmp1 = R6_ARG4;
// probably copy stub would have changed value reset it.
__ load_const_optimized(tmp1, VM_Version::_dscr_val);
__ mtdscr(tmp1);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp1, VM_Version::_dscr_val);
__ mtdscr(tmp1);
}
__ li(R3_RET, 0); // return 0
__ blr();
return start_pc;
@@ -1070,9 +1072,10 @@ class StubGenerator: public StubCodeGenerator {
__ dcbt(R3_ARG1, 0);

// If supported set DSCR pre-fetch to deepest.
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);

if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
__ li(tmp1, 16);

// Backbranch target aligned to 32-byte. Not 16-byte align as
@@ -1092,8 +1095,10 @@ class StubGenerator: public StubCodeGenerator {
__ bdnz(l_10); // Dec CTR and loop if not zero.

// Restore DSCR pre-fetch value.
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}

} // FasterArrayCopy

@@ -1344,8 +1349,10 @@ class StubGenerator: public StubCodeGenerator {
__ dcbt(R3_ARG1, 0);

// If supported set DSCR pre-fetch to deepest.
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
__ li(tmp1, 16);

// Backbranch target aligned to 32-byte. It's not aligned 16-byte
@@ -1365,8 +1372,11 @@ class StubGenerator: public StubCodeGenerator {
__ bdnz(l_9); // Dec CTR and loop if not zero.

// Restore DSCR pre-fetch value.
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}

} // FasterArrayCopy
__ bind(l_6);

@@ -1527,9 +1537,10 @@ class StubGenerator: public StubCodeGenerator {
__ dcbt(R3_ARG1, 0);

// Set DSCR pre-fetch to deepest.
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);

if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
__ li(tmp1, 16);

// Backbranch target aligned to 32-byte. Not 16-byte align as
@@ -1549,9 +1560,10 @@ class StubGenerator: public StubCodeGenerator {
__ bdnz(l_7); // Dec CTR and loop if not zero.

// Restore DSCR pre-fetch value.
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);

if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}

} // FasterArrayCopy

@@ -1672,9 +1684,10 @@ class StubGenerator: public StubCodeGenerator {
__ dcbt(R3_ARG1, 0);

// Set DSCR pre-fetch to deepest.
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);

if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
__ li(tmp1, 16);

// Backbranch target aligned to 32-byte. Not 16-byte align as
@@ -1694,8 +1707,10 @@ class StubGenerator: public StubCodeGenerator {
__ bdnz(l_4);

// Restore DSCR pre-fetch value.
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}

__ cmpwi(CR0, R5_ARG3, 0);
__ beq(CR0, l_6);
@@ -1788,9 +1803,10 @@ class StubGenerator: public StubCodeGenerator {
__ dcbt(R3_ARG1, 0);

// Set DSCR pre-fetch to deepest.
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);

if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
__ li(tmp1, 16);

// Backbranch target aligned to 32-byte. Not 16-byte align as
@@ -1810,8 +1826,10 @@ class StubGenerator: public StubCodeGenerator {
__ bdnz(l_5); // Dec CTR and loop if not zero.

// Restore DSCR pre-fetch value.
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}

} // FasterArrayCopy

@@ -1910,9 +1928,10 @@ class StubGenerator: public StubCodeGenerator {
__ dcbt(R3_ARG1, 0);

// Set DSCR pre-fetch to deepest.
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);

if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val | 7);
__ mtdscr(tmp2);
}
__ li(tmp1, 16);

// Backbranch target aligned to 32-byte. Not 16-byte align as
@@ -1932,8 +1951,10 @@ class StubGenerator: public StubCodeGenerator {
__ bdnz(l_4);

// Restore DSCR pre-fetch value.
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
if (VM_Version::has_mfdscr()) {
__ load_const_optimized(tmp2, VM_Version::_dscr_val);
__ mtdscr(tmp2);
}

__ cmpwi(CR0, R5_ARG3, 0);
__ beq(CR0, l_1);
@@ -4938,6 +4959,10 @@ void generate_lookup_secondary_supers_table_stub() {
}

// Initialization
void generate_preuniverse_stubs() {
// preuniverse stubs are not needed for ppc
}

void generate_initial_stubs() {
// Generates all stubs and initializes the entry points

@@ -5067,6 +5092,9 @@ void generate_lookup_secondary_supers_table_stub() {
public:
StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) {
switch(blob_id) {
case preuniverse_id:
generate_preuniverse_stubs();
break;
case initial_id:
generate_initial_stubs();
break;

@@ -80,7 +80,9 @@ void VM_Version::initialize() {
"%zu on this machine", PowerArchitecturePPC64);

// Power 8: Configure Data Stream Control Register.
config_dscr();
if (VM_Version::has_mfdscr()) {
config_dscr();
}

if (!UseSIGTRAP) {
MSG(TrapBasedICMissChecks);
@@ -170,7 +172,8 @@ void VM_Version::initialize() {
// Create and print feature-string.
char buf[(num_features+1) * 16]; // Max 16 chars per feature.
jio_snprintf(buf, sizeof(buf),
"ppc64 sha aes%s%s",
"ppc64 sha aes%s%s%s",
(has_mfdscr() ? " mfdscr" : ""),
(has_darn() ? " darn" : ""),
(has_brw() ? " brw" : "")
// Make sure number of %s matches num_features!
@@ -488,6 +491,7 @@ void VM_Version::determine_features() {
uint32_t *code = (uint32_t *)a->pc();
// Keep R3_ARG1 unmodified, it contains &field (see below).
// Keep R4_ARG2 unmodified, it contains offset = 0 (see below).
a->mfdscr(R0);
a->darn(R7);
a->brw(R5, R6);
a->blr();
@@ -524,6 +528,7 @@ void VM_Version::determine_features() {

// determine which instructions are legal.
int feature_cntr = 0;
if (code[feature_cntr++]) features |= mfdscr_m;
if (code[feature_cntr++]) features |= darn_m;
if (code[feature_cntr++]) features |= brw_m;
@@ -32,12 +32,14 @@
class VM_Version: public Abstract_VM_Version {
protected:
enum Feature_Flag {
mfdscr,
darn,
brw,
num_features // last entry to count features
};
enum Feature_Flag_Set {
unknown_m = 0,
mfdscr_m = (1 << mfdscr ),
darn_m = (1 << darn ),
brw_m = (1 << brw ),
all_features_m = (unsigned long)-1
@@ -67,8 +69,9 @@ public:

static bool is_determine_features_test_running() { return _is_determine_features_test_running; }
// CPU instruction support
static bool has_darn() { return (_features & darn_m) != 0; }
static bool has_brw() { return (_features & brw_m) != 0; }
static bool has_mfdscr() { return (_features & mfdscr_m) != 0; } // Power8, but may be unavailable (QEMU)
static bool has_darn() { return (_features & darn_m) != 0; }
static bool has_brw() { return (_features & brw_m) != 0; }

// Assembler testing
static void allow_all();

@@ -47,7 +47,7 @@ void VMRegImpl::set_regName() {
}

VectorSRegister vsreg = ::as_VectorSRegister(0);
for ( ; i < ConcreteRegisterImpl::max_vsr; ) {
for ( ; i < ConcreteRegisterImpl::max_vr; ) {
regName[i++] = vsreg->name();
regName[i++] = vsreg->name();
regName[i++] = vsreg->name();

@@ -35,13 +35,13 @@ inline bool is_FloatRegister() {
value() < ConcreteRegisterImpl::max_fpr;
}

inline bool is_VectorSRegister() {
inline bool is_VectorRegister() {
return value() >= ConcreteRegisterImpl::max_fpr &&
value() < ConcreteRegisterImpl::max_vsr;
value() < ConcreteRegisterImpl::max_vr;
}

inline bool is_ConditionRegister() {
return value() >= ConcreteRegisterImpl::max_vsr &&
return value() >= ConcreteRegisterImpl::max_vr &&
value() < ConcreteRegisterImpl::max_cnd;
}

@@ -60,15 +60,15 @@ inline FloatRegister as_FloatRegister() {
return ::as_FloatRegister((value() - ConcreteRegisterImpl::max_gpr) >> 1);
}

inline VectorSRegister as_VectorSRegister() {
assert(is_VectorSRegister(), "must be");
return ::as_VectorSRegister((value() - ConcreteRegisterImpl::max_fpr) >> 2);
inline VectorRegister as_VectorRegister() {
assert(is_VectorRegister(), "must be");
return ::as_VectorRegister((value() - ConcreteRegisterImpl::max_fpr) >> 2);
}

inline bool is_concrete() {
assert(is_reg(), "must be");
if (is_Register() || is_FloatRegister()) return is_even(value());
if (is_VectorSRegister()) {
if (is_VectorRegister()) {
int base = value() - ConcreteRegisterImpl::max_fpr;
return (base & 3) == 0;
}

@@ -40,13 +40,13 @@ inline VMReg FloatRegister::as_VMReg() const {
return VMRegImpl::as_VMReg((encoding() << 1) + ConcreteRegisterImpl::max_gpr);
}

inline VMReg VectorSRegister::as_VMReg() const {
inline VMReg VectorRegister::as_VMReg() const {
// Four halves, multiply by 4.
return VMRegImpl::as_VMReg((encoding() << 2) + ConcreteRegisterImpl::max_fpr);
}

inline VMReg ConditionRegister::as_VMReg() const {
return VMRegImpl::as_VMReg((encoding()) + ConcreteRegisterImpl::max_vsr);
return VMRegImpl::as_VMReg((encoding()) + ConcreteRegisterImpl::max_vr);
}

inline VMReg SpecialRegister::as_VMReg() const {

@@ -401,7 +401,7 @@ void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {

code_stub->set_safepoint_offset(__ offset());
__ relocate(relocInfo::poll_return_type);
__ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
__ safepoint_poll(*code_stub->entry(), true /* at_return */, true /* in_nmethod */);
__ ret();
}
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@@ -43,15 +43,15 @@ define_pd_global(intx, CompileThreshold, 1500 );
|
||||
|
||||
define_pd_global(intx, OnStackReplacePercentage, 933 );
|
||||
define_pd_global(intx, NewSizeThreadIncrease, 4*K );
|
||||
define_pd_global(intx, InitialCodeCacheSize, 160*K);
|
||||
define_pd_global(intx, ReservedCodeCacheSize, 32*M );
|
||||
define_pd_global(intx, NonProfiledCodeHeapSize, 13*M );
|
||||
define_pd_global(intx, ProfiledCodeHeapSize, 14*M );
|
||||
define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(size_t, InitialCodeCacheSize, 160*K);
|
||||
define_pd_global(size_t, ReservedCodeCacheSize, 32*M );
|
||||
define_pd_global(size_t, NonProfiledCodeHeapSize, 13*M );
|
||||
define_pd_global(size_t, ProfiledCodeHeapSize, 14*M );
|
||||
define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M );
|
||||
define_pd_global(bool, ProfileInterpreter, false);
|
||||
define_pd_global(intx, CodeCacheExpansionSize, 32*K );
|
||||
define_pd_global(uintx, CodeCacheMinBlockLength, 1);
|
||||
define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
|
||||
define_pd_global(size_t, CodeCacheExpansionSize, 32*K );
|
||||
define_pd_global(size_t, CodeCacheMinBlockLength, 1);
|
||||
define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
|
||||
define_pd_global(bool, NeverActAsServerClassMachine, true );
|
||||
define_pd_global(uint64_t, MaxRAM, 1ULL*G);
|
||||
define_pd_global(bool, CICompileOSR, true );
|
||||
|
||||
@@ -2170,15 +2170,13 @@ void C2_MacroAssembler::enc_cmove_cmp_fp(int cmpFlag, FloatRegister op1, FloatRe
|
||||
cmov_cmp_fp_le(op1, op2, dst, src, is_single);
|
||||
break;
|
||||
case BoolTest::ge:
|
||||
assert(false, "Should go to BoolTest::le case");
|
||||
ShouldNotReachHere();
|
||||
cmov_cmp_fp_ge(op1, op2, dst, src, is_single);
|
||||
break;
|
||||
case BoolTest::lt:
|
||||
cmov_cmp_fp_lt(op1, op2, dst, src, is_single);
|
||||
break;
|
||||
case BoolTest::gt:
|
||||
assert(false, "Should go to BoolTest::lt case");
|
||||
ShouldNotReachHere();
|
||||
cmov_cmp_fp_gt(op1, op2, dst, src, is_single);
|
||||
break;
|
||||
default:
|
||||
assert(false, "unsupported compare condition");
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@@ -51,8 +51,8 @@ define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
|
||||
define_pd_global(intx, LoopUnrollLimit, 60);
|
||||
define_pd_global(intx, LoopPercentProfileLimit, 10);
|
||||
// InitialCodeCacheSize derived from specjbb2000 run.
|
||||
define_pd_global(intx, InitialCodeCacheSize, 2496*K); // Integral multiple of CodeCacheExpansionSize
|
||||
define_pd_global(intx, CodeCacheExpansionSize, 64*K);
|
||||
define_pd_global(size_t, InitialCodeCacheSize, 2496*K); // Integral multiple of CodeCacheExpansionSize
|
||||
define_pd_global(size_t, CodeCacheExpansionSize, 64*K);
|
||||
|
||||
// Ergonomics related flags
|
||||
define_pd_global(uint64_t,MaxRAM, 128ULL*G);
|
||||
@@ -69,12 +69,12 @@ define_pd_global(bool, SuperWordLoopUnrollAnalysis, true);
 define_pd_global(uint, SuperWordStoreToLoadForwardingFailureDetection, 16);
 define_pd_global(bool, IdealizeClearArrayNode, true);

-define_pd_global(intx, ReservedCodeCacheSize, 48*M);
-define_pd_global(intx, NonProfiledCodeHeapSize, 21*M);
-define_pd_global(intx, ProfiledCodeHeapSize, 22*M);
-define_pd_global(intx, NonNMethodCodeHeapSize, 5*M );
-define_pd_global(uintx, CodeCacheMinBlockLength, 6);
-define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
+define_pd_global(size_t, ReservedCodeCacheSize, 48*M);
+define_pd_global(size_t, NonProfiledCodeHeapSize, 21*M);
+define_pd_global(size_t, ProfiledCodeHeapSize, 22*M);
+define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M );
+define_pd_global(size_t, CodeCacheMinBlockLength, 6);
+define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);

 // Ergonomics related flags
 define_pd_global(bool, NeverActAsServerClassMachine, false);
@@ -287,7 +287,7 @@ void DowncallLinker::StubGenerator::generate() {
       __ membar(MacroAssembler::AnyAny);
     }

-    __ safepoint_poll(L_safepoint_poll_slow_path, true /* at_return */, true /* acquire */, false /* in_nmethod */);
+    __ safepoint_poll(L_safepoint_poll_slow_path, true /* at_return */, false /* in_nmethod */);
     __ lwu(t0, Address(xthread, JavaThread::suspend_flags_offset()));
     __ bnez(t0, L_safepoint_poll_slow_path);

@@ -96,6 +96,7 @@ instruct zLoadP(iRegPNoSp dst, memory mem, iRegPNoSp tmp, rFlagsReg cr)
   match(Set dst (LoadP mem));
   predicate(UseZGC && n->as_Load()->barrier_data() != 0);
   effect(TEMP dst, TEMP tmp, KILL cr);
+  ins_is_late_expanded_null_check_candidate(true);

   ins_cost(4 * DEFAULT_COST);

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -38,7 +38,7 @@ define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap nulls

 define_pd_global(bool, DelayCompilerStubsGeneration, COMPILER2_OR_JVMCI);

-define_pd_global(uintx, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
+define_pd_global(size_t, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
 define_pd_global(intx, CodeEntryAlignment, 64);
 define_pd_global(intx, OptoLoopAlignment, 16);

@@ -645,7 +645,7 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
   // the stack, will call InterpreterRuntime::at_unwind.
   Label slow_path;
   Label fast_path;
-  safepoint_poll(slow_path, true /* at_return */, false /* acquire */, false /* in_nmethod */);
+  safepoint_poll(slow_path, true /* at_return */, false /* in_nmethod */);
   j(fast_path);

   bind(slow_path);
@@ -1050,26 +1050,16 @@ void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
   addi(sp, sp, 2 * wordSize);
 }

-void InterpreterMacroAssembler::profile_taken_branch(Register mdp,
-                                                     Register bumped_count) {
+void InterpreterMacroAssembler::profile_taken_branch(Register mdp) {
   if (ProfileInterpreter) {
     Label profile_continue;

     // If no method data exists, go to profile_continue.
     // Otherwise, assign to mdp
     test_method_data_pointer(mdp, profile_continue);

     // We are taking a branch. Increment the taken count.
-    Address data(mdp, in_bytes(JumpData::taken_offset()));
-    ld(bumped_count, data);
-    assert(DataLayout::counter_increment == 1,
-           "flow-free idiom only works with 1");
-    addi(bumped_count, bumped_count, DataLayout::counter_increment);
-    Label L;
-    // eg: bumped_count=0x7fff ffff ffff ffff + 1 < 0. so we use <= 0;
-    blez(bumped_count, L); // skip store if counter overflow,
-    sd(bumped_count, data);
-    bind(L);
+    increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));

     // The method data pointer needs to be updated to reflect the new target.
     update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
     bind(profile_continue);
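For reference, a minimal C++ transcription (a sketch; assumes a 64-bit counter and an increment of 1, as the deleted assert required) of the flow-free saturating bump that the removed hand-written sequence implemented and that increment_mdp_data_at now encapsulates:

#include <cstdint>

// Bump a profile counter without a compare on the hot path: after the add,
// a non-positive value means the increment wrapped past INT64_MAX, so the
// store is skipped and the counter saturates (the blez ... ; sd ... idiom).
void bump_taken_count(int64_t& counter) {
    int64_t v = counter + 1;   // DataLayout::counter_increment == 1
    if (v > 0) {
        counter = v;           // normal case: store the bumped value
    }                          // overflow case: keep the old (maximal) value
}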
@@ -1083,7 +1073,7 @@ void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
     // If no method data exists, go to profile_continue.
     test_method_data_pointer(mdp, profile_continue);

-    // We are taking a branch. Increment the not taken count.
+    // We are not taking a branch. Increment the not taken count.
     increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));

     // The method data pointer needs to be updated to correspond to
@@ -261,7 +261,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
   // narrow int return value
   void narrow(Register result);

-  void profile_taken_branch(Register mdp, Register bumped_count);
+  void profile_taken_branch(Register mdp);
   void profile_not_taken_branch(Register mdp);
   void profile_call(Register mdp);
   void profile_final_call(Register mdp);
@@ -1268,12 +1268,19 @@ void MacroAssembler::cmov_gtu(Register cmp1, Register cmp2, Register dst, Regist
 }

 // ----------- cmove, compare float -----------
+//
+// For CmpF/D + CMoveI/L, ordered ones are quite straight and simple,
+// so, just list behaviour of unordered ones as follow.
+//
+// Set dst (CMoveI (Binary cop (CmpF/D op1 op2)) (Binary dst src))
+// (If one or both inputs to the compare are NaN, then)
+// 1. (op1 lt op2) => true => CMove: dst = src
+// 2. (op1 le op2) => true => CMove: dst = src
+// 3. (op1 gt op2) => false => CMove: dst = dst
+// 4. (op1 ge op2) => false => CMove: dst = dst
+// 5. (op1 eq op2) => false => CMove: dst = dst
+// 6. (op1 ne op2) => true => CMove: dst = src

-// Move src to dst only if cmp1 == cmp2,
-// otherwise leave dst unchanged, including the case where one of them is NaN.
-// Clarification:
-// java code : cmp1 != cmp2 ? dst : src
-// transformed to : CMove dst, (cmp1 eq cmp2), dst, src
 void MacroAssembler::cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
   if (UseZicond) {
     if (is_single) {
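A quick standard-C++ check (not HotSpot code) of the hardware reality behind the new table: an ordinary float comparison involving NaN is always false except for !=, so the "unordered => true" rows above cannot come from the raw compare alone and have to be synthesized, which is what the explicit unordered flags on the branch helpers below provide.

#include <cmath>
#include <cstdio>

int main() {
    double nan = std::nan(""), x = 1.0;
    std::printf("lt=%d le=%d gt=%d ge=%d eq=%d ne=%d\n",
                nan < x, nan <= x, nan > x, nan >= x, nan == x, nan != x);
    // prints: lt=0 le=0 gt=0 ge=0 eq=0 ne=1
    return 0;
}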
@@ -1289,7 +1296,7 @@ void MacroAssembler::cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Regi
   Label no_set;
   if (is_single) {
     // jump if cmp1 != cmp2, including the case of NaN
-    // not jump (i.e. move src to dst) if cmp1 == cmp2
+    // fallthrough (i.e. move src to dst) if cmp1 == cmp2
     float_bne(cmp1, cmp2, no_set);
   } else {
     double_bne(cmp1, cmp2, no_set);
@@ -1298,11 +1305,6 @@ void MacroAssembler::cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Regi
   bind(no_set);
 }

-// Keep dst unchanged only if cmp1 == cmp2,
-// otherwise move src to dst, including the case where one of them is NaN.
-// Clarification:
-// java code : cmp1 == cmp2 ? dst : src
-// transformed to : CMove dst, (cmp1 ne cmp2), dst, src
 void MacroAssembler::cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
   if (UseZicond) {
     if (is_single) {
@@ -1318,7 +1320,7 @@ void MacroAssembler::cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Regi
   Label no_set;
   if (is_single) {
     // jump if cmp1 == cmp2
-    // not jump (i.e. move src to dst) if cmp1 != cmp2, including the case of NaN
+    // fallthrough (i.e. move src to dst) if cmp1 != cmp2, including the case of NaN
     float_beq(cmp1, cmp2, no_set);
   } else {
     double_beq(cmp1, cmp2, no_set);
@@ -1327,14 +1329,6 @@ void MacroAssembler::cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Regi
   bind(no_set);
 }

-// When cmp1 <= cmp2 or any of them is NaN then dst = src, otherwise, dst = dst
-// Clarification
-// scenario 1:
-// java code : cmp2 < cmp1 ? dst : src
-// transformed to : CMove dst, (cmp1 le cmp2), dst, src
-// scenario 2:
-// java code : cmp1 > cmp2 ? dst : src
-// transformed to : CMove dst, (cmp1 le cmp2), dst, src
 void MacroAssembler::cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
   if (UseZicond) {
     if (is_single) {
@@ -1350,7 +1344,7 @@ void MacroAssembler::cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Regi
   Label no_set;
   if (is_single) {
     // jump if cmp1 > cmp2
-    // not jump (i.e. move src to dst) if cmp1 <= cmp2 or either is NaN
+    // fallthrough (i.e. move src to dst) if cmp1 <= cmp2 or either is NaN
     float_bgt(cmp1, cmp2, no_set);
   } else {
     double_bgt(cmp1, cmp2, no_set);
@@ -1359,14 +1353,30 @@ void MacroAssembler::cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Regi
   bind(no_set);
 }

-// When cmp1 < cmp2 or any of them is NaN then dst = src, otherwise, dst = dst
-// Clarification
-// scenario 1:
-// java code : cmp2 <= cmp1 ? dst : src
-// transformed to : CMove dst, (cmp1 lt cmp2), dst, src
-// scenario 2:
-// java code : cmp1 >= cmp2 ? dst : src
-// transformed to : CMove dst, (cmp1 lt cmp2), dst, src
+void MacroAssembler::cmov_cmp_fp_ge(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
+  if (UseZicond) {
+    if (is_single) {
+      fle_s(t0, cmp2, cmp1);
+    } else {
+      fle_d(t0, cmp2, cmp1);
+    }
+    czero_nez(dst, dst, t0);
+    czero_eqz(t0 , src, t0);
+    orr(dst, dst, t0);
+    return;
+  }
+  Label no_set;
+  if (is_single) {
+    // jump if cmp1 < cmp2 or either is NaN
+    // fallthrough (i.e. move src to dst) if cmp1 >= cmp2
+    float_blt(cmp1, cmp2, no_set, false, true);
+  } else {
+    double_blt(cmp1, cmp2, no_set, false, true);
+  }
+  mv(dst, src);
+  bind(no_set);
+}
+
 void MacroAssembler::cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
   if (UseZicond) {
     if (is_single) {
@@ -1382,7 +1392,7 @@ void MacroAssembler::cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Regi
   Label no_set;
   if (is_single) {
     // jump if cmp1 >= cmp2
-    // not jump (i.e. move src to dst) if cmp1 < cmp2 or either is NaN
+    // fallthrough (i.e. move src to dst) if cmp1 < cmp2 or either is NaN
     float_bge(cmp1, cmp2, no_set);
   } else {
     double_bge(cmp1, cmp2, no_set);
@@ -1391,6 +1401,30 @@ void MacroAssembler::cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Regi
   bind(no_set);
 }

+void MacroAssembler::cmov_cmp_fp_gt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single) {
+  if (UseZicond) {
+    if (is_single) {
+      flt_s(t0, cmp2, cmp1);
+    } else {
+      flt_d(t0, cmp2, cmp1);
+    }
+    czero_nez(dst, dst, t0);
+    czero_eqz(t0 , src, t0);
+    orr(dst, dst, t0);
+    return;
+  }
+  Label no_set;
+  if (is_single) {
+    // jump if cmp1 <= cmp2 or either is NaN
+    // fallthrough (i.e. move src to dst) if cmp1 > cmp2
+    float_ble(cmp1, cmp2, no_set, false, true);
+  } else {
+    double_ble(cmp1, cmp2, no_set, false, true);
+  }
+  mv(dst, src);
+  bind(no_set);
+}
+
 // Float compare branch instructions

 #define INSN(NAME, FLOATCMP, BRANCH) \
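A standalone C++ model (not HotSpot code) of the branchless select that the UseZicond paths above build out of czero.nez / czero.eqz plus an OR:

#include <cstdint>

// cond is the result of the float compare (fle/flt above): non-zero means
// "move src into dst", zero means "keep dst".
uint64_t zicond_select(uint64_t cond, uint64_t dst, uint64_t src) {
    uint64_t keep = (cond != 0) ? 0 : dst;  // czero.nez dst, dst, cond
    uint64_t take = (cond == 0) ? 0 : src;  // czero.eqz t0,  src, cond
    return keep | take;                     // orr dst, dst, t0
}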
@@ -3739,11 +3773,8 @@ void MacroAssembler::check_klass_subtype(Register sub_klass,
   bind(L_failure);
 }

-void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp_reg) {
+void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod, Register tmp_reg) {
   ld(tmp_reg, Address(xthread, JavaThread::polling_word_offset()));
-  if (acquire) {
-    membar(MacroAssembler::LoadLoad | MacroAssembler::LoadStore);
-  }
   if (at_return) {
     bgtu(in_nmethod ? sp : fp, tmp_reg, slow_path, /* is_far */ true);
   } else {
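For reference, the ordering that the deleted acquire branch provided, modeled with standard C++ atomics (a sketch, not the HotSpot implementation; the variable name is hypothetical):

#include <atomic>
#include <cstdint>

std::atomic<uintptr_t> polling_word{0};

// load-acquire: no later load or store may be reordered before this load,
// the effect the removed LoadLoad|LoadStore membar enforced.
uintptr_t poll_with_acquire() {
    return polling_word.load(std::memory_order_acquire);
}

// plain load: the shape the new code path in this hunk emits unconditionally.
uintptr_t poll_plain() {
    return polling_word.load(std::memory_order_relaxed);
}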
@@ -5310,42 +5341,6 @@ void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, R
   add(final_dest_hi, dest_hi, carry);
 }

-/**
- * Multiply 32 bit by 32 bit first loop.
- */
-void MacroAssembler::multiply_32_x_32_loop(Register x, Register xstart, Register x_xstart,
-                                           Register y, Register y_idx, Register z,
-                                           Register carry, Register product,
-                                           Register idx, Register kdx) {
-  // jlong carry, x[], y[], z[];
-  // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
-  //   long product = y[idx] * x[xstart] + carry;
-  //   z[kdx] = (int)product;
-  //   carry = product >>> 32;
-  // }
-  // z[xstart] = (int)carry;
-
-  Label L_first_loop, L_first_loop_exit;
-  blez(idx, L_first_loop_exit);
-
-  shadd(t0, xstart, x, t0, LogBytesPerInt);
-  lwu(x_xstart, Address(t0, 0));
-
-  bind(L_first_loop);
-  subiw(idx, idx, 1);
-  shadd(t0, idx, y, t0, LogBytesPerInt);
-  lwu(y_idx, Address(t0, 0));
-  mul(product, x_xstart, y_idx);
-  add(product, product, carry);
-  srli(carry, product, 32);
-  subiw(kdx, kdx, 1);
-  shadd(t0, kdx, z, t0, LogBytesPerInt);
-  sw(product, Address(t0, 0));
-  bgtz(idx, L_first_loop);
-
-  bind(L_first_loop_exit);
-}
-
 /**
  * Multiply 64 bit by 64 bit first loop.
  */
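A self-contained C++ rendering (a sketch with a simplified signature, mirroring the asm flow rather than the deleted pseudocode comment) of what the removed multiply_32_x_32_loop computed: one 32-bit digit of x times all digits of y, with carry propagation.

#include <cstdint>

void multiply_32_x_32_first_loop(const uint32_t* x, int xstart,
                                 const uint32_t* y, uint32_t* z,
                                 int idx, int kdx) {
    uint64_t carry = 0;                            // caller did mv(carry, zr)
    while (idx > 0) {                              // blez/bgtz bounds in the asm
        idx--;                                     // subiw(idx, idx, 1)
        uint64_t product = (uint64_t)x[xstart] * y[idx] + carry;
        carry = product >> 32;                     // srli(carry, product, 32)
        kdx--;                                     // subiw(kdx, kdx, 1)
        z[kdx] = (uint32_t)product;                // sw stores the low 32 bits
    }
    z[xstart] = (uint32_t)carry;                   // done by the caller in the asm
}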
@@ -5562,77 +5557,16 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Regi
   const Register carry = tmp5;
   const Register product = xlen;
   const Register x_xstart = tmp0;
+  const Register jdx = tmp1;

   mv(idx, ylen); // idx = ylen;
   addw(kdx, xlen, ylen); // kdx = xlen+ylen;
   mv(carry, zr); // carry = 0;

-  Label L_multiply_64_x_64_loop, L_done;
+  Label L_done;

   subiw(xstart, xlen, 1);
   bltz(xstart, L_done);

-  const Register jdx = tmp1;
-
-  if (AvoidUnalignedAccesses) {
-    int base_offset = arrayOopDesc::base_offset_in_bytes(T_INT);
-    assert((base_offset % (UseCompactObjectHeaders ? 4 :
-                           (UseCompressedClassPointers ? 8 : 4))) == 0, "Must be");
-
-    if ((base_offset % 8) == 0) {
-      // multiply_64_x_64_loop emits 8-byte load/store to access two elements
-      // at a time from int arrays x and y. When base_offset is 8 bytes, these
-      // accesses are naturally aligned if both xlen and ylen are even numbers.
-      orr(t0, xlen, ylen);
-      test_bit(t0, t0, 0);
-      beqz(t0, L_multiply_64_x_64_loop);
-    }
-
-    Label L_second_loop_unaligned, L_third_loop, L_third_loop_exit;
-
-    multiply_32_x_32_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
-    shadd(t0, xstart, z, t0, LogBytesPerInt);
-    sw(carry, Address(t0, 0));
-
-    bind(L_second_loop_unaligned);
-    mv(carry, zr);
-    mv(jdx, ylen);
-    subiw(xstart, xstart, 1);
-    bltz(xstart, L_done);
-
-    subi(sp, sp, 2 * wordSize);
-    sd(z, Address(sp, 0));
-    sd(zr, Address(sp, wordSize));
-    shadd(t0, xstart, z, t0, LogBytesPerInt);
-    addi(z, t0, 4);
-    shadd(t0, xstart, x, t0, LogBytesPerInt);
-    lwu(product, Address(t0, 0));
-
-    blez(jdx, L_third_loop_exit);
-
-    bind(L_third_loop);
-    subiw(jdx, jdx, 1);
-    shadd(t0, jdx, y, t0, LogBytesPerInt);
-    lwu(t0, Address(t0, 0));
-    mul(t1, t0, product);
-    add(t0, t1, carry);
-    shadd(tmp6, jdx, z, t1, LogBytesPerInt);
-    lwu(t1, Address(tmp6, 0));
-    add(t0, t0, t1);
-    sw(t0, Address(tmp6, 0));
-    srli(carry, t0, 32);
-    bgtz(jdx, L_third_loop);
-
-    bind(L_third_loop_exit);
-    ld(z, Address(sp, 0));
-    addi(sp, sp, 2 * wordSize);
-    shadd(t0, xstart, z, t0, LogBytesPerInt);
-    sw(carry, Address(t0, 0));
-
-    j(L_second_loop_unaligned);
-  }
-
-  bind(L_multiply_64_x_64_loop);
   multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);

   Label L_second_loop_aligned;

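The deleted alignment guard in plain C++ (a sketch of the reasoning in the removed comment): with an 8-byte-aligned int-array base, the paired 4-byte element accesses of multiply_64_x_64_loop stay 8-byte aligned exactly when both element counts are even.

// Mirrors orr(t0, xlen, ylen); test_bit(t0, t0, 0); beqz(...) in the removed code.
bool can_use_64_x_64_loop(int xlen, int ylen) {
    return ((xlen | ylen) & 1) == 0;  // both even <=> OR of the low bits is 0
}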
@@ -44,7 +44,7 @@ class MacroAssembler: public Assembler {

   MacroAssembler(CodeBuffer* code) : Assembler(code) {}

-  void safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp_reg = t0);
+  void safepoint_poll(Label& slow_path, bool at_return, bool in_nmethod, Register tmp_reg = t0);

   // Alignment
   int align(int modulus, int extra_offset = 0);
@@ -660,7 +660,9 @@ class MacroAssembler: public Assembler {
   void cmov_cmp_fp_eq(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
   void cmov_cmp_fp_ne(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
   void cmov_cmp_fp_le(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
+  void cmov_cmp_fp_ge(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
   void cmov_cmp_fp_lt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);
+  void cmov_cmp_fp_gt(FloatRegister cmp1, FloatRegister cmp2, Register dst, Register src, bool is_single);

 public:
   // We try to follow risc-v asm menomics.
@@ -1382,10 +1384,6 @@ public:
   void adc(Register dst, Register src1, Register src2, Register carry);
   void add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
                        Register src1, Register src2, Register carry);
-  void multiply_32_x_32_loop(Register x, Register xstart, Register x_xstart,
-                             Register y, Register y_idx, Register z,
-                             Register carry, Register product,
-                             Register idx, Register kdx);
   void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
                              Register y, Register y_idx, Register z,
                              Register carry, Register product,

@@ -356,18 +356,6 @@ void NativeMovRegMem::verify() {

 void NativeJump::verify() { }

-
-void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
-  // Patching to not_entrant can happen while activations of the method are
-  // in use. The patching in that instance must happen only when certain
-  // alignment restrictions are true. These guarantees check those
-  // conditions.
-
-  // Must be 4 bytes aligned
-  MacroAssembler::assert_alignment(verified_entry);
-}
-
 address NativeJump::jump_destination() const {
   address dest = MacroAssembler::target_addr_for_insn(instruction_address());

@@ -420,12 +408,6 @@ bool NativeInstruction::is_safepoint_poll() {
   return MacroAssembler::is_lwu_to_zr(address(this));
 }

-// A 16-bit instruction with all bits ones is permanently reserved as an illegal instruction.
-bool NativeInstruction::is_sigill_not_entrant() {
-  // jvmci
-  return uint_at(0) == 0xffffffff;
-}
-
 void NativeIllegalInstruction::insert(address code_pos) {
   assert_cond(code_pos != nullptr);
   Assembler::sd_instr(code_pos, 0xffffffff); // all bits ones is permanently reserved as an illegal instruction
@@ -437,45 +419,6 @@ bool NativeInstruction::is_stop() {

 //-------------------------------------------------------------------

-// MT-safe inserting of a jump over a jump or a nop (used by
-// nmethod::make_not_entrant)
-void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
-
-  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");
-
-  assert(nativeInstruction_at(verified_entry)->is_jump_or_nop() ||
-         nativeInstruction_at(verified_entry)->is_sigill_not_entrant(),
-         "riscv cannot replace non-jump with jump");
-
-  check_verified_entry_alignment(entry, verified_entry);
-
-  // Patch this nmethod atomically.
-  if (Assembler::reachable_from_branch_at(verified_entry, dest)) {
-    ptrdiff_t offset = dest - verified_entry;
-    guarantee(Assembler::is_simm21(offset) && ((offset % 2) == 0),
-              "offset is too large to be patched in one jal instruction."); // 1M
-
-    uint32_t insn = 0;
-    address pInsn = (address)&insn;
-    Assembler::patch(pInsn, 31, 31, (offset >> 20) & 0x1);
-    Assembler::patch(pInsn, 30, 21, (offset >> 1) & 0x3ff);
-    Assembler::patch(pInsn, 20, 20, (offset >> 11) & 0x1);
-    Assembler::patch(pInsn, 19, 12, (offset >> 12) & 0xff);
-    Assembler::patch(pInsn, 11, 7, 0); // zero, no link jump
-    Assembler::patch(pInsn, 6, 0, 0b1101111); // j, (jal x0 offset)
-    Assembler::sd_instr(verified_entry, insn);
-  } else {
-    // We use an illegal instruction for marking a method as
-    // not_entrant.
-    NativeIllegalInstruction::insert(verified_entry);
-  }
-
-  ICache::invalidate_range(verified_entry, instruction_size);
-}
-
 //-------------------------------------------------------------------

 void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
   CodeBuffer cb(code_pos, instruction_size);
   MacroAssembler a(&cb);

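For reference, the RISC-V J-type immediate scatter that the removed patch code performed, as a standalone C++ helper (assumes an even offset within +/-1 MiB, as the deleted guarantee checked, and an arithmetic right shift for negative offsets):

#include <cstdint>

uint32_t encode_jal_x0(int32_t offset) {
    uint32_t insn = 0b1101111;                         // jal opcode, rd = x0 (plain j)
    insn |= (uint32_t)((offset >> 20) & 0x1)   << 31;  // imm[20]    -> bit 31
    insn |= (uint32_t)((offset >> 1)  & 0x3ff) << 21;  // imm[10:1]  -> bits 30:21
    insn |= (uint32_t)((offset >> 11) & 0x1)   << 20;  // imm[11]    -> bit 20
    insn |= (uint32_t)((offset >> 12) & 0xff)  << 12;  // imm[19:12] -> bits 19:12
    return insn;                                       // bits [11:7] stay 0: no link
}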
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -74,7 +74,6 @@ class NativeInstruction {
   bool is_nop() const;
   bool is_jump_or_nop();
   bool is_safepoint_poll();
-  bool is_sigill_not_entrant();
   bool is_stop();

 protected:
@@ -274,9 +273,6 @@ class NativeJump: public NativeInstruction {

   // Insertion of native jump instruction
   static void insert(address code_pos, address entry);
-  // MT-safe insertion of native jump at verified method entry
-  static void check_verified_entry_alignment(address entry, address verified_entry);
-  static void patch_verified_entry(address entry, address verified_entry, address dest);
 };

 inline NativeJump* nativeJump_at(address addr) {
@@ -1368,14 +1368,6 @@ void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
   // n.b. frame size includes space for return pc and fp
   const int framesize = C->output()->frame_size_in_bytes();

-  // insert a nop at the start of the prolog so we can patch in a
-  // branch if we need to invalidate the method later
-  {
-    Assembler::IncompressibleScope scope(masm); // keep the nop as 4 bytes for patching.
-    MacroAssembler::assert_alignment(__ pc());
-    __ nop(); // 4 bytes
-  }
-
   assert_cond(C != nullptr);

   if (C->clinit_barrier_on_entry()) {
@@ -1493,7 +1485,7 @@ void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
       code_stub = &stub->entry();
     }
     __ relocate(relocInfo::poll_return_type);
-    __ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
+    __ safepoint_poll(*code_stub, true /* at_return */, true /* in_nmethod */);
   }
 }

@@ -1804,7 +1796,6 @@ void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
   // This is the unverified entry point.
   __ ic_check(CodeEntryAlignment);

-  // Verified entry point must be properly 4 bytes aligned for patching by NativeJump::patch_verified_entry().
   // ic_check() aligns to CodeEntryAlignment >= InteriorEntryAlignment(min 16) > NativeInstruction::instruction_size(4).
   assert(((__ offset()) % CodeEntryAlignment) == 0, "Misaligned verified entry point");
 }
@@ -2619,6 +2610,10 @@ ins_attrib ins_alignment(4); // Required alignment attribute (must
                              // compute_padding() function must be
                              // provided for the instruction

+// Whether this node is expanded during code emission into a sequence of
+// instructions and the first instruction can perform an implicit null check.
+ins_attrib ins_is_late_expanded_null_check_candidate(false);
+
 //----------OPERANDS-----------------------------------------------------------
 // Operand definitions must precede instruction definitions for correct parsing
 // in the ADLC because operands constitute user defined types which are used in
@@ -8195,7 +8190,7 @@ instruct unnecessary_membar_volatile_rvtso() %{
   ins_cost(0);

   size(0);
-
+
   format %{ "#@unnecessary_membar_volatile_rvtso (unnecessary so empty encoding)" %}
   ins_encode %{
     __ block_comment("unnecessary_membar_volatile_rvtso");
@@ -1777,15 +1777,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,

   // check for safepoint operation in progress and/or pending suspend requests
   {
-    // We need an acquire here to ensure that any subsequent load of the
-    // global SafepointSynchronize::_state flag is ordered after this load
-    // of the thread-local polling word. We don't want this poll to
-    // return false (i.e. not safepointing) and a later poll of the global
-    // SafepointSynchronize::_state spuriously to return true.
-    // This is to avoid a race when we're in a native->Java transition
-    // racing the code which wakes up from a safepoint.
-
-    __ safepoint_poll(safepoint_in_progress, true /* at_return */, true /* acquire */, false /* in_nmethod */);
+    __ safepoint_poll(safepoint_in_progress, true /* at_return */, false /* in_nmethod */);
     __ lwu(t0, Address(xthread, JavaThread::suspend_flags_offset()));
     __ bnez(t0, safepoint_in_progress);
     __ bind(safepoint_in_progress_done);
@@ -26,6 +26,13 @@
 #ifndef CPU_RISCV_STUBDECLARATIONS_HPP
 #define CPU_RISCV_STUBDECLARATIONS_HPP

+#define STUBGEN_PREUNIVERSE_BLOBS_ARCH_DO(do_stub,          \
+                                          do_arch_blob,     \
+                                          do_arch_entry,    \
+                                          do_arch_entry_init) \
+  do_arch_blob(preuniverse, 0)                              \
+
+
 #define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub,          \
                                       do_arch_blob,     \
                                       do_arch_entry,    \
@@ -6660,6 +6660,10 @@ static const int64_t right_3_bits = right_n_bits(3);
 #undef __

 // Initialization
+void generate_preuniverse_stubs() {
+  // preuniverse stubs are not needed for riscv
+}
+
 void generate_initial_stubs() {
   // Generate initial stubs and initializes the entry points

@@ -6815,6 +6819,9 @@ static const int64_t right_3_bits = right_n_bits(3);
 public:
   StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) {
     switch(blob_id) {
+    case preuniverse_id:
+      generate_preuniverse_stubs();
+      break;
     case initial_id:
       generate_initial_stubs();
       break;
@@ -1229,15 +1229,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
   {
     Label L, Continue;

-    // We need an acquire here to ensure that any subsequent load of the
-    // global SafepointSynchronize::_state flag is ordered after this load
-    // of the thread-local polling word. We don't want this poll to
-    // return false (i.e. not safepointing) and a later poll of the global
-    // SafepointSynchronize::_state spuriously to return true.
-    //
-    // This is to avoid a race when we're in a native->Java transition
-    // racing the code which wakes up from a safepoint.
-    __ safepoint_poll(L, true /* at_return */, true /* acquire */, false /* in_nmethod */);
+    __ safepoint_poll(L, true /* at_return */, false /* in_nmethod */);
     __ lwu(t1, Address(xthread, JavaThread::suspend_flags_offset()));
     __ beqz(t1, Continue);
     __ bind(L);
@@ -1388,7 +1380,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {

   Label slow_path;
   Label fast_path;
-  __ safepoint_poll(slow_path, true /* at_return */, false /* acquire */, false /* in_nmethod */);
+  __ safepoint_poll(slow_path, true /* at_return */, false /* in_nmethod */);
   __ j(fast_path);

   __ bind(slow_path);

@@ -1608,7 +1608,7 @@ void TemplateTable::float_cmp(bool is_float, int unordered_result) {
 }

 void TemplateTable::branch(bool is_jsr, bool is_wide) {
-  __ profile_taken_branch(x10, x11);
+  __ profile_taken_branch(x10);
   const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
                              InvocationCounter::counter_offset();
   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
@@ -1657,7 +1657,6 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
   if (UseLoopCounter) {
     // increment backedge counter for backward branches
     // x10: MDO
-    // x11: MDO bumped taken-count
     // x12: target offset
     __ bgtz(x12, dispatch); // count only if backward branch

@@ -1666,12 +1665,10 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
     __ ld(t0, Address(xmethod, Method::method_counters_offset()));
     __ bnez(t0, has_counters);
     __ push_reg(x10);
-    __ push_reg(x11);
     __ push_reg(x12);
     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                                        InterpreterRuntime::build_method_counters), xmethod);
     __ pop_reg(x12);
-    __ pop_reg(x11);
     __ pop_reg(x10);
     __ ld(t0, Address(xmethod, Method::method_counters_offset()));
     __ beqz(t0, dispatch); // No MethodCounters allocated, OutOfMemory

@@ -203,15 +203,15 @@ void VM_Version::common_initialize() {
     }
   }

-  // Misc Intrinsics could depend on RVV
+  // Misc Intrinsics that could depend on RVV.

-  if (UseZba || UseRVV) {
+  if (!AvoidUnalignedAccesses && (UseZba || UseRVV)) {
     if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
       FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
     }
   } else {
     if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
-      warning("CRC32 intrinsic requires Zba or RVV instructions (not available on this CPU)");
+      warning("CRC32 intrinsic are not available on this CPU.");
     }
     FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
   }
@@ -325,20 +325,40 @@ void VM_Version::c2_initialize() {
     FLAG_SET_DEFAULT(UseMulAddIntrinsic, true);
   }

-  if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
-    FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, true);
+  if (!AvoidUnalignedAccesses) {
+    if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
+      FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, true);
+    }
+  } else if (UseMultiplyToLenIntrinsic) {
+    warning("Intrinsics for BigInteger.multiplyToLen() not available on this CPU.");
+    FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, false);
   }

-  if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
-    FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, true);
+  if (!AvoidUnalignedAccesses) {
+    if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
+      FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, true);
+    }
+  } else if (UseSquareToLenIntrinsic) {
+    warning("Intrinsics for BigInteger.squareToLen() not available on this CPU.");
+    FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, false);
   }

-  if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
-    FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, true);
+  if (!AvoidUnalignedAccesses) {
+    if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
+      FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, true);
+    }
+  } else if (UseMontgomeryMultiplyIntrinsic) {
+    warning("Intrinsics for BigInteger.montgomeryMultiply() not available on this CPU.");
+    FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, false);
   }

-  if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
-    FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, true);
+  if (!AvoidUnalignedAccesses) {
+    if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
+      FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, true);
+    }
+  } else if (UseMontgomerySquareIntrinsic) {
+    warning("Intrinsics for BigInteger.montgomerySquare() not available on this CPU.");
+    FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, false);
  }

   // Adler32
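All four hunks above converge on one gating pattern; a compact C++ sketch (names hypothetical, standing in for the FLAG_IS_DEFAULT / FLAG_SET_DEFAULT macros):

#include <cstdio>

struct Flag { bool value; bool is_default; };

// Enable an unaligned-access-hungry intrinsic by default only when the CPU
// handles unaligned accesses well; otherwise warn and force it off.
void gate_intrinsic(Flag& f, bool avoid_unaligned, const char* what) {
    if (!avoid_unaligned) {
        if (f.is_default) f.value = true;
    } else if (f.value) {
        std::printf("Intrinsics for %s not available on this CPU.\n", what);
        f.value = false;
    }
}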
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -44,17 +44,17 @@ define_pd_global(intx, CompileThreshold, 1000);

 define_pd_global(intx, OnStackReplacePercentage, 1400);
 define_pd_global(bool, ProfileInterpreter, false);
-define_pd_global(uintx, ReservedCodeCacheSize, 32*M);
-define_pd_global(uintx, NonProfiledCodeHeapSize, 13*M);
-define_pd_global(uintx, ProfiledCodeHeapSize, 14*M);
-define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M);
-define_pd_global(uintx, CodeCacheExpansionSize, 32*K);
-define_pd_global(uintx, CodeCacheMinBlockLength, 1);
-define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
+define_pd_global(size_t, ReservedCodeCacheSize, 32*M);
+define_pd_global(size_t, NonProfiledCodeHeapSize, 13*M);
+define_pd_global(size_t, ProfiledCodeHeapSize, 14*M);
+define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M);
+define_pd_global(size_t, CodeCacheExpansionSize, 32*K);
+define_pd_global(size_t, CodeCacheMinBlockLength, 1);
+define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);
 define_pd_global(bool, NeverActAsServerClassMachine, true);
 define_pd_global(size_t, NewSizeThreadIncrease, 16*K);
 define_pd_global(uint64_t, MaxRAM, 1ULL*G);
-define_pd_global(uintx, InitialCodeCacheSize, 160*K);
+define_pd_global(size_t, InitialCodeCacheSize, 160*K);
 #endif // !COMPILER2

 define_pd_global(bool, UseTypeProfile, false);

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -67,17 +67,17 @@ define_pd_global(uint, SuperWordStoreToLoadForwardingFailureDetection, 16);
 define_pd_global(bool, IdealizeClearArrayNode, false);

 // InitialCodeCacheSize derived from specjbb2000 run.
-define_pd_global(uintx, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
-define_pd_global(uintx, ReservedCodeCacheSize, 48*M);
-define_pd_global(uintx, NonProfiledCodeHeapSize, 21*M);
-define_pd_global(uintx, ProfiledCodeHeapSize, 22*M);
-define_pd_global(uintx, NonNMethodCodeHeapSize, 5*M);
-define_pd_global(uintx, CodeCacheExpansionSize, 64*K);
+define_pd_global(size_t, InitialCodeCacheSize, 2048*K); // Integral multiple of CodeCacheExpansionSize
+define_pd_global(size_t, ReservedCodeCacheSize, 48*M);
+define_pd_global(size_t, NonProfiledCodeHeapSize, 21*M);
+define_pd_global(size_t, ProfiledCodeHeapSize, 22*M);
+define_pd_global(size_t, NonNMethodCodeHeapSize, 5*M);
+define_pd_global(size_t, CodeCacheExpansionSize, 64*K);

 // Ergonomics related flags
 define_pd_global(uint64_t, MaxRAM, 128ULL*G);
-define_pd_global(uintx, CodeCacheMinBlockLength, 4);
-define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K);
+define_pd_global(size_t, CodeCacheMinBlockLength, 4);
+define_pd_global(size_t, CodeCacheMinimumUseSpace, 400*K);

 define_pd_global(bool, TrapBasedRangeChecks, false); // Not needed on z/Architecture.

@@ -410,7 +410,7 @@

 // C2I adapter frames:
 //
-// STACK (interpreted called from compiled, on entry to frame manager):
+// STACK (interpreted called from compiled, on entry to template interpreter):
 //
 //   [TOP_C2I_FRAME]
 //   [JIT_FRAME]

@@ -180,7 +180,7 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
   __ z_lg(Z_R0_scratch, in_bytes(bs_nm->thread_disarmed_guard_value_offset()), Z_thread); // 6 bytes

   // Compare to current patched value:
-  __ z_cfi(Z_R0_scratch, /* to be patched */ -1); // 6 bytes (2 + 4 byte imm val)
+  __ z_cfi(Z_R0_scratch, /* to be patched */ 0); // 6 bytes (2 + 4 byte imm val)

   // Conditional Jump
   __ z_larl(Z_R14, (Assembler::instr_len((unsigned long)LARL_ZOPC) + Assembler::instr_len((unsigned long)BCR_ZOPC)) / 2); // 6 bytes

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -38,7 +38,7 @@ define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap nu

 define_pd_global(bool, DelayCompilerStubsGeneration, COMPILER2_OR_JVMCI);

-define_pd_global(uintx, CodeCacheSegmentSize, 256);
+define_pd_global(size_t, CodeCacheSegmentSize, 256);
 // This shall be at least 32 for proper branch target alignment.
 // Ideally, this is 256 (cache line size). This keeps code end data
 // on separate lines. But we reduced it to 64 since 256 increased

@@ -167,27 +167,6 @@ bool NativeInstruction::is_illegal() {
   return halfword_at(-2) == illegal_instruction();
 }

-// We use an illtrap for marking a method as not_entrant.
-bool NativeInstruction::is_sigill_not_entrant() {
-  if (!is_illegal()) return false; // Just a quick path.
-
-  // One-sided error of is_illegal tolerable here
-  // (see implementation of is_illegal() for details).
-
-  CodeBlob* cb = CodeCache::find_blob(addr_at(0));
-  if (cb == nullptr || !cb->is_nmethod()) {
-    return false;
-  }
-
-  nmethod *nm = (nmethod *)cb;
-  // This method is not_entrant if the illtrap instruction
-  // is located at the verified entry point.
-  // BE AWARE: the current pc (this) points to the instruction after the
-  // "illtrap" location.
-  address sig_addr = ((address) this) - 2;
-  return nm->verified_entry_point() == sig_addr;
-}
-
 bool NativeInstruction::is_jump() {
   unsigned long inst;
   Assembler::get_instruction((address)this, &inst);
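The removed test in standalone form (a sketch with hypothetical types): the current pc sits just past the 2-byte illtrap, so a method is not_entrant exactly when that illtrap address equals the nmethod's verified entry point.

#include <cstdint>

struct NMethodLike { const uint8_t* verified_entry_point; };

bool is_sigill_not_entrant(const uint8_t* pc_after_trap, const NMethodLike* nm) {
    const uint8_t* sig_addr = pc_after_trap - 2;   // illtrap is 2 bytes on s390
    return nm != nullptr && nm->verified_entry_point == sig_addr;
}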
@@ -620,19 +599,6 @@ void NativeJump::verify() {
   fatal("this is not a `NativeJump' site");
 }

-// Patch atomically with an illtrap.
-void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
-  ResourceMark rm;
-  int code_size = 2;
-  CodeBuffer cb(verified_entry, code_size + 1);
-  MacroAssembler* a = new MacroAssembler(&cb);
-#ifdef COMPILER2
-  assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");
-#endif
-  a->z_illtrap();
-  ICache::invalidate_range(verified_entry, code_size);
-}
-
 #undef LUCY_DBG

 //-------------------------------------

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016, 2024 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -85,9 +85,6 @@ class NativeInstruction {
   // Bcrl is currently the only accepted instruction here.
   bool is_jump();

-  // We use an illtrap for marking a method as not_entrant.
-  bool is_sigill_not_entrant();
-
   bool is_safepoint_poll() {
     // Is the current instruction a POTENTIAL read access to the polling page?
     // The instruction's current arguments are not checked!
@@ -609,11 +606,6 @@ class NativeJump: public NativeInstruction {

   // Insertion of native jump instruction.
   static void insert(address code_pos, address entry);
-
-  // MT-safe insertion of native jump at verified method entry.
-  static void check_verified_entry_alignment(address entry, address verified_entry) { }
-
-  static void patch_verified_entry(address entry, address verified_entry, address dest);
 };

 //-------------------------------------

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016, 2023 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -414,7 +414,7 @@ constexpr FloatRegister Z_FARG2 = Z_F2;
 constexpr FloatRegister Z_FARG3 = Z_F4;
 constexpr FloatRegister Z_FARG4 = Z_F6;

-// Register declarations to be used in frame manager assembly code.
+// Register declarations to be used in template interpreter assembly code.
 // Use only non-volatile registers in order to keep values across C-calls.

 // Register to cache the integer value on top of the operand stack.
@@ -439,7 +439,7 @@ constexpr Register Z_bcp = Z_R13;
 // Bytecode which is dispatched (short lived!).
 constexpr Register Z_bytecode = Z_R14;

-// Temporary registers to be used within frame manager. We can use
+// Temporary registers to be used within template interpreter. We can use
 // the nonvolatile ones because the call stub has saved them.
 // Use only non-volatile registers in order to keep values across C-calls.
 constexpr Register Z_tmp_1 = Z_R10;

@@ -118,7 +118,7 @@ ExceptionBlob* OptoRuntime::generate_exception_blob() {
   __ z_lgr(Z_SP, saved_sp);

   // [Z_RET] isn't null was possible in hotspot5 but not in sapjvm6.
-  // C2I adapter extensions are now removed by a resize in the frame manager
+  // C2I adapter extensions are now removed by a resize in the template interpreter
   // (unwind_initial_activation_pending_exception).
 #ifdef ASSERT
   __ z_ltgr(handle_exception, handle_exception);

@@ -2139,7 +2139,7 @@ static address gen_c2i_adapter(MacroAssembler *masm,
   Register value = Z_R12;

   // Remember the senderSP so we can pop the interpreter arguments off of the stack.
-  // In addition, frame manager expects initial_caller_sp in Z_R10.
+  // In addition, template interpreter expects initial_caller_sp in Z_R10.
   __ z_lgr(sender_SP, Z_SP);

   // This should always fit in 14 bit immediate.

@@ -26,6 +26,13 @@
 #ifndef CPU_S390_STUBDECLARATIONS_HPP
 #define CPU_S390_STUBDECLARATIONS_HPP

+#define STUBGEN_PREUNIVERSE_BLOBS_ARCH_DO(do_stub,          \
+                                          do_arch_blob,     \
+                                          do_arch_entry,    \
+                                          do_arch_entry_init) \
+  do_arch_blob(preuniverse, 0)                              \
+
+
 #define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub,          \
                                       do_arch_blob,     \
                                       do_arch_entry,    \

@@ -115,7 +115,7 @@ class StubGenerator: public StubCodeGenerator {
   //   [SP+176] - thread : Thread*
   //
   address generate_call_stub(address& return_address) {
-    // Set up a new C frame, copy Java arguments, call frame manager
+    // Set up a new C frame, copy Java arguments, call template interpreter
     // or native_entry, and process result.

     StubGenStubId stub_id = StubGenStubId::call_stub_id;
@@ -272,10 +272,10 @@ class StubGenerator: public StubCodeGenerator {

     BLOCK_COMMENT("call {");
     {
-      // Call frame manager or native entry.
+      // Call template interpreter or native entry.

       //
-      // Register state on entry to frame manager / native entry:
+      // Register state on entry to template interpreter / native entry:
       //
       //   Z_ARG1 = r_top_of_arguments_addr - intptr_t *sender tos (prepushed)
       //   Lesp = (SP) + copied_arguments_offset - 8
@@ -290,7 +290,7 @@ class StubGenerator: public StubCodeGenerator {
       __ z_lgr(Z_esp, r_top_of_arguments_addr);

       //
-      // Stack on entry to frame manager / native entry:
+      // Stack on entry to template interpreter / native entry:
       //
       //   F0 [TOP_IJAVA_FRAME_ABI]
       //      [outgoing Java arguments]
@@ -300,7 +300,7 @@ class StubGenerator: public StubCodeGenerator {
       //

       // Do a light-weight C-call here, r_new_arg_entry holds the address
-      // of the interpreter entry point (frame manager or native entry)
+      // of the interpreter entry point (template interpreter or native entry)
       // and save runtime-value of return_pc in return_address
       // (call by reference argument).
       return_address = __ call_stub(r_new_arg_entry);
@@ -309,11 +309,11 @@ class StubGenerator: public StubCodeGenerator {

     {
       BLOCK_COMMENT("restore registers {");
-      // Returned from frame manager or native entry.
+      // Returned from template interpreter or native entry.
       // Now pop frame, process result, and return to caller.

       //
-      // Stack on exit from frame manager / native entry:
+      // Stack on exit from template interpreter / native entry:
       //
       //   F0 [ABI]
       //      ...
@@ -330,7 +330,7 @@ class StubGenerator: public StubCodeGenerator {
       __ pop_frame();

       // Reload some volatile registers which we've spilled before the call
-      // to frame manager / native entry.
+      // to template interpreter / native entry.
       // Access all locals via frame pointer, because we know nothing about
       // the topmost frame's size.
       __ z_lg(r_arg_result_addr, result_address_offset, r_entryframe_fp);
@@ -3197,7 +3197,7 @@ class StubGenerator: public StubCodeGenerator {

     // VM-Call: BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr)
     __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetNMethod::nmethod_stub_entry_barrier));
-    __ z_ltr(Z_R0_scratch, Z_RET);
+    __ z_ltr(Z_RET, Z_RET);

     // VM-Call Epilogue
     __ restore_volatile_regs(Z_SP, frame::z_abi_160_size, true, false);
@@ -3283,6 +3283,10 @@ class StubGenerator: public StubCodeGenerator {
     return start;
   }

+  void generate_preuniverse_stubs() {
+    // preuniverse stubs are not needed for s390
+  }
+
   void generate_initial_stubs() {
     // Generates all stubs and initializes the entry points.

@@ -3418,6 +3422,9 @@ class StubGenerator: public StubCodeGenerator {
 public:
   StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) {
     switch(blob_id) {
+    case preuniverse_id:
+      generate_preuniverse_stubs();
+      break;
     case initial_id:
       generate_initial_stubs();
       break;

@@ -1217,7 +1217,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {

 // Various method entries

-// Math function, frame manager must set up an interpreter state, etc.
+// Math function, template interpreter must set up an interpreter state, etc.
 address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {

   // Decide what to do: Use same platform specific instructions and runtime calls as compilers.

Some files were not shown because too many files have changed in this diff.