mirror of
https://github.com/JetBrains/JetBrainsRuntime.git
synced 2025-12-07 09:59:37 +01:00
Compare commits
523 Commits
.github/actions/get-msys2/action.yml (vendored, 8 changes)

@@ -30,15 +30,15 @@ runs:
using: composite
steps:
- name: 'Install MSYS2'
uses: msys2/setup-msys2@v2.22.0
id: msys2
uses: msys2/setup-msys2@v2.28.0
with:
install: 'autoconf tar unzip zip make'
path-type: minimal
location: ${{ runner.tool_cache }}/msys2
release: false

# We can't run bash until this is completed, so stick with pwsh
- name: 'Set MSYS2 path'
run: |
# Prepend msys2/msys64/usr/bin to the PATH
echo "$env:RUNNER_TOOL_CACHE/msys2/msys64/usr/bin" >> $env:GITHUB_PATH
echo "${{ steps.msys2.outputs.msys2-location }}/usr/bin" >> $env:GITHUB_PATH
shell: pwsh
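The step above must run under pwsh because MSYS2's bash is not on the PATH yet. For context, a minimal sketch of the same PATH mechanism from a later bash step, once MSYS2 is available; `MSYS2_LOCATION` is an illustrative variable standing in for the action output used above:

```
# GITHUB_PATH is provided by GitHub Actions: every line appended to it is
# prepended to PATH for all subsequent steps in the job.
MSYS2_LOCATION="${MSYS2_LOCATION:-$RUNNER_TOOL_CACHE/msys2}"
echo "$MSYS2_LOCATION/usr/bin" >> "$GITHUB_PATH"
```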
.github/workflows/build-cross-compile.yml (vendored, 12 changes)

@@ -64,33 +64,33 @@ jobs:
gnu-arch: aarch64
debian-arch: arm64
debian-repository: https://httpredir.debian.org/debian/
debian-version: bullseye
debian-version: trixie
tolerate-sysroot-errors: false
- target-cpu: arm
gnu-arch: arm
debian-arch: armhf
debian-repository: https://httpredir.debian.org/debian/
debian-version: bullseye
debian-version: trixie
tolerate-sysroot-errors: false
gnu-abi: eabihf
- target-cpu: s390x
gnu-arch: s390x
debian-arch: s390x
debian-repository: https://httpredir.debian.org/debian/
debian-version: bullseye
debian-version: trixie
tolerate-sysroot-errors: false
- target-cpu: ppc64le
gnu-arch: powerpc64le
debian-arch: ppc64el
debian-repository: https://httpredir.debian.org/debian/
debian-version: bullseye
debian-version: trixie
tolerate-sysroot-errors: false
- target-cpu: riscv64
gnu-arch: riscv64
debian-arch: riscv64
debian-repository: https://httpredir.debian.org/debian/
debian-version: sid
tolerate-sysroot-errors: true
debian-version: trixie
tolerate-sysroot-errors: false

steps:
- name: 'Checkout the JDK source'
@@ -125,7 +125,8 @@ if [ -d "$TOPLEVEL_DIR/.hg" ] ; then
VCS_TYPE="hg4idea"
fi

if [ -d "$TOPLEVEL_DIR/.git" ] ; then
# Git worktrees use a '.git' file rather than directory, so test both.
if [ -d "$TOPLEVEL_DIR/.git" -o -f "$TOPLEVEL_DIR/.git" ] ; then
VCS_TYPE="Git"
fi
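The extra `-f` test matters because a linked git worktree stores a plain `.git` file pointing back at the main repository instead of a `.git` directory. A quick way to see this (paths are illustrative):

```
# In the main checkout .git is a directory; in a linked worktree it is a file.
git -C /path/to/jbr worktree add ../jbr-feature
ls -ld /path/to/jbr/.git            # drwxr-xr-x ... .git
cat /path/to/jbr-feature/.git       # gitdir: /path/to/jbr/.git/worktrees/jbr-feature
# Hence the combined check used above:
[ -d "$TOPLEVEL_DIR/.git" -o -f "$TOPLEVEL_DIR/.git" ] && echo "Git checkout detected"
```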
@@ -1,5 +1,5 @@
#
# Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

@@ -78,7 +78,6 @@ src/jdk.jdi : jdk/src/jdk.jdi
src/jdk.jdwp.agent : jdk/src/jdk.jdwp.agent
src/jdk.jlink : jdk/src/jdk.jlink
src/jdk.jshell : langtools/src/jdk.jshell
src/jdk.jsobject : jdk/src/jdk.jsobject
src/jdk.jstatd : jdk/src/jdk.jstatd
src/jdk.localedata : jdk/src/jdk.localedata
src/jdk.management : jdk/src/jdk.management
@@ -1451,10 +1451,10 @@ of a cross-compiling toolchain and a sysroot environment which can
easily be used together with the <code>--with-devkit</code> configure
option to cross compile the JDK. On Linux/x86_64, the following
command:</p>
<pre><code>bash configure --with-devkit=<devkit-path> --openjdk-target=ppc64-linux-gnu && make</code></pre>
<p>will configure and build the JDK for Linux/ppc64 assuming that
<code><devkit-path></code> points to a Linux/x86_64 to Linux/ppc64
devkit.</p>
<pre><code>bash configure --with-devkit=<devkit-path> --openjdk-target=ppc64le-linux-gnu && make</code></pre>
<p>will configure and build the JDK for Linux/ppc64le assuming that
<code><devkit-path></code> points to a Linux/x86_64 to
Linux/ppc64le devkit.</p>
<p>Devkits can be created from the <code>make/devkit</code> directory by
executing:</p>
<pre><code>make [ TARGETS="<TARGET_TRIPLET>+" ] [ BASE_OS=<OS> ] [ BASE_OS_VERSION=<VER> ]</code></pre>

@@ -1481,22 +1481,22 @@ following targets are known to work:</p>
<td>arm-linux-gnueabihf</td>
</tr>
<tr class="even">
<td>ppc64-linux-gnu</td>
<td>ppc64le-linux-gnu</td>
</tr>
<tr class="odd">
<td>ppc64le-linux-gnu</td>
<td>riscv64-linux-gnu</td>
</tr>
<tr class="even">
<td>s390x-linux-gnu</td>
</tr>
</tbody>
</table>
<p><code>BASE_OS</code> must be one of "OEL6" for Oracle Enterprise
Linux 6 or "Fedora" (if not specified "OEL6" will be the default). If
the base OS is "Fedora" the corresponding Fedora release can be
specified with the help of the <code>BASE_OS_VERSION</code> option (with
"27" as default version). If the build is successful, the new devkits
can be found in the <code>build/devkit/result</code> subdirectory:</p>
<p><code>BASE_OS</code> must be one of <code>OL</code> for Oracle
Enterprise Linux or <code>Fedora</code>. If the base OS is
<code>Fedora</code> the corresponding Fedora release can be specified
with the help of the <code>BASE_OS_VERSION</code> option. If the build
is successful, the new devkits can be found in the
<code>build/devkit/result</code> subdirectory:</p>
<pre><code>cd make/devkit
make TARGETS="ppc64le-linux-gnu aarch64-linux-gnu" BASE_OS=Fedora BASE_OS_VERSION=21
ls -1 ../../build/devkit/result/
@@ -1258,11 +1258,11 @@ toolchain and a sysroot environment which can easily be used together with the
following command:

```
bash configure --with-devkit=<devkit-path> --openjdk-target=ppc64-linux-gnu && make
bash configure --with-devkit=<devkit-path> --openjdk-target=ppc64le-linux-gnu && make
```

will configure and build the JDK for Linux/ppc64 assuming that `<devkit-path>`
points to a Linux/x86_64 to Linux/ppc64 devkit.
will configure and build the JDK for Linux/ppc64le assuming that `<devkit-path>`
points to a Linux/x86_64 to Linux/ppc64le devkit.

Devkits can be created from the `make/devkit` directory by executing:

@@ -1281,16 +1281,14 @@ at least the following targets are known to work:
| x86_64-linux-gnu |
| aarch64-linux-gnu |
| arm-linux-gnueabihf |
| ppc64-linux-gnu |
| ppc64le-linux-gnu |
| riscv64-linux-gnu |
| s390x-linux-gnu |

`BASE_OS` must be one of "OEL6" for Oracle Enterprise Linux 6 or "Fedora" (if
not specified "OEL6" will be the default). If the base OS is "Fedora" the
corresponding Fedora release can be specified with the help of the
`BASE_OS_VERSION` option (with "27" as default version). If the build is
successful, the new devkits can be found in the `build/devkit/result`
subdirectory:
`BASE_OS` must be one of `OL` for Oracle Enterprise Linux or `Fedora`. If the
base OS is `Fedora` the corresponding Fedora release can be specified with the
help of the `BASE_OS_VERSION` option. If the build is successful, the new
devkits can be found in the `build/devkit/result` subdirectory:

```
cd make/devkit
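Putting the two pieces of documentation above together, a sketch of the full devkit-based cross-compile flow; the Fedora version and result path are illustrative (the result directory follows the `<host>-to-<target>` naming described in make/devkit/Makefile):

```
# Build a devkit, then configure and build the JDK with it.
cd make/devkit
make TARGETS="ppc64le-linux-gnu" BASE_OS=Fedora BASE_OS_VERSION=21
ls -1 ../../build/devkit/result/
# e.g. x86_64-linux-gnu-to-ppc64le-linux-gnu

cd ../..
bash configure \
    --with-devkit=build/devkit/result/x86_64-linux-gnu-to-ppc64le-linux-gnu \
    --openjdk-target=ppc64le-linux-gnu
make
```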
@@ -209,6 +209,16 @@ lines of code. Name what you must repeat.</p></li>
attribute, the change should be done with a "setter" accessor matched to
the simple "getter".</p></li>
</ul>
<h4 id="conventions-for-lock-free-code">Conventions for Lock-free
Code</h4>
<p>Sometimes variables are accessed concurrently without appropriate
synchronization context, such as a held mutex or at a safepoint. In such
cases the variable should be declared <code>volatile</code> and it
should NOT be accessed as a normal C++ lvalue. Rather, access should be
performed via functions from <code>Atomic</code>, such as
<code>Atomic::load</code>, <code>Atomic::store</code>, etc.</p>
<p>This special formulation makes it more clear to maintainers that the
variable is accessed concurrently in a lock-free manner.</p>
<h3 id="source-files">Source Files</h3>
<ul>
<li><p>All source files must have a globally unique basename. The build
@@ -135,6 +135,17 @@ lines of code. Name what you must repeat.
change should be done with a "setter" accessor matched to the simple
"getter".

#### Conventions for Lock-free Code

Sometimes variables are accessed concurrently without appropriate synchronization
context, such as a held mutex or at a safepoint. In such cases the variable should
be declared `volatile` and it should NOT be accessed as a normal C++ lvalue. Rather,
access should be performed via functions from `Atomic`, such as `Atomic::load`,
`Atomic::store`, etc.

This special formulation makes it more clear to maintainers that the variable is
accessed concurrently in a lock-free manner.

### Source Files

* All source files must have a globally unique basename. The build
@@ -11,11 +11,8 @@
|
||||
div.columns{display: flex; gap: min(4vw, 1.5em);}
|
||||
div.column{flex: auto; overflow-x: auto;}
|
||||
div.hanging-indent{margin-left: 1.5em; text-indent: -1.5em;}
|
||||
/* The extra [class] is a hack that increases specificity enough to
|
||||
override a similar rule in reveal.js */
|
||||
ul.task-list[class]{list-style: none;}
|
||||
ul.task-list{list-style: none;}
|
||||
ul.task-list li input[type="checkbox"] {
|
||||
font-size: inherit;
|
||||
width: 0.8em;
|
||||
margin: 0 0.8em 0.2em -1.6em;
|
||||
vertical-align: middle;
|
||||
|
||||
@@ -72,11 +72,9 @@ id="toc-notes-for-specific-tests">Notes for Specific Tests</a>
|
||||
<li><a href="#non-us-locale" id="toc-non-us-locale">Non-US
|
||||
locale</a></li>
|
||||
<li><a href="#pkcs11-tests" id="toc-pkcs11-tests">PKCS11 Tests</a></li>
|
||||
</ul></li>
|
||||
<li><a href="#testing-ahead-of-time-optimizations"
|
||||
id="toc-testing-ahead-of-time-optimizations">### Testing Ahead-of-time
|
||||
Optimizations</a>
|
||||
<ul>
|
||||
id="toc-testing-ahead-of-time-optimizations">Testing Ahead-of-time
|
||||
Optimizations</a></li>
|
||||
<li><a href="#testing-with-alternative-security-providers"
|
||||
id="toc-testing-with-alternative-security-providers">Testing with
|
||||
alternative security providers</a></li>
|
||||
@@ -400,7 +398,8 @@ TEST_OPTS keywords.</p>
|
||||
<h4 id="jobs">JOBS</h4>
|
||||
<p>Currently only applies to JTReg.</p>
|
||||
<h4 id="timeout_factor">TIMEOUT_FACTOR</h4>
|
||||
<p>Currently only applies to JTReg.</p>
|
||||
<p>Currently only applies to <a href="#timeout_factor-1">JTReg
|
||||
-timeoutFactor</a>.</p>
|
||||
<h4 id="java_options">JAVA_OPTIONS</h4>
|
||||
<p>Applies to JTReg, GTest and Micro.</p>
|
||||
<h4 id="vm_options">VM_OPTIONS</h4>
|
||||
@@ -435,6 +434,9 @@ the diff between the specified revision and the repository tip.</p>
|
||||
<p>The report is stored in
|
||||
<code>build/$BUILD/test-results/jcov-output/diff_coverage_report</code>
|
||||
file.</p>
|
||||
<h4 id="aot_jdk">AOT_JDK</h4>
|
||||
<p>See <a href="#testing-ahead-of-time-optimizations">Testing
|
||||
Ahead-of-time optimizations</a>.</p>
|
||||
<h3 id="jtreg-keywords">JTReg keywords</h3>
|
||||
<h4 id="jobs-1">JOBS</h4>
|
||||
<p>The test concurrency (<code>-concurrency</code>).</p>
|
||||
@@ -443,8 +445,12 @@ otherwise it defaults to JOBS, except for Hotspot, where the default is
|
||||
<em>number of CPU cores/2</em>, but never more than <em>memory size in
|
||||
GB/2</em>.</p>
|
||||
<h4 id="timeout_factor-1">TIMEOUT_FACTOR</h4>
|
||||
<p>The timeout factor (<code>-timeoutFactor</code>).</p>
|
||||
<p>Defaults to 4.</p>
|
||||
<p>The <code>TIMEOUT_FACTOR</code> is forwarded to JTReg framework
|
||||
itself (<code>-timeoutFactor</code>). Also, some test cases that
|
||||
programmatically wait a certain amount of time will apply this factor.
|
||||
If we run in forced compilation mode (<code>-Xcomp</code>), the build
|
||||
system will automatically adjust this factor to compensate for less
|
||||
performance. Defaults to 1.</p>
|
||||
<h4 id="failure_handler_timeout">FAILURE_HANDLER_TIMEOUT</h4>
|
||||
<p>Sets the argument <code>-timeoutHandlerTimeout</code> for JTReg. The
|
||||
default value is 0. This is only valid if the failure handler is
|
||||
@@ -457,6 +463,12 @@ class, named Virtual, is currently part of the JDK build in the
|
||||
<code>test/jtreg_test_thread_factory/</code> directory. This class gets
|
||||
compiled during the test image build. The implementation of the Virtual
|
||||
class creates a new virtual thread for executing each test class.</p>
|
||||
<h4 id="jvmti_stress_agent">JVMTI_STRESS_AGENT</h4>
|
||||
<p>Executes JTReg tests with JVM TI stress agent. The stress agent is
|
||||
the part of test library and located in
|
||||
<code>test/lib/jdk/test/lib/jvmti/libJvmtiStressAgent.cpp</code>. The
|
||||
value of this argument is set as JVM TI agent options. This mode uses
|
||||
ProblemList-jvmti-stress-agent.txt as an additional exclude list.</p>
|
||||
<h4 id="test_mode">TEST_MODE</h4>
|
||||
<p>The test mode (<code>agentvm</code> or <code>othervm</code>).</p>
|
||||
<p>Defaults to <code>agentvm</code>.</p>
|
||||
@@ -556,6 +568,12 @@ each fork. Same as specifying <code>-wi <num></code>.</p>
|
||||
same values as <code>-rff</code>, i.e., <code>text</code>,
|
||||
<code>csv</code>, <code>scsv</code>, <code>json</code>, or
|
||||
<code>latex</code>.</p>
|
||||
<h4 id="test_jdk">TEST_JDK</h4>
|
||||
<p>The path to the JDK that will be used to run the benchmarks.</p>
|
||||
<p>Defaults to <code>build/<CONF-NAME>/jdk</code>.</p>
|
||||
<h4 id="benchmarks_jar">BENCHMARKS_JAR</h4>
|
||||
<p>The path to the JAR containing the benchmarks.</p>
|
||||
<p>Defaults to <code>test/micro/benchmarks.jar</code>.</p>
|
||||
<h4 id="vm_options-2">VM_OPTIONS</h4>
|
||||
<p>Additional VM arguments to provide to forked off VMs. Same as
|
||||
<code>-jvmArgs <args></code></p>
|
||||
@@ -601,8 +619,8 @@ element of the appropriate <code>@Artifact</code> class. (See
|
||||
JTREG="JAVA_OPTIONS=-Djdk.test.lib.artifacts.nsslib-linux_aarch64=/path/to/NSS-libs"</code></pre>
|
||||
<p>For more notes about the PKCS11 tests, please refer to
|
||||
test/jdk/sun/security/pkcs11/README.</p>
|
||||
<h2 id="testing-ahead-of-time-optimizations">### Testing Ahead-of-time
|
||||
Optimizations</h2>
|
||||
<h3 id="testing-ahead-of-time-optimizations">Testing Ahead-of-time
|
||||
Optimizations</h3>
|
||||
<p>One way to improve test coverage of ahead-of-time (AOT) optimizations
|
||||
in the JDK is to run existing jtreg test cases in a special "AOT_JDK"
|
||||
mode. Example:</p>
|
||||
|
||||
@@ -324,7 +324,7 @@ Currently only applies to JTReg.

#### TIMEOUT_FACTOR

Currently only applies to JTReg.
Currently only applies to [JTReg -timeoutFactor](#timeout_factor-1).

#### JAVA_OPTIONS

@@ -367,6 +367,10 @@ between the specified revision and the repository tip.
The report is stored in
`build/$BUILD/test-results/jcov-output/diff_coverage_report` file.

#### AOT_JDK

See [Testing Ahead-of-time optimizations](#testing-ahead-of-time-optimizations).

### JTReg keywords

#### JOBS

@@ -379,9 +383,11 @@ never more than *memory size in GB/2*.

#### TIMEOUT_FACTOR

The timeout factor (`-timeoutFactor`).

Defaults to 4.
The `TIMEOUT_FACTOR` is forwarded to JTReg framework itself
(`-timeoutFactor`). Also, some test cases that programmatically wait a
certain amount of time will apply this factor. If we run in forced
compilation mode (`-Xcomp`), the build system will automatically
adjust this factor to compensate for less performance. Defaults to 1.
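A quick illustration of how such keywords are passed on the make command line, in the `JTREG="..."` form used elsewhere in this document; the test selector is only an example, and the assumption here is that multiple keywords are combined with `;` as in the standard OpenJDK testing docs:

```
# Run jtreg tests with an explicit timeout factor and concurrency.
make test TEST="jtreg:test/jdk/java/util" JTREG="TIMEOUT_FACTOR=4;JOBS=4"
```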
#### FAILURE_HANDLER_TIMEOUT

@@ -397,6 +403,13 @@ the `test/jtreg_test_thread_factory/` directory. This class gets compiled
during the test image build. The implementation of the Virtual class creates a
new virtual thread for executing each test class.

#### JVMTI_STRESS_AGENT

Executes JTReg tests with JVM TI stress agent. The stress agent is the part of
test library and located in `test/lib/jdk/test/lib/jvmti/libJvmtiStressAgent.cpp`.
The value of this argument is set as JVM TI agent options.
This mode uses ProblemList-jvmti-stress-agent.txt as an additional exclude list.
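A minimal sketch of how this new keyword would be used; the value becomes the JVM TI agent options string, so both the option value and the test selector below are placeholders:

```
# Run a jtreg group with the JVM TI stress agent enabled.
make test TEST="jtreg:test/hotspot/jtreg/serviceability" \
    JTREG="JVMTI_STRESS_AGENT=<agent-options>"
```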
#### TEST_MODE

The test mode (`agentvm` or `othervm`).

@@ -545,6 +558,18 @@ Amount of time to spend in each warmup iteration. Same as specifying `-w

Specify to have the test run save a log of the values. Accepts the same values
as `-rff`, i.e., `text`, `csv`, `scsv`, `json`, or `latex`.

#### TEST_JDK

The path to the JDK that will be used to run the benchmarks.

Defaults to `build/<CONF-NAME>/jdk`.

#### BENCHMARKS_JAR

The path to the JAR containing the benchmarks.

Defaults to `test/micro/benchmarks.jar`.
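A sketch of overriding these two microbenchmark defaults; this assumes the keywords are passed through the `MICRO="..."` variable in the same keyword=value form as `JTREG` above, and the paths are illustrative:

```
# Run microbenchmarks against a different JDK and a prebuilt benchmarks JAR.
make test TEST="micro:java.lang.invoke" \
    MICRO="TEST_JDK=/path/to/other/jdk;BENCHMARKS_JAR=/path/to/benchmarks.jar"
```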
#### VM_OPTIONS

Additional VM arguments to provide to forked off VMs. Same as `-jvmArgs <args>`

@@ -612,7 +637,7 @@ For more notes about the PKCS11 tests, please refer to
test/jdk/sun/security/pkcs11/README.

### Testing Ahead-of-time Optimizations
-------------------------------------------------------------------------------

One way to improve test coverage of ahead-of-time (AOT) optimizations in
the JDK is to run existing jtreg test cases in a special "AOT_JDK" mode.
Example:
@@ -85,7 +85,7 @@ CreateHkTargets = \
|
||||
################################################################################
|
||||
# Include module specific build settings
|
||||
|
||||
THIS_SNIPPET := modules/$(MODULE)/Java.gmk
|
||||
THIS_SNIPPET := $(call GetModuleSnippetName, Java)
|
||||
|
||||
ifneq ($(wildcard $(THIS_SNIPPET)), )
|
||||
include MakeSnippetStart.gmk
|
||||
@@ -115,6 +115,7 @@ $(eval $(call SetupJavaCompilation, $(MODULE), \
|
||||
EXCLUDE_FILES := $(EXCLUDE_FILES), \
|
||||
EXCLUDE_PATTERNS := -files, \
|
||||
KEEP_ALL_TRANSLATIONS := $(KEEP_ALL_TRANSLATIONS), \
|
||||
TARGET_RELEASE := $(TARGET_RELEASE), \
|
||||
JAVAC_FLAGS := \
|
||||
$(DOCLINT) \
|
||||
$(JAVAC_FLAGS) \
|
||||
|
||||
@@ -184,7 +184,7 @@ endif
|
||||
################################################################################
|
||||
# Include module specific build settings
|
||||
|
||||
THIS_SNIPPET := modules/$(MODULE)/Jmod.gmk
|
||||
THIS_SNIPPET := $(call GetModuleSnippetName, Jmod)
|
||||
|
||||
ifneq ($(wildcard $(THIS_SNIPPET)), )
|
||||
include MakeSnippetStart.gmk
|
||||
|
||||
@@ -236,8 +236,8 @@ define create_overview_file
|
||||
#
|
||||
ifneq ($$($1_GROUPS), )
|
||||
$1_OVERVIEW_TEXT += \
|
||||
<p>This document is divided into \
|
||||
$$(subst 2,two,$$(subst 3,three,$$(words $$($1_GROUPS)))) sections:</p> \
|
||||
<p>This document has \
|
||||
$$(subst 2,two,$$(subst 3,three,$$(words $$($1_GROUPS)))) major sections:</p> \
|
||||
<blockquote><dl> \
|
||||
#
|
||||
$1_OVERVIEW_TEXT += $$(foreach g, $$($1_GROUPS), \
|
||||
@@ -246,7 +246,10 @@ define create_overview_file
|
||||
)
|
||||
$1_OVERVIEW_TEXT += \
|
||||
</dl></blockquote> \
|
||||
#
|
||||
<p><a href="../specs/index.html">Related documents</a> specify the Java \
|
||||
programming language, the Java Virtual Machine, various protocols and file \
|
||||
formats pertaining to the Java platform, and tools included in the JDK.</p> \
|
||||
#
|
||||
endif
|
||||
$1_OVERVIEW_TEXT += \
|
||||
</body></html> \
|
||||
|
||||
@@ -270,6 +270,7 @@ endif
|
||||
# Since debug symbols are not included in the jmod files, they need to be copied
|
||||
# in manually after generating the images.
|
||||
|
||||
# These variables are read by SetupCopyDebuginfo
|
||||
ALL_JDK_MODULES := $(JDK_MODULES)
|
||||
ALL_JRE_MODULES := $(sort $(JRE_MODULES), $(foreach m, $(JRE_MODULES), \
|
||||
$(call FindTransitiveDepsForModule, $m)))
|
||||
|
||||
@@ -1407,7 +1407,7 @@ CLEAN_SUPPORT_DIRS += demos
|
||||
CLEAN_SUPPORT_DIR_TARGETS := $(addprefix clean-, $(CLEAN_SUPPORT_DIRS))
|
||||
CLEAN_TESTS += hotspot-jtreg-native jdk-jtreg-native lib
|
||||
CLEAN_TEST_TARGETS += $(addprefix clean-test-, $(CLEAN_TESTS))
|
||||
CLEAN_PHASES := gensrc java native include
|
||||
CLEAN_PHASES += gensrc java native include
|
||||
CLEAN_PHASE_TARGETS := $(addprefix clean-, $(CLEAN_PHASES))
|
||||
CLEAN_MODULE_TARGETS := $(addprefix clean-, $(ALL_MODULES))
|
||||
# Construct targets of the form clean-$module-$phase
|
||||
|
||||
@@ -149,7 +149,7 @@ endef
|
||||
|
||||
################################################################################
|
||||
|
||||
PHASE_MAKEDIRS := $(TOPDIR)/make
|
||||
PHASE_MAKEDIRS += $(TOPDIR)/make
|
||||
|
||||
# Helper macro for DeclareRecipesForPhase
|
||||
# Declare a recipe for calling the module and phase specific makefile.
|
||||
|
||||
@@ -34,18 +34,23 @@ include MakeFileStart.gmk
|
||||
################################################################################
|
||||
|
||||
include CopyFiles.gmk
|
||||
include Modules.gmk
|
||||
|
||||
MODULE_SRC := $(TOPDIR)/src/$(MODULE)
|
||||
|
||||
# Define the snippet for MakeSnippetStart/End
|
||||
THIS_SNIPPET := modules/$(MODULE)/$(MAKEFILE_PREFIX).gmk
|
||||
################################################################################
|
||||
# Include module specific build settings
|
||||
|
||||
include MakeSnippetStart.gmk
|
||||
THIS_SNIPPET := $(call GetModuleSnippetName, $(MAKEFILE_PREFIX))
|
||||
|
||||
# Include the file being wrapped.
|
||||
include $(THIS_SNIPPET)
|
||||
ifneq ($(wildcard $(THIS_SNIPPET)), )
|
||||
include MakeSnippetStart.gmk
|
||||
|
||||
include MakeSnippetEnd.gmk
|
||||
# Include the file being wrapped.
|
||||
include $(THIS_SNIPPET)
|
||||
|
||||
include MakeSnippetEnd.gmk
|
||||
endif
|
||||
|
||||
ifeq ($(MAKEFILE_PREFIX), Lib)
|
||||
# We need to keep track of what libraries are generated/needed by this
|
||||
|
||||
@@ -204,8 +204,9 @@ $(eval $(call SetTestOpt,AOT_JDK,JTREG))
|
||||
|
||||
$(eval $(call ParseKeywordVariable, JTREG, \
|
||||
SINGLE_KEYWORDS := JOBS TIMEOUT_FACTOR FAILURE_HANDLER_TIMEOUT \
|
||||
TEST_MODE ASSERT VERBOSE RETAIN TEST_THREAD_FACTORY MAX_MEM RUN_PROBLEM_LISTS \
|
||||
RETRY_COUNT REPEAT_COUNT MAX_OUTPUT REPORT AOT_JDK $(CUSTOM_JTREG_SINGLE_KEYWORDS), \
|
||||
TEST_MODE ASSERT VERBOSE RETAIN TEST_THREAD_FACTORY JVMTI_STRESS_AGENT \
|
||||
MAX_MEM RUN_PROBLEM_LISTS RETRY_COUNT REPEAT_COUNT MAX_OUTPUT REPORT \
|
||||
AOT_JDK $(CUSTOM_JTREG_SINGLE_KEYWORDS), \
|
||||
STRING_KEYWORDS := OPTIONS JAVA_OPTIONS VM_OPTIONS KEYWORDS \
|
||||
EXTRA_PROBLEM_LISTS LAUNCHER_OPTIONS \
|
||||
$(CUSTOM_JTREG_STRING_KEYWORDS), \
|
||||
@@ -876,6 +877,15 @@ define SetupRunJtregTestBody
|
||||
))
|
||||
endif
|
||||
|
||||
ifneq ($$(JTREG_JVMTI_STRESS_AGENT), )
|
||||
AGENT := $$(LIBRARY_PREFIX)JvmtiStressAgent$$(SHARED_LIBRARY_SUFFIX)=$$(JTREG_JVMTI_STRESS_AGENT)
|
||||
$1_JTREG_BASIC_OPTIONS += -javaoption:'-agentpath:$(TEST_IMAGE_DIR)/hotspot/jtreg/native/$$(AGENT)'
|
||||
$1_JTREG_BASIC_OPTIONS += $$(addprefix $$(JTREG_PROBLEM_LIST_PREFIX), $$(wildcard \
|
||||
$$(addprefix $$($1_TEST_ROOT)/, ProblemList-jvmti-stress-agent.txt) \
|
||||
))
|
||||
endif
|
||||
|
||||
|
||||
ifneq ($$(JTREG_LAUNCHER_OPTIONS), )
|
||||
$1_JTREG_LAUNCHER_OPTIONS += $$(JTREG_LAUNCHER_OPTIONS)
|
||||
endif
|
||||
@@ -936,7 +946,8 @@ define SetupRunJtregTestBody
|
||||
JTREG_ALL_OPTIONS := $$(JTREG_JAVA_OPTIONS) $$(JTREG_VM_OPTIONS)
|
||||
|
||||
JTREG_AUTO_PROBLEM_LISTS :=
|
||||
JTREG_AUTO_TIMEOUT_FACTOR := 4
|
||||
# Please reach consensus before changing this. It was not easy changing it to a `1`.
|
||||
JTREG_AUTO_TIMEOUT_FACTOR := 1
|
||||
|
||||
ifneq ($$(findstring -Xcomp, $$(JTREG_ALL_OPTIONS)), )
|
||||
JTREG_AUTO_PROBLEM_LISTS += ProblemList-Xcomp.txt
|
||||
@@ -1243,7 +1254,7 @@ UseSpecialTestHandler = \
# Now process each test to run and setup a proper make rule
$(foreach test, $(TESTS_TO_RUN), \
$(eval TEST_ID := $(shell $(ECHO) $(strip $(test)) | \
$(TR) -cs '[a-z][A-Z][0-9]\n' '[_*1000]')) \
$(TR) -cs '[a-z][A-Z][0-9]\n' '_')) \
$(eval ALL_TEST_IDS += $(TEST_ID)) \
$(if $(call UseCustomTestHandler, $(test)), \
$(eval $(call SetupRunCustomTest, $(TEST_ID), \

@@ -1323,9 +1334,9 @@ run-test-report: post-run-test
TEST TOTAL PASS FAIL ERROR SKIP " "
$(foreach test, $(TESTS_TO_RUN), \
$(eval TEST_ID := $(shell $(ECHO) $(strip $(test)) | \
$(TR) -cs '[a-z][A-Z][0-9]\n' '[_*1000]')) \
$(TR) -cs '[a-z][A-Z][0-9]\n' '_')) \
$(ECHO) >> $(TEST_LAST_IDS) $(TEST_ID) $(NEWLINE) \
$(eval NAME_PATTERN := $(shell $(ECHO) $(test) | $(TR) -c '\n' '[_*1000]')) \
$(eval NAME_PATTERN := $(shell $(ECHO) $(test) | $(TR) -c '\n' '_')) \
$(if $(filter __________________________________________________%, $(NAME_PATTERN)), \
$(eval TEST_NAME := ) \
$(PRINTF) >> $(TEST_SUMMARY) "%2s %-49s\n" " " "$(test)" $(NEWLINE) \
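What the simplified `tr` replacement set does, shown directly on the shell with an example test selector: every character outside a-z, A-Z, 0-9 and newline is translated to `_` and squeezed, producing a make-safe test id.

```
echo "jtreg:test/hotspot/jtreg/gc/g1" | tr -cs '[a-z][A-Z][0-9]\n' '_'
# -> jtreg_test_hotspot_jtreg_gc_g1
```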
@@ -176,3 +176,19 @@ ULIMIT := ulimit
|
||||
ifeq ($(OPENJDK_BUILD_OS), windows)
|
||||
PATHTOOL := cygpath
|
||||
endif
|
||||
|
||||
# These settings are needed to run testing with jvmti agent
|
||||
ifeq ($(OPENJDK_BUILD_OS), linux)
|
||||
LIBRARY_PREFIX := lib
|
||||
SHARED_LIBRARY_SUFFIX := .so
|
||||
endif
|
||||
|
||||
ifeq ($(OPENJDK_BUILD_OS), windows)
|
||||
LIBRARY_PREFIX :=
|
||||
SHARED_LIBRARY_SUFFIX := .dll
|
||||
endif
|
||||
|
||||
ifeq ($(OPENJDK_BUILD_OS), macosx)
|
||||
LIBRARY_PREFIX := lib
|
||||
SHARED_LIBRARY_SUFFIX := .dylib
|
||||
endif
|
||||
|
||||
@@ -36,7 +36,7 @@ $(eval $(call SetupJavaCompilation, BUILD_TOOLS_LANGTOOLS, \
|
||||
COMPILER := bootjdk, \
|
||||
TARGET_RELEASE := $(TARGET_RELEASE_BOOTJDK), \
|
||||
SRC := $(TOPDIR)/make/langtools/tools, \
|
||||
INCLUDES := compileproperties propertiesparser, \
|
||||
INCLUDES := compileproperties flagsgenerator propertiesparser, \
|
||||
COPY := .properties, \
|
||||
BIN := $(BUILDTOOLS_OUTPUTDIR)/langtools_tools_classes, \
|
||||
))
|
||||
|
||||
@@ -395,11 +395,9 @@ AC_DEFUN_ONCE([BOOTJDK_SETUP_BOOT_JDK],

# When compiling code to be executed by the Boot JDK, force compatibility with the
# oldest supported bootjdk.
OLDEST_BOOT_JDK=`$ECHO $DEFAULT_ACCEPTABLE_BOOT_VERSIONS \
OLDEST_BOOT_JDK_VERSION=`$ECHO $DEFAULT_ACCEPTABLE_BOOT_VERSIONS \
| $TR " " "\n" | $SORT -n | $HEAD -n1`
# -Xlint:-options is added to avoid "warning: [options] system modules path not set in conjunction with -source"
BOOT_JDK_SOURCETARGET="-source $OLDEST_BOOT_JDK -target $OLDEST_BOOT_JDK -Xlint:-options"
AC_SUBST(BOOT_JDK_SOURCETARGET)
AC_SUBST(OLDEST_BOOT_JDK_VERSION)

# Check if the boot jdk is 32 or 64 bit
if $JAVA -version 2>&1 | $GREP -q "64-Bit"; then
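A minimal sketch of the version selection above, runnable outside configure; the version list is illustrative and stands in for `DEFAULT_ACCEPTABLE_BOOT_VERSIONS`:

```
DEFAULT_ACCEPTABLE_BOOT_VERSIONS="24 25 26"
OLDEST_BOOT_JDK_VERSION=$(echo $DEFAULT_ACCEPTABLE_BOOT_VERSIONS \
    | tr " " "\n" | sort -n | head -n1)
echo "$OLDEST_BOOT_JDK_VERSION"   # -> 24
# Build-tool classes are then compiled with something like:
#   javac -source 24 -target 24 -Xlint:-options ...
```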
@@ -221,6 +221,9 @@ JDKOPT_SETUP_UNDEFINED_BEHAVIOR_SANITIZER
|
||||
# LeakSanitizer
|
||||
JDKOPT_SETUP_LEAK_SANITIZER
|
||||
|
||||
# Setup static analyzer
|
||||
JDKOPT_SETUP_STATIC_ANALYZER
|
||||
|
||||
# Fallback linker
|
||||
# This needs to go before 'LIB_DETERMINE_DEPENDENCIES'
|
||||
JDKOPT_SETUP_FALLBACK_LINKER
|
||||
|
||||
@@ -736,8 +736,15 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_CPU_DEP],
$1_CFLAGS_CPU_JVM="${$1_CFLAGS_CPU_JVM} -mminimal-toc"
elif test "x$FLAGS_CPU" = xppc64le; then
# Little endian machine uses ELFv2 ABI.
# Use Power8, this is the first CPU to support PPC64 LE with ELFv2 ABI.
$1_CFLAGS_CPU="-mcpu=power8 -mtune=power10"
# Use Power8 for target cpu, this is the first CPU to support PPC64 LE with ELFv2 ABI.
# Use Power10 for tuning target, this is supported by gcc >= 10
POWER_TUNE_VERSION="-mtune=power10"
FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [${POWER_TUNE_VERSION}],
IF_FALSE: [
POWER_TUNE_VERSION="-mtune=power8"
]
)
$1_CFLAGS_CPU="-mcpu=power8 ${POWER_TUNE_VERSION}"
$1_CFLAGS_CPU_JVM="${$1_CFLAGS_CPU_JVM} -DABI_ELFv2"
fi
elif test "x$FLAGS_CPU" = xs390x; then

@@ -933,7 +940,7 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_CPU_DEP],
# ACLE and this flag are required to build the aarch64 SVE related functions in
# libvectormath. Apple Silicon does not support SVE; use macOS as a proxy for
# that check.
if test "x$OPENJDK_TARGET_CPU" = "xaarch64" && test "x$OPENJDK_TARGET_CPU" = "xlinux"; then
if test "x$OPENJDK_TARGET_CPU" = "xaarch64" && test "x$OPENJDK_TARGET_OS" = "xlinux"; then
if test "x$TOOLCHAIN_TYPE" = xgcc || test "x$TOOLCHAIN_TYPE" = xclang; then
AC_LANG_PUSH(C)
OLD_CFLAGS="$CFLAGS"

@@ -947,6 +954,17 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_CPU_DEP],
[
AC_MSG_RESULT([yes])
$2SVE_CFLAGS="-march=armv8-a+sve"
# Switching the initialization mode with gcc from 'pattern' to 'zero'
# avoids the use of unsupported `__builtin_clear_padding` for variable
# length aggregates
if test "x$DEBUG_LEVEL" != xrelease && test "x$TOOLCHAIN_TYPE" = xgcc ; then
INIT_ZERO_FLAG="-ftrivial-auto-var-init=zero"
FLAGS_COMPILER_CHECK_ARGUMENTS(ARGUMENT: [$INIT_ZERO_FLAG],
IF_TRUE: [
$2SVE_CFLAGS="${$2SVE_CFLAGS} $INIT_ZERO_FLAG"
]
)
fi
],
[
AC_MSG_RESULT([no])
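A sketch of the kind of probe `FLAGS_COMPILER_CHECK_ARGUMENTS` performs for the ppc64le tuning flag above: try to compile an empty program with the candidate flag and fall back if the compiler rejects it. It assumes a gcc that targets ppc64le; the compiler name and flags are illustrative.

```
CANDIDATE="-mtune=power10"
FALLBACK="-mtune=power8"
# Compile a trivial program from stdin; discard the output binary.
if echo 'int main(void) { return 0; }' \
    | gcc -mcpu=power8 $CANDIDATE -x c - -o /dev/null 2>/dev/null; then
  POWER_TUNE_VERSION="$CANDIDATE"
else
  POWER_TUNE_VERSION="$FALLBACK"
fi
echo "Using $POWER_TUNE_VERSION"
```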
@@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
@@ -115,7 +115,11 @@ AC_DEFUN([FLAGS_SETUP_ASFLAGS],
|
||||
# Force preprocessor to run, just to make sure
|
||||
BASIC_ASFLAGS="-x assembler-with-cpp"
|
||||
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
|
||||
BASIC_ASFLAGS="-nologo -c"
|
||||
if test "x$OPENJDK_TARGET_CPU" = xaarch64; then
|
||||
BASIC_ASFLAGS="-nologo"
|
||||
else
|
||||
BASIC_ASFLAGS="-nologo -c"
|
||||
fi
|
||||
fi
|
||||
AC_SUBST(BASIC_ASFLAGS)
|
||||
|
||||
|
||||
@@ -320,12 +320,16 @@ AC_DEFUN([FLAGS_SETUP_TOOLCHAIN_CONTROL],
|
||||
[
|
||||
if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
|
||||
CC_OUT_OPTION=-Fo
|
||||
if test "x$OPENJDK_TARGET_CPU" != xaarch64; then
|
||||
AS_NON_ASM_EXTENSION_OPTION=-Ta
|
||||
fi
|
||||
else
|
||||
# The option used to specify the target .o,.a or .so file.
|
||||
# When compiling, how to specify the to be created object file.
|
||||
CC_OUT_OPTION='-o$(SPACE)'
|
||||
fi
|
||||
AC_SUBST(CC_OUT_OPTION)
|
||||
AC_SUBST(AS_NON_ASM_EXTENSION_OPTION)
|
||||
|
||||
# Generate make dependency files
|
||||
if test "x$TOOLCHAIN_TYPE" = xgcc; then
|
||||
|
||||
@@ -479,6 +479,31 @@ AC_DEFUN_ONCE([JDKOPT_SETUP_ADDRESS_SANITIZER],
AC_SUBST(ASAN_ENABLED)
])

################################################################################
#
# Static analyzer
#
AC_DEFUN_ONCE([JDKOPT_SETUP_STATIC_ANALYZER],
[
UTIL_ARG_ENABLE(NAME: static-analyzer, DEFAULT: false, RESULT: STATIC_ANALYZER_ENABLED,
DESC: [enable the GCC static analyzer],
CHECK_AVAILABLE: [
AC_MSG_CHECKING([if static analyzer is available])
if test "x$TOOLCHAIN_TYPE" = "xgcc"; then
AC_MSG_RESULT([yes])
else
AC_MSG_RESULT([no])
AVAILABLE=false
fi
],
IF_ENABLED: [
STATIC_ANALYZER_CFLAGS="-fanalyzer -Wno-analyzer-fd-leak"
CFLAGS_JDKLIB="$CFLAGS_JDKLIB $STATIC_ANALYZER_CFLAGS"
CFLAGS_JDKEXE="$CFLAGS_JDKEXE $STATIC_ANALYZER_CFLAGS"
])
AC_SUBST(STATIC_ANALYZER_ENABLED)
])

################################################################################
#
# LeakSanitizer
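How this new option would likely be used, assuming `UTIL_ARG_ENABLE` exposes it as `--enable-static-analyzer` (the usual pattern for `NAME: static-analyzer`); it is only available with a gcc toolchain, and the flags it adds are `-fanalyzer -Wno-analyzer-fd-leak` as set above:

```
# Configure a build with the GCC static analyzer enabled (assumed flag name).
bash configure --with-toolchain-type=gcc --enable-static-analyzer
make jdk
```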
@@ -513,6 +513,10 @@ AC_DEFUN([JVM_FEATURES_VERIFY],
|
||||
[
|
||||
variant=$1
|
||||
|
||||
if JVM_FEATURES_IS_ACTIVE(jfr) && ! JVM_FEATURES_IS_ACTIVE(services); then
|
||||
AC_MSG_ERROR([Specified JVM feature 'jfr' requires feature 'services' for variant '$variant'])
|
||||
fi
|
||||
|
||||
if JVM_FEATURES_IS_ACTIVE(jvmci) && ! (JVM_FEATURES_IS_ACTIVE(compiler1) || \
|
||||
JVM_FEATURES_IS_ACTIVE(compiler2)); then
|
||||
AC_MSG_ERROR([Specified JVM feature 'jvmci' requires feature 'compiler2' or 'compiler1' for variant '$variant'])
|
||||
|
||||
@@ -393,9 +393,8 @@ EXTERNAL_BUILDJDK := @EXTERNAL_BUILDJDK@
|
||||
# Whether the boot jdk jar supports --date=TIMESTAMP
|
||||
BOOT_JDK_JAR_SUPPORTS_DATE := @BOOT_JDK_JAR_SUPPORTS_DATE@
|
||||
|
||||
# When compiling Java source to be run by the boot jdk
|
||||
# use these extra flags, eg -source 6 -target 6
|
||||
BOOT_JDK_SOURCETARGET := @BOOT_JDK_SOURCETARGET@
|
||||
# The oldest supported boot jdk version
|
||||
OLDEST_BOOT_JDK_VERSION := @OLDEST_BOOT_JDK_VERSION@
|
||||
|
||||
# Information about the build system
|
||||
NUM_CORES := @NUM_CORES@
|
||||
@@ -493,6 +492,7 @@ CXX_VERSION_NUMBER := @CXX_VERSION_NUMBER@
|
||||
HOTSPOT_TOOLCHAIN_TYPE := @HOTSPOT_TOOLCHAIN_TYPE@
|
||||
|
||||
CC_OUT_OPTION := @CC_OUT_OPTION@
|
||||
AS_NON_ASM_EXTENSION_OPTION := @AS_NON_ASM_EXTENSION_OPTION@
|
||||
|
||||
# Flags used for overriding the default opt setting for a C/C++ source file.
|
||||
C_O_FLAG_HIGHEST_JVM := @C_O_FLAG_HIGHEST_JVM@
|
||||
|
||||
@@ -655,8 +655,11 @@ AC_DEFUN_ONCE([TOOLCHAIN_DETECT_TOOLCHAIN_CORE],
|
||||
if test "x$TOOLCHAIN_TYPE" != xmicrosoft; then
|
||||
AS="$CC -c"
|
||||
else
|
||||
if test "x$OPENJDK_TARGET_CPU_BITS" = "x64"; then
|
||||
# On 64 bit windows, the assembler is "ml64.exe"
|
||||
if test "x$OPENJDK_TARGET_CPU" = "xaarch64"; then
|
||||
# On Windows aarch64, the assembler is "armasm64.exe"
|
||||
UTIL_LOOKUP_TOOLCHAIN_PROGS(AS, armasm64)
|
||||
elif test "x$OPENJDK_TARGET_CPU_BITS" = "x64"; then
|
||||
# On Windows x64, the assembler is "ml64.exe"
|
||||
UTIL_LOOKUP_TOOLCHAIN_PROGS(AS, ml64)
|
||||
else
|
||||
# otherwise, the assembler is "ml.exe"
|
||||
|
||||
@@ -38,10 +38,15 @@ include JarArchive.gmk
|
||||
###
|
||||
|
||||
# Create classes that can run on the bootjdk
|
||||
TARGET_RELEASE_BOOTJDK := $(BOOT_JDK_SOURCETARGET)
|
||||
# -Xlint:-options is added to avoid the warning
|
||||
# "system modules path not set in conjunction with -source"
|
||||
TARGET_RELEASE_BOOTJDK := -source $(OLDEST_BOOT_JDK_VERSION) \
|
||||
-target $(OLDEST_BOOT_JDK_VERSION) -Xlint:-options
|
||||
|
||||
# Create classes that can be used in (or be a part of) the new jdk we're building
|
||||
TARGET_RELEASE_NEWJDK := -source $(JDK_SOURCE_TARGET_VERSION) -target $(JDK_SOURCE_TARGET_VERSION)
|
||||
# Create classes that can be used in (or be a part of) the new jdk we're
|
||||
# building
|
||||
TARGET_RELEASE_NEWJDK := -source $(JDK_SOURCE_TARGET_VERSION) \
|
||||
-target $(JDK_SOURCE_TARGET_VERSION)
|
||||
|
||||
# Create classes that can be used in JDK 8, for legacy support
|
||||
TARGET_RELEASE_JDK8 := --release 8
|
||||
@@ -178,6 +183,10 @@ define SetupJavaCompilationBody
|
||||
|
||||
$1_SAFE_NAME := $$(strip $$(subst /,_, $1))
|
||||
|
||||
ifeq ($$($1_LOG_ACTION), )
|
||||
$1_LOG_ACTION := Compiling
|
||||
endif
|
||||
|
||||
ifeq ($$($1_SMALL_JAVA), )
|
||||
# If unspecified, default to true
|
||||
$1_SMALL_JAVA := true
|
||||
@@ -472,7 +481,7 @@ define SetupJavaCompilationBody
|
||||
# list of files.
|
||||
$$($1_FILELIST): $$($1_SRCS) $$($1_VARDEPS_FILE)
|
||||
$$(call MakeDir, $$(@D))
|
||||
$$(call LogWarn, Compiling up to $$(words $$($1_SRCS)) files for $1)
|
||||
$$(call LogWarn, $$($1_LOG_ACTION) up to $$(words $$($1_SRCS)) files for $1)
|
||||
$$(eval $$(call ListPathsSafely, $1_SRCS, $$($1_FILELIST)))
|
||||
|
||||
# Create a $$($1_MODFILELIST) file with significant modified dependencies
|
||||
|
||||
@@ -33,7 +33,7 @@ include $(TOPDIR)/make/conf/module-loader-map.conf
|
||||
|
||||
# Append platform-specific and upgradeable modules
|
||||
PLATFORM_MODULES += $(PLATFORM_MODULES_$(OPENJDK_TARGET_OS)) \
|
||||
$(UPGRADEABLE_PLATFORM_MODULES)
|
||||
$(UPGRADEABLE_PLATFORM_MODULES) $(CUSTOM_UPGRADEABLE_PLATFORM_MODULES)
|
||||
|
||||
################################################################################
|
||||
# Setup module sets for docs
|
||||
@@ -216,7 +216,7 @@ endif
|
||||
# Find dependencies ("requires") for a given module.
|
||||
# Param 1: Module to find dependencies for.
|
||||
FindDepsForModule = \
|
||||
$(DEPS_$(strip $1))
|
||||
$(filter-out $(IMPORT_MODULES), $(DEPS_$(strip $1)))
|
||||
|
||||
# Find dependencies ("requires") transitively in 3 levels for a given module.
|
||||
# Param 1: Module to find dependencies for.
|
||||
@@ -254,7 +254,8 @@ FindTransitiveIndirectDepsForModules = \
|
||||
# Upgradeable modules are those that are either defined as upgradeable or that
|
||||
# require an upradeable module.
|
||||
FindAllUpgradeableModules = \
|
||||
$(sort $(filter-out $(MODULES_FILTER), $(UPGRADEABLE_PLATFORM_MODULES)))
|
||||
$(sort $(filter-out $(MODULES_FILTER), \
|
||||
$(UPGRADEABLE_PLATFORM_MODULES) $(CUSTOM_UPGRADEABLE_PLATFORM_MODULES)))
|
||||
|
||||
################################################################################
|
||||
|
||||
@@ -316,6 +317,19 @@ define ReadImportMetaData
|
||||
$$(eval $$(call ReadSingleImportMetaData, $$m)))
|
||||
endef
|
||||
|
||||
################################################################################
|
||||
# Get a full snippet path for the current module and a given base name.
|
||||
#
|
||||
# Param 1 - The base name of the snippet file to include
|
||||
GetModuleSnippetName = \
|
||||
$(if $(CUSTOM_MODULE_MAKE_ROOT), \
|
||||
$(if $(wildcard $(CUSTOM_MODULE_MAKE_ROOT)/$(MODULE)/$(strip $1).gmk), \
|
||||
$(CUSTOM_MODULE_MAKE_ROOT)/$(MODULE)/$(strip $1).gmk, \
|
||||
$(wildcard modules/$(MODULE)/$(strip $1).gmk) \
|
||||
), \
|
||||
$(wildcard modules/$(MODULE)/$(strip $1).gmk) \
|
||||
)
|
||||
|
||||
################################################################################
|
||||
|
||||
endif # include guard
|
||||
|
||||
@@ -236,7 +236,7 @@ define CreateCompiledNativeFileBody
|
||||
# For assembler calls just create empty dependency lists
|
||||
$$(call ExecuteWithLog, $$@, $$(call MakeCommandRelative, \
|
||||
$$($1_COMPILER) $$($1_FLAGS) \
|
||||
$(CC_OUT_OPTION)$$($1_OBJ) -Ta $$($1_SRC_FILE))) \
|
||||
$(CC_OUT_OPTION)$$($1_OBJ) $(AS_NON_ASM_EXTENSION_OPTION) $$($1_SRC_FILE))) \
|
||||
| $(TR) -d '\r' | $(GREP) -v -e "Assembling:" || test "$$$$?" = "1" ; \
|
||||
$(ECHO) > $$($1_DEPS_FILE) ; \
|
||||
$(ECHO) > $$($1_DEPS_TARGETS_FILE)
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
# Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
@@ -51,7 +51,6 @@ DOCS_MODULES= \
|
||||
jdk.jdwp.agent \
|
||||
jdk.jfr \
|
||||
jdk.jlink \
|
||||
jdk.jsobject \
|
||||
jdk.jshell \
|
||||
jdk.jstatd \
|
||||
jdk.localedata \
|
||||
|
||||
@@ -62,7 +62,6 @@ UPGRADEABLE_PLATFORM_MODULES= \
|
||||
java.compiler \
|
||||
jdk.graal.compiler \
|
||||
jdk.graal.compiler.management \
|
||||
jdk.jsobject \
|
||||
#
|
||||
|
||||
PLATFORM_MODULES= \
|
||||
|
||||
@@ -39,7 +39,7 @@
#
# make TARGETS="aarch64-linux-gnu" BASE_OS=Fedora
# or
# make TARGETS="arm-linux-gnueabihf ppc64-linux-gnu" BASE_OS=Fedora BASE_OS_VERSION=17
# make TARGETS="arm-linux-gnueabihf ppc64le-linux-gnu" BASE_OS=Fedora BASE_OS_VERSION=17
#
# to build several devkits for a specific OS version at once.
# You can find the final results under ../../build/devkit/result/<host>-to-<target>

@@ -50,7 +50,7 @@
# makefile again for cross compilation. Ex:
#
# PATH=$PWD/../../build/devkit/result/x86_64-linux-gnu-to-x86_64-linux-gnu/bin:$PATH \
# make TARGETS="arm-linux-gnueabihf,ppc64-linux-gnu" BASE_OS=Fedora
# make TARGETS="arm-linux-gnueabihf ppc64le-linux-gnu" BASE_OS=Fedora
#
# This is the makefile which iterates over all host and target platforms.
#
@@ -69,15 +69,26 @@ else ifeq ($(BASE_OS), Fedora)
ifeq ($(BASE_OS_VERSION), )
BASE_OS_VERSION := $(DEFAULT_OS_VERSION)
endif
ifeq ($(filter aarch64 armhfp ppc64le riscv64 s390x x86_64, $(ARCH)), )
$(error Only "aarch64 armhfp ppc64le riscv64 s390x x86_64" architectures are supported for Fedora, but "$(ARCH)" was requested)
endif
ifeq ($(ARCH), riscv64)
ifeq ($(filter 38 39 40 41, $(BASE_OS_VERSION)), )
$(error Only Fedora 38-41 are supported for "$(ARCH)", but Fedora $(BASE_OS_VERSION) was requested)
endif
BASE_URL := http://fedora.riscv.rocks/repos-dist/f$(BASE_OS_VERSION)/latest/$(ARCH)/Packages/
else
LATEST_ARCHIVED_OS_VERSION := 35
ifeq ($(filter x86_64 armhfp, $(ARCH)), )
LATEST_ARCHIVED_OS_VERSION := 36
ifeq ($(filter aarch64 armhfp x86_64, $(ARCH)), )
FEDORA_TYPE := fedora-secondary
else
FEDORA_TYPE := fedora/linux
endif
ifeq ($(ARCH), armhfp)
ifneq ($(BASE_OS_VERSION), 36)
$(error Fedora 36 is the last release supporting "armhfp", but $(BASE_OS) was requested)
endif
endif
NOT_ARCHIVED := $(shell [ $(BASE_OS_VERSION) -gt $(LATEST_ARCHIVED_OS_VERSION) ] && echo true)
ifeq ($(NOT_ARCHIVED),true)
BASE_URL := https://dl.fedoraproject.org/pub/$(FEDORA_TYPE)/releases/$(BASE_OS_VERSION)/Everything/$(ARCH)/os/Packages/

@@ -464,7 +475,7 @@ ifeq ($(ARCH), armhfp)
$(BUILDDIR)/$(gcc_ver)/Makefile : CONFIG += --with-float=hard
endif

ifneq ($(filter riscv64 ppc64 ppc64le s390x, $(ARCH)), )
ifneq ($(filter riscv64 ppc64le s390x, $(ARCH)), )
# We only support 64-bit on these platforms anyway
CONFIG += --disable-multilib
endif
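An example run exercising the Fedora rules above: riscv64 devkits are only accepted for Fedora 38-41, so this invocation should pass the version check (the resulting directory name is illustrative and follows the `<host>-to-<target>` pattern documented earlier):

```
cd make/devkit
make TARGETS="riscv64-linux-gnu" BASE_OS=Fedora BASE_OS_VERSION=41
ls -1 ../../build/devkit/result/
# e.g. x86_64-linux-gnu-to-riscv64-linux-gnu
```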
@@ -97,11 +97,13 @@ CFLAGS_VM_VERSION := \
|
||||
|
||||
DISABLED_WARNINGS_gcc := array-bounds comment delete-non-virtual-dtor \
|
||||
empty-body format-zero-length implicit-fallthrough int-in-bool-context \
|
||||
invalid-offsetof \
|
||||
maybe-uninitialized missing-field-initializers \
|
||||
shift-negative-value unknown-pragmas unused-but-set-variable \
|
||||
unused-local-typedefs unused-variable
|
||||
|
||||
DISABLED_WARNINGS_clang := delete-non-abstract-non-virtual-dtor missing-braces \
|
||||
DISABLED_WARNINGS_clang := delete-non-abstract-non-virtual-dtor \
|
||||
invalid-offsetof missing-braces \
|
||||
sometimes-uninitialized unknown-pragmas unused-but-set-variable \
|
||||
unused-function unused-local-typedef unused-private-field unused-variable
|
||||
|
||||
|
||||
@@ -12,12 +12,17 @@
|
||||
],
|
||||
"extensions": {
|
||||
"recommendations": [
|
||||
"oracle.oracle-java",
|
||||
// {{INDEXER_EXTENSIONS}}
|
||||
]
|
||||
},
|
||||
"settings": {
|
||||
// {{INDEXER_SETTINGS}}
|
||||
|
||||
// Java extension
|
||||
"jdk.project.jdkhome": "{{OUTPUTDIR}}/jdk",
|
||||
"jdk.java.onSave.organizeImports": false, // prevents unnecessary changes
|
||||
|
||||
// Additional conventions
|
||||
"files.associations": {
|
||||
"*.gmk": "makefile"
|
||||
|
||||
@@ -79,6 +79,7 @@ class Bundle {
|
||||
"NumberElements/nan",
|
||||
"NumberElements/currencyDecimal",
|
||||
"NumberElements/currencyGroup",
|
||||
"NumberElements/lenientMinusSigns",
|
||||
};
|
||||
|
||||
private static final String[] TIME_PATTERN_KEYS = {
|
||||
|
||||
@@ -844,6 +844,26 @@ class LDMLParseHandler extends AbstractLDMLHandler<Object> {
|
||||
});
|
||||
break;
|
||||
|
||||
// Lenient parsing
|
||||
case "parseLenients":
|
||||
if ("lenient".equals(attributes.getValue("level"))) {
|
||||
pushKeyContainer(qName, attributes, attributes.getValue("scope"));
|
||||
} else {
|
||||
pushIgnoredContainer(qName);
|
||||
}
|
||||
break;
|
||||
|
||||
case "parseLenient":
|
||||
// Use only the lenient minus sign for now
|
||||
if (currentContainer instanceof KeyContainer kc
|
||||
&& kc.getKey().equals("number")
|
||||
&& attributes.getValue("sample").equals("-")) {
|
||||
pushStringEntry(qName, attributes, currentNumberingSystem + "NumberElements/lenientMinusSigns");
|
||||
} else {
|
||||
pushIgnoredContainer(qName);
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
// treat anything else as a container
|
||||
pushContainer(qName, attributes);
|
||||
@@ -1150,6 +1170,14 @@ class LDMLParseHandler extends AbstractLDMLHandler<Object> {
|
||||
currentStyle = "";
|
||||
putIfEntry();
|
||||
break;
|
||||
case "parseLenient":
|
||||
if (currentContainer instanceof StringEntry se) {
|
||||
// Convert to a simple concatenation of lenient minuses
|
||||
// e.g. "[\--﹣ ‐‑ ‒ – −⁻₋ ➖]" -> "--﹣‐‑‒–−⁻₋➖" for the root locale
|
||||
put(se.getKey(), se.getValue().replaceAll("[\\[\\]\\\\ ]", ""));
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
putIfEntry();
|
||||
}
|
||||
|
||||
make/langtools/tools/flagsgenerator/FlagsGenerator.java (new file, 161 lines)

@@ -0,0 +1,161 @@
|
||||
/*
|
||||
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation. Oracle designates this
|
||||
* particular file as subject to the "Classpath" exception as provided
|
||||
* by Oracle in the LICENSE file that accompanied this code.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
package flagsgenerator;
|
||||
|
||||
import com.sun.source.tree.CompilationUnitTree;
|
||||
import com.sun.source.util.JavacTask;
|
||||
import com.sun.source.util.TreePath;
|
||||
import com.sun.source.util.Trees;
|
||||
import java.io.IOException;
|
||||
import java.io.PrintWriter;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Paths;
|
||||
import java.util.ArrayList;
|
||||
import java.util.EnumMap;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Map.Entry;
|
||||
import java.util.Set;
|
||||
import java.util.TreeMap;
|
||||
import java.util.stream.Collectors;
|
||||
import javax.lang.model.element.AnnotationMirror;
|
||||
import javax.lang.model.element.TypeElement;
|
||||
import javax.lang.model.element.VariableElement;
|
||||
import javax.lang.model.util.ElementFilter;
|
||||
import javax.tools.ToolProvider;
|
||||
|
||||
public class FlagsGenerator {
|
||||
public static void main(String... args) throws IOException {
|
||||
var compiler = ToolProvider.getSystemJavaCompiler();
|
||||
|
||||
try (var fm = compiler.getStandardFileManager(null, null, null)) {
|
||||
JavacTask task = (JavacTask) compiler.getTask(null, null, d -> {}, null, null, fm.getJavaFileObjects(args[0]));
|
||||
Trees trees = Trees.instance(task);
|
||||
CompilationUnitTree cut = task.parse().iterator().next();
|
||||
|
||||
task.analyze();
|
||||
|
||||
TypeElement clazz = (TypeElement) trees.getElement(new TreePath(new TreePath(cut), cut.getTypeDecls().get(0)));
|
||||
Map<Integer, List<String>> flag2Names = new TreeMap<>();
|
||||
Map<FlagTarget, Map<Integer, List<String>>> target2FlagBit2Fields = new EnumMap<>(FlagTarget.class);
|
||||
Map<String, String> customToString = new HashMap<>();
|
||||
Set<String> noToString = new HashSet<>();
|
||||
|
||||
for (VariableElement field : ElementFilter.fieldsIn(clazz.getEnclosedElements())) {
|
||||
String flagName = field.getSimpleName().toString();
|
||||
for (AnnotationMirror am : field.getAnnotationMirrors()) {
|
||||
switch (am.getAnnotationType().toString()) {
|
||||
case "com.sun.tools.javac.code.Flags.Use" -> {
|
||||
long flagValue = ((Number) field.getConstantValue()).longValue();
|
||||
int flagBit = 63 - Long.numberOfLeadingZeros(flagValue);
|
||||
|
||||
flag2Names.computeIfAbsent(flagBit, _ -> new ArrayList<>())
|
||||
.add(flagName);
|
||||
|
||||
List<?> originalTargets = (List<?>) valueOfValueAttribute(am);
|
||||
originalTargets.stream()
|
||||
.map(value -> FlagTarget.valueOf(value.toString()))
|
||||
.forEach(target -> target2FlagBit2Fields.computeIfAbsent(target, _ -> new HashMap<>())
|
||||
.computeIfAbsent(flagBit, _ -> new ArrayList<>())
|
||||
.add(flagName));
|
||||
}
|
||||
case "com.sun.tools.javac.code.Flags.CustomToStringValue" -> {
|
||||
customToString.put(flagName, (String) valueOfValueAttribute(am));
|
||||
}
|
||||
case "com.sun.tools.javac.code.Flags.NoToStringValue" -> {
|
||||
noToString.add(flagName);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//verify there are no flag overlaps:
|
||||
for (Entry<FlagTarget, Map<Integer, List<String>>> targetAndFlag : target2FlagBit2Fields.entrySet()) {
|
||||
for (Entry<Integer, List<String>> flagAndFields : targetAndFlag.getValue().entrySet()) {
|
||||
if (flagAndFields.getValue().size() > 1) {
|
||||
throw new AssertionError("duplicate flag for target: " + targetAndFlag.getKey() +
|
||||
", flag: " + flagAndFields.getKey() +
|
||||
", flags fields: " + flagAndFields.getValue());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
try (PrintWriter out = new PrintWriter(Files.newBufferedWriter(Paths.get(args[1])))) {
|
||||
out.println("""
|
||||
package com.sun.tools.javac.code;
|
||||
|
||||
public enum FlagsEnum {
|
||||
""");
|
||||
for (Entry<Integer, List<String>> e : flag2Names.entrySet()) {
|
||||
String constantName = e.getValue().stream().collect(Collectors.joining("_OR_"));
|
||||
String toString = e.getValue()
|
||||
.stream()
|
||||
.filter(n -> !noToString.contains(n))
|
||||
.map(n -> customToString.getOrDefault(n, n.toLowerCase(Locale.US)))
|
||||
.collect(Collectors.joining(" or "));
|
||||
out.println(" " + constantName + "(1L<<" + e.getKey() + ", \"" + toString + "\"),");
|
||||
}
|
||||
out.println("""
|
||||
;
|
||||
|
||||
private final long value;
|
||||
private final String toString;
|
||||
private FlagsEnum(long value, String toString) {
|
||||
this.value = value;
|
||||
this.toString = toString;
|
||||
}
|
||||
public long value() {
|
||||
return value;
|
||||
}
|
||||
public String toString() {
|
||||
return toString;
|
||||
}
|
||||
}
|
||||
""");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static Object valueOfValueAttribute(AnnotationMirror am) {
|
||||
return am.getElementValues()
|
||||
.values()
|
||||
.iterator()
|
||||
.next()
|
||||
.getValue();
|
||||
}
|
||||
|
||||
private enum FlagTarget {
|
||||
BLOCK,
|
||||
CLASS,
|
||||
METHOD,
|
||||
MODULE,
|
||||
PACKAGE,
|
||||
TYPE_VAR,
|
||||
VARIABLE;
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -76,7 +76,7 @@ public interface MessageType {
|
||||
ANNOTATION("annotation", "Compound", "com.sun.tools.javac.code.Attribute"),
|
||||
BOOLEAN("boolean", "boolean", null),
|
||||
COLLECTION("collection", "Collection", "java.util"),
|
||||
FLAG("flag", "Flag", "com.sun.tools.javac.code.Flags"),
|
||||
FLAG("flag", "FlagsEnum", "com.sun.tools.javac.code"),
|
||||
FRAGMENT("fragment", "Fragment", null),
|
||||
DIAGNOSTIC("diagnostic", "JCDiagnostic", "com.sun.tools.javac.util"),
|
||||
MODIFIER("modifier", "Modifier", "javax.lang.model.element"),
|
||||
|
||||
@@ -177,7 +177,8 @@ ifeq ($(ENABLE_HEADLESS_ONLY), false)
|
||||
endif
|
||||
|
||||
LIBSPLASHSCREEN_CFLAGS += -DSPLASHSCREEN -DPNG_NO_MMX_CODE \
|
||||
-DPNG_ARM_NEON_OPT=0 -DPNG_ARM_NEON_IMPLEMENTATION=0
|
||||
-DPNG_ARM_NEON_OPT=0 -DPNG_ARM_NEON_IMPLEMENTATION=0 \
|
||||
-DPNG_LOONGARCH_LSX_OPT=0
|
||||
|
||||
ifeq ($(call isTargetOs, linux)+$(call isTargetCpuArch, ppc), true+true)
|
||||
LIBSPLASHSCREEN_CFLAGS += -DPNG_POWERPC_VSX_OPT=0
|
||||
|
||||
@@ -41,17 +41,17 @@ $(eval $(call SetupCompileProperties, COMPILE_PROPERTIES, \
|
||||
|
||||
TARGETS += $(COMPILE_PROPERTIES)
|
||||
|
||||
################################################################################
|
||||
#
|
||||
# Compile properties files into enum-like classes using the propertiesparser tool
|
||||
#
|
||||
|
||||
# To avoid reevaluating the compilation setup for the tools each time this file
|
||||
# is included, the following trick is used to be able to declare a dependency on
|
||||
# the built tools.
|
||||
BUILD_TOOLS_LANGTOOLS := $(call SetupJavaCompilationCompileTarget, \
|
||||
BUILD_TOOLS_LANGTOOLS, $(BUILDTOOLS_OUTPUTDIR)/langtools_tools_classes)
|
||||
|
||||
################################################################################
|
||||
#
|
||||
# Compile properties files into enum-like classes using the propertiesparser tool
|
||||
#
|
||||
|
||||
TOOL_PARSEPROPERTIES_CMD := $(JAVA_SMALL) -cp $(BUILDTOOLS_OUTPUTDIR)/langtools_tools_classes \
|
||||
propertiesparser.PropertiesParser
|
||||
|
||||
@@ -76,3 +76,26 @@ $(eval $(call SetupExecute, PARSEPROPERTIES, \
|
||||
TARGETS += $(PARSEPROPERTIES)
|
||||
|
||||
################################################################################
|
||||
#
|
||||
# Generate FlagsEnum from Flags constants
|
||||
#
|
||||
|
||||
TOOL_FLAGSGENERATOR_CMD := $(JAVA_SMALL) -cp $(BUILDTOOLS_OUTPUTDIR)/langtools_tools_classes \
|
||||
flagsgenerator.FlagsGenerator
|
||||
|
||||
FLAGS_SRC := \
|
||||
$(MODULE_SRC)/share/classes/com/sun/tools/javac/code/Flags.java
|
||||
|
||||
FLAGS_OUT := \
|
||||
$(SUPPORT_OUTPUTDIR)/gensrc/$(MODULE)/com/sun/tools/javac/code/FlagsEnum.java
|
||||
|
||||
$(eval $(call SetupExecute, FLAGSGENERATOR, \
|
||||
WARN := Generating FlagsEnum, \
|
||||
DEPS := $(FLAGS_SRC) $(BUILD_TOOLS_LANGTOOLS), \
|
||||
OUTPUT_FILE := $(FLAGS_OUT), \
|
||||
COMMAND := $(TOOL_FLAGSGENERATOR_CMD) $(FLAGS_SRC) $(FLAGS_OUT), \
|
||||
))
|
||||
|
||||
TARGETS += $(FLAGSGENERATOR)
|
||||
|
||||
################################################################################
|
||||
|
||||
@@ -33,4 +33,6 @@ DISABLED_WARNINGS_java += dangling-doc-comments this-escape
|
||||
|
||||
JAVAC_FLAGS += -parameters -XDstringConcat=inline
|
||||
|
||||
TARGET_RELEASE := $(TARGET_RELEASE_BOOTJDK)
|
||||
|
||||
################################################################################
|
||||
|
||||
@@ -68,7 +68,7 @@ $(eval $(call SetupJdkExecutable, BUILD_JPACKAGEAPPLAUNCHER, \
|
||||
-rpath @executable_path/../PlugIns/, \
|
||||
LIBS_macosx := -framework Cocoa, \
|
||||
LIBS_windows := msi.lib ole32.lib shell32.lib shlwapi.lib user32.lib, \
|
||||
LIBS_linux := $(LIBDL), \
|
||||
LIBS_linux := $(LIBDL) $(LIBPTHREAD), \
|
||||
MANIFEST := $(JAVA_MANIFEST), \
|
||||
MANIFEST_VERSION := $(VERSION_NUMBER_FOUR_POSITIONS) \
|
||||
))
|
||||
@@ -97,7 +97,7 @@ ifeq ($(call isTargetOs, linux), true)
|
||||
DISABLED_WARNINGS_clang_JvmLauncherLib.c := format-nonliteral, \
|
||||
DISABLED_WARNINGS_clang_tstrings.cpp := format-nonliteral, \
|
||||
LD_SET_ORIGIN := false, \
|
||||
LIBS_linux := $(LIBDL), \
|
||||
LIBS_linux := $(LIBDL) $(LIBPTHREAD), \
|
||||
))
|
||||
|
||||
TARGETS += $(BUILD_LIBJPACKAGEAPPLAUNCHERAUX)
|
||||
@@ -121,15 +121,15 @@ ifeq ($(call isTargetOs, windows), true)
|
||||
TARGETS += $(BUILD_LIBJPACKAGE)
|
||||
|
||||
##############################################################################
|
||||
## Build libwixhelper
|
||||
## Build libmsica
|
||||
##############################################################################
|
||||
|
||||
# Build Wix custom action helper
|
||||
# Build MSI custom action library
|
||||
# Output library in resources dir, and symbols in the object dir
|
||||
$(eval $(call SetupJdkLibrary, BUILD_LIBWIXHELPER, \
|
||||
NAME := wixhelper, \
|
||||
$(eval $(call SetupJdkLibrary, BUILD_LIBMSICA, \
|
||||
NAME := msica, \
|
||||
OUTPUT_DIR := $(JPACKAGE_OUTPUT_DIR), \
|
||||
SYMBOLS_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libwixhelper, \
|
||||
SYMBOLS_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libmsica, \
|
||||
ONLY_EXPORTED := true, \
|
||||
OPTIMIZATION := LOW, \
|
||||
EXTRA_SRC := common, \
|
||||
@@ -139,7 +139,7 @@ ifeq ($(call isTargetOs, windows), true)
|
||||
LIBS_windows := msi.lib ole32.lib shell32.lib shlwapi.lib user32.lib, \
|
||||
))
|
||||
|
||||
TARGETS += $(BUILD_LIBWIXHELPER)
|
||||
TARGETS += $(BUILD_LIBMSICA)
|
||||
|
||||
##############################################################################
|
||||
## Build msiwrapper
|
||||
|
||||
@@ -62,17 +62,22 @@ Help()
|
||||
echo "options:"
|
||||
echo "-c Specifies the company. Set to Oracle by default."
|
||||
echo "-y Specifies the copyright year. Set to current year by default."
|
||||
echo "-b Specifies the base reference for change set lookup."
|
||||
echo "-f Updates the copyright for all change sets in a given year,"
|
||||
echo " as specified by -y."
|
||||
echo " as specified by -y. Overrides -b flag."
|
||||
echo "-h Print this help."
|
||||
echo
|
||||
}
|
||||
|
||||
full_year=false
|
||||
base_reference=master
|
||||
|
||||
# Process options
|
||||
while getopts "c:fhy:" option; do
|
||||
while getopts "b:c:fhy:" option; do
|
||||
case $option in
|
||||
b) # supplied base reference
|
||||
base_reference=${OPTARG}
|
||||
;;
|
||||
c) # supplied company year
|
||||
company=${OPTARG}
|
||||
;;
|
||||
@@ -111,7 +116,7 @@ else
|
||||
if [ "$full_year" = "true" ]; then
|
||||
vcs_list_changesets=(git log --no-merges --since="${year}-01-01T00:00:00Z" --until="${year}-12-31T23:59:59Z" --pretty=tformat:"%H")
|
||||
else
|
||||
vcs_list_changesets=(git log --no-merges 'master..HEAD' --since="${year}-01-01T00:00:00Z" --until="${year}-12-31T23:59:59Z" --pretty=tformat:"%H")
|
||||
vcs_list_changesets=(git log --no-merges "${base_reference}..HEAD" --since="${year}-01-01T00:00:00Z" --until="${year}-12-31T23:59:59Z" --pretty=tformat:"%H")
|
||||
fi
|
||||
vcs_changeset_message=(git log -1 --pretty=tformat:"%B") # followed by ${changeset}
|
||||
vcs_changeset_files=(git diff-tree --no-commit-id --name-only -r) # followed by ${changeset}
|
||||
|
||||
101
make/scripts/update_pch.sh
Normal file
101
make/scripts/update_pch.sh
Normal file
@@ -0,0 +1,101 @@
|
||||
#!/bin/sh
|
||||
# Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
# under the terms of the GNU General Public License version 2 only, as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
# version 2 for more details (a copy is included in the LICENSE file that
|
||||
# accompanied this code).
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License version
|
||||
# 2 along with this work; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#
|
||||
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
# or visit www.oracle.com if you need additional information or have any
|
||||
# questions.
|
||||
|
||||
# The output of this script may require some degree of human curation:
|
||||
# - Redundant headers, e.g. both x.hpp, x.inline.hpp are included;
|
||||
# - Headers relative to a non-default feature should be protected by an
|
||||
# appropriate 'if' clause to make sure all variants can build without
|
||||
# errors.
|
||||
|
||||
# Time threshold for header compilation, if the time exceeds the
|
||||
# threshold the header will be precompiled.
|
||||
if [ -z "$MIN_MS" ]; then
|
||||
MIN_MS=100000
|
||||
fi
|
||||
|
||||
if [ -z "$CLEAN" ]; then
|
||||
CLEAN=true
|
||||
elif [ "$CLEAN" != "true" ] && [ "$CLEAN" != "false" ]; then
|
||||
echo "Expected either 'true' or 'false' for CLEAN"
|
||||
fi
|
||||
|
||||
# CBA_PATH should point to a valid ClangBuildAnalyzer executable.
|
||||
# Build steps:
|
||||
# git clone --depth 1 git@github.com:aras-p/ClangBuildAnalyzer.git
|
||||
# cd ClangBuildAnalyzer
|
||||
# make -f projects/make/Makefile
|
||||
if [ -z "$CBA_PATH" ]; then
|
||||
CBA_PATH="./ClangBuildAnalyzer/build/ClangBuildAnalyzer"
|
||||
fi
|
||||
|
||||
set -eux
|
||||
|
||||
PRECOMPILED_HPP="src/hotspot/share/precompiled/precompiled.hpp"
|
||||
CBA_CONFIG="ClangBuildAnalyzer.ini"
|
||||
TIMESTAMP="$(date +%Y%m%d-%H%M)"
|
||||
RUN_NAME="pch_update_$TIMESTAMP"
|
||||
CBA_OUTPUT="cba_out_$TIMESTAMP"
|
||||
|
||||
if [ "$CLEAN" = "true" ]; then
|
||||
trap 'rm -rf "build/'"$RUN_NAME"'" "$CBA_OUTPUT" "$CBA_CONFIG"' EXIT
|
||||
fi
|
||||
|
||||
sh configure --with-toolchain-type=clang \
|
||||
--with-conf-name="$RUN_NAME" \
|
||||
--disable-precompiled-headers \
|
||||
--with-extra-cxxflags="-ftime-trace" \
|
||||
--with-extra-cflags="-ftime-trace"
|
||||
|
||||
make clean CONF_NAME="$RUN_NAME"
|
||||
make hotspot CONF_NAME="$RUN_NAME"
|
||||
"$CBA_PATH" --all "./build/$RUN_NAME/hotspot/variant-server/libjvm/objs" \
|
||||
"$CBA_OUTPUT"
|
||||
|
||||
# Preserve license and comments on top
|
||||
cat "$PRECOMPILED_HPP" | awk '/^#include/ {exit} {print}' > "$PRECOMPILED_HPP.tmp"
|
||||
|
||||
if [ ! -f "$CBA_CONFIG" ]; then
|
||||
cat <<EOF > "$CBA_CONFIG"
|
||||
[counts]
|
||||
header=100
|
||||
headerChain=0
|
||||
template=0
|
||||
function=0
|
||||
fileCodegen=0
|
||||
fileParse=0
|
||||
|
||||
[misc]
|
||||
onlyRootHeaders=true
|
||||
EOF
|
||||
fi
|
||||
|
||||
"$CBA_PATH" --analyze "$CBA_OUTPUT" | \
|
||||
grep " ms: " | \
|
||||
# Keep the headers more expensive than ${1}ms
|
||||
awk -v x="$MIN_MS" '$1 < x { exit } { print $3 }' | \
|
||||
# Filter away non-hotspot headers
|
||||
grep hotspot/share | \
|
||||
awk -F "hotspot/share/" '{ printf "#include \"%s\"\n", $2 }' \
|
||||
>> "$PRECOMPILED_HPP.tmp"
|
||||
mv "$PRECOMPILED_HPP.tmp" "$PRECOMPILED_HPP"
|
||||
|
||||
java test/hotspot/jtreg/sources/SortIncludes.java --update "$PRECOMPILED_HPP"
|
||||
@@ -922,8 +922,10 @@ public class GenerateJfrFiles {
|
||||
}
|
||||
out.write(" using JfrEvent<Event" + event.name
|
||||
+ ">::commit; // else commit() is hidden by overloaded versions in this class");
|
||||
printConstructor2(out, event, empty);
|
||||
printCommitMethod(out, event, empty);
|
||||
if (!event.fields.isEmpty()) {
|
||||
printConstructor2(out, event, empty);
|
||||
printCommitMethod(out, event, empty);
|
||||
}
|
||||
if (!empty) {
|
||||
printVerify(out, event.fields);
|
||||
}
|
||||
|
||||
@@ -62,7 +62,8 @@ BUILD_JDK_JTREG_LIBRARIES_JDK_LIBS_libGetXSpace := java.base:libjava
|
||||
ifeq ($(call isTargetOs, windows), true)
|
||||
BUILD_JDK_JTREG_EXCLUDE += libDirectIO.c libInheritedChannel.c \
|
||||
libExplicitAttach.c libImplicitAttach.c \
|
||||
exelauncher.c libFDLeaker.c exeFDLeakTester.c
|
||||
exelauncher.c libFDLeaker.c exeFDLeakTester.c \
|
||||
libChangeSignalDisposition.c exePrintSignalDisposition.c
|
||||
|
||||
BUILD_JDK_JTREG_EXECUTABLES_LIBS_exeNullCallerTest := $(LIBCXX)
|
||||
BUILD_JDK_JTREG_EXECUTABLES_LIBS_exerevokeall := advapi32.lib
|
||||
@@ -137,6 +138,7 @@ ifneq ($(filter build-test-jdk-jtreg-native, $(MAKECMDGOALS)), )
|
||||
OUTPUT_DIR := $(BUILD_JDK_JTREG_OUTPUT_DIR), \
|
||||
EXCLUDE := $(BUILD_JDK_JTREG_EXCLUDE), \
|
||||
EXTRA_FILES := $(BUILD_JDK_JTREG_EXTRA_FILES), \
|
||||
LIBS := $(LIBPTHREAD), \
|
||||
))
|
||||
endif
|
||||
|
||||
|
||||
@@ -881,6 +881,46 @@ reg_class vectorx_reg(
|
||||
V31, V31_H, V31_J, V31_K
|
||||
);
|
||||
|
||||
// Class for vector register V10
|
||||
reg_class v10_veca_reg(
|
||||
V10, V10_H, V10_J, V10_K
|
||||
);
|
||||
|
||||
// Class for vector register V11
|
||||
reg_class v11_veca_reg(
|
||||
V11, V11_H, V11_J, V11_K
|
||||
);
|
||||
|
||||
// Class for vector register V12
|
||||
reg_class v12_veca_reg(
|
||||
V12, V12_H, V12_J, V12_K
|
||||
);
|
||||
|
||||
// Class for vector register V13
|
||||
reg_class v13_veca_reg(
|
||||
V13, V13_H, V13_J, V13_K
|
||||
);
|
||||
|
||||
// Class for vector register V17
|
||||
reg_class v17_veca_reg(
|
||||
V17, V17_H, V17_J, V17_K
|
||||
);
|
||||
|
||||
// Class for vector register V18
|
||||
reg_class v18_veca_reg(
|
||||
V18, V18_H, V18_J, V18_K
|
||||
);
|
||||
|
||||
// Class for vector register V23
|
||||
reg_class v23_veca_reg(
|
||||
V23, V23_H, V23_J, V23_K
|
||||
);
|
||||
|
||||
// Class for vector register V24
|
||||
reg_class v24_veca_reg(
|
||||
V24, V24_H, V24_J, V24_K
|
||||
);
|
||||
|
||||
// Class for 128 bit register v0
|
||||
reg_class v0_reg(
|
||||
V0, V0_H
|
||||
@@ -4372,10 +4412,9 @@ operand immI8()
|
||||
%}
|
||||
|
||||
// 8 bit signed value (simm8), or #simm8 LSL 8.
|
||||
operand immI8_shift8()
|
||||
operand immIDupV()
|
||||
%{
|
||||
predicate((n->get_int() <= 127 && n->get_int() >= -128) ||
|
||||
(n->get_int() <= 32512 && n->get_int() >= -32768 && (n->get_int() & 0xff) == 0));
|
||||
predicate(Assembler::operand_valid_for_sve_dup_immediate((int64_t)n->get_int()));
|
||||
match(ConI);
|
||||
|
||||
op_cost(0);
|
||||
@@ -4384,10 +4423,9 @@ operand immI8_shift8()
|
||||
%}
|
||||
|
||||
// 8 bit signed value (simm8), or #simm8 LSL 8.
|
||||
operand immL8_shift8()
|
||||
operand immLDupV()
|
||||
%{
|
||||
predicate((n->get_long() <= 127 && n->get_long() >= -128) ||
|
||||
(n->get_long() <= 32512 && n->get_long() >= -32768 && (n->get_long() & 0xff) == 0));
|
||||
predicate(Assembler::operand_valid_for_sve_dup_immediate(n->get_long()));
|
||||
match(ConL);
|
||||
|
||||
op_cost(0);
|
||||
@@ -4395,6 +4433,17 @@ operand immL8_shift8()
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
// 8 bit signed value (simm8), or #simm8 LSL 8.
|
||||
operand immHDupV()
|
||||
%{
|
||||
predicate(Assembler::operand_valid_for_sve_dup_immediate((int64_t)n->geth()));
|
||||
match(ConH);
|
||||
|
||||
op_cost(0);
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
// 8 bit integer valid for vector add sub immediate
|
||||
operand immBAddSubV()
|
||||
%{
|
||||
@@ -4969,6 +5018,86 @@ operand vReg()
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
|
||||
operand vReg_V10()
|
||||
%{
|
||||
constraint(ALLOC_IN_RC(v10_veca_reg));
|
||||
match(vReg);
|
||||
|
||||
op_cost(0);
|
||||
format %{ %}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
|
||||
operand vReg_V11()
|
||||
%{
|
||||
constraint(ALLOC_IN_RC(v11_veca_reg));
|
||||
match(vReg);
|
||||
|
||||
op_cost(0);
|
||||
format %{ %}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
|
||||
operand vReg_V12()
|
||||
%{
|
||||
constraint(ALLOC_IN_RC(v12_veca_reg));
|
||||
match(vReg);
|
||||
|
||||
op_cost(0);
|
||||
format %{ %}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
|
||||
operand vReg_V13()
|
||||
%{
|
||||
constraint(ALLOC_IN_RC(v13_veca_reg));
|
||||
match(vReg);
|
||||
|
||||
op_cost(0);
|
||||
format %{ %}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
|
||||
operand vReg_V17()
|
||||
%{
|
||||
constraint(ALLOC_IN_RC(v17_veca_reg));
|
||||
match(vReg);
|
||||
|
||||
op_cost(0);
|
||||
format %{ %}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
|
||||
operand vReg_V18()
|
||||
%{
|
||||
constraint(ALLOC_IN_RC(v18_veca_reg));
|
||||
match(vReg);
|
||||
|
||||
op_cost(0);
|
||||
format %{ %}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
|
||||
operand vReg_V23()
|
||||
%{
|
||||
constraint(ALLOC_IN_RC(v23_veca_reg));
|
||||
match(vReg);
|
||||
|
||||
op_cost(0);
|
||||
format %{ %}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
|
||||
operand vReg_V24()
|
||||
%{
|
||||
constraint(ALLOC_IN_RC(v24_veca_reg));
|
||||
match(vReg);
|
||||
|
||||
op_cost(0);
|
||||
format %{ %}
|
||||
interface(REG_INTER);
|
||||
%}
|
||||
|
||||
operand vecA()
|
||||
%{
|
||||
constraint(ALLOC_IN_RC(vectora_reg));
|
||||
@@ -6957,18 +7086,16 @@ instruct loadConD(vRegD dst, immD con) %{
|
||||
%}
|
||||
|
||||
// Load Half Float Constant
|
||||
// The "ldr" instruction loads a 32-bit word from the constant pool into a
|
||||
// 32-bit register but only the bottom half will be populated and the top
|
||||
// 16 bits are zero.
|
||||
instruct loadConH(vRegF dst, immH con) %{
|
||||
match(Set dst con);
|
||||
format %{
|
||||
"ldrs $dst, [$constantaddress]\t# load from constant table: half float=$con\n\t"
|
||||
%}
|
||||
format %{ "mov rscratch1, $con\n\t"
|
||||
"fmov $dst, rscratch1"
|
||||
%}
|
||||
ins_encode %{
|
||||
__ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
|
||||
__ movw(rscratch1, (uint32_t)$con$$constant);
|
||||
__ fmovs($dst$$FloatRegister, rscratch1);
|
||||
%}
|
||||
ins_pipe(fp_load_constant_s);
|
||||
ins_pipe(pipe_class_default);
|
||||
%}
|
||||
|
||||
// Store Instructions
|
||||
@@ -16161,41 +16288,8 @@ instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
|
||||
// ============================================================================
|
||||
// inlined locking and unlocking
|
||||
|
||||
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
|
||||
%{
|
||||
predicate(LockingMode != LM_LIGHTWEIGHT);
|
||||
match(Set cr (FastLock object box));
|
||||
effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
|
||||
|
||||
ins_cost(5 * INSN_COST);
|
||||
format %{ "fastlock $object,$box\t! kills $tmp,$tmp2,$tmp3" %}
|
||||
|
||||
ins_encode %{
|
||||
__ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_serial);
|
||||
%}
|
||||
|
||||
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
|
||||
%{
|
||||
predicate(LockingMode != LM_LIGHTWEIGHT);
|
||||
match(Set cr (FastUnlock object box));
|
||||
effect(TEMP tmp, TEMP tmp2);
|
||||
|
||||
ins_cost(5 * INSN_COST);
|
||||
format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}
|
||||
|
||||
ins_encode %{
|
||||
__ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register);
|
||||
%}
|
||||
|
||||
ins_pipe(pipe_serial);
|
||||
%}
|
||||
|
||||
instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
|
||||
%{
|
||||
predicate(LockingMode == LM_LIGHTWEIGHT);
|
||||
match(Set cr (FastLock object box));
|
||||
effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
|
||||
|
||||
@@ -16211,7 +16305,6 @@ instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp
|
||||
|
||||
instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
|
||||
%{
|
||||
predicate(LockingMode == LM_LIGHTWEIGHT);
|
||||
match(Set cr (FastUnlock object box));
|
||||
effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
|
||||
|
||||
|
||||
@@ -257,6 +257,28 @@ source %{
|
||||
return false;
|
||||
}
|
||||
break;
|
||||
case Op_SelectFromTwoVector:
|
||||
// The "tbl" instruction for two vector table is supported only in Neon and SVE2. Return
|
||||
// false if vector length > 16B but supported SVE version < 2.
|
||||
// For vector length of 16B, generate SVE2 "tbl" instruction if SVE2 is supported, else
|
||||
// generate Neon "tbl" instruction to select from two vectors.
|
||||
// This operation is disabled for doubles and longs on machines with SVE < 2 and instead
|
||||
// the default VectorRearrange + VectorBlend is generated because the performance of the default
|
||||
// implementation was better than or equal to the implementation for SelectFromTwoVector.
|
||||
if (UseSVE < 2 && (type2aelembytes(bt) == 8 || length_in_bytes > 16)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Because the SVE2 "tbl" instruction is unpredicated and partial operations cannot be generated
|
||||
// using masks, we disable this operation on machines where length_in_bytes < MaxVectorSize
|
||||
// on that machine with the only exception of 8B vector length. This is because at the time of
|
||||
// writing this, there is no SVE2 machine available with length_in_bytes > 8 and
|
||||
// length_in_bytes < MaxVectorSize to test this operation on (for example - there isn't an
|
||||
// SVE2 machine available with MaxVectorSize = 32 to test a case with length_in_bytes = 16).
|
||||
if (UseSVE == 2 && length_in_bytes > 8 && length_in_bytes < MaxVectorSize) {
|
||||
return false;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@@ -4853,7 +4875,7 @@ instruct replicateB_imm8_gt128b(vReg dst, immI8 con) %{
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct replicateI_imm8_gt128b(vReg dst, immI8_shift8 con) %{
|
||||
instruct replicateI_imm8_gt128b(vReg dst, immIDupV con) %{
|
||||
predicate(Matcher::vector_length_in_bytes(n) > 16 &&
|
||||
(Matcher::vector_element_basic_type(n) == T_SHORT ||
|
||||
Matcher::vector_element_basic_type(n) == T_INT));
|
||||
@@ -4876,7 +4898,7 @@ instruct replicateL_imm_128b(vReg dst, immL con) %{
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct replicateL_imm8_gt128b(vReg dst, immL8_shift8 con) %{
|
||||
instruct replicateL_imm8_gt128b(vReg dst, immLDupV con) %{
|
||||
predicate(Matcher::vector_length_in_bytes(n) > 16);
|
||||
match(Set dst (Replicate con));
|
||||
format %{ "replicateL_imm8_gt128b $dst, $con\t# vector > 128 bits" %}
|
||||
@@ -4887,19 +4909,27 @@ instruct replicateL_imm8_gt128b(vReg dst, immL8_shift8 con) %{
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// Replicate a 16-bit half precision float value
|
||||
instruct replicateHF_imm(vReg dst, immH con) %{
|
||||
// Replicate an immediate 16-bit half precision float value
|
||||
instruct replicateHF_imm_le128b(vReg dst, immH con) %{
|
||||
predicate(Matcher::vector_length_in_bytes(n) <= 16);
|
||||
match(Set dst (Replicate con));
|
||||
format %{ "replicateHF_imm $dst, $con\t# replicate immediate half-precision float" %}
|
||||
format %{ "replicateHF_imm_le128b $dst, $con\t# vector <= 128 bits" %}
|
||||
ins_encode %{
|
||||
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
|
||||
int imm = (int)($con$$constant) & 0xffff;
|
||||
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
|
||||
__ mov($dst$$FloatRegister, get_arrangement(this), imm);
|
||||
} else { // length_in_bytes must be > 16 and SVE should be enabled
|
||||
assert(UseSVE > 0, "must be sve");
|
||||
__ sve_dup($dst$$FloatRegister, __ H, imm);
|
||||
}
|
||||
__ mov($dst$$FloatRegister, get_arrangement(this), imm);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// Replicate a 16-bit half precision float which is within the limits
|
||||
// for the operand - immHDupV
|
||||
instruct replicateHF_imm8_gt128b(vReg dst, immHDupV con) %{
|
||||
predicate(Matcher::vector_length_in_bytes(n) > 16);
|
||||
match(Set dst (Replicate con));
|
||||
format %{ "replicateHF_imm8_gt128b $dst, $con\t# vector > 128 bits" %}
|
||||
ins_encode %{
|
||||
assert(UseSVE > 0, "must be sve");
|
||||
__ sve_dup($dst$$FloatRegister, __ H, (int)($con$$constant));
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
@@ -7172,3 +7202,71 @@ instruct vexpandBits(vReg dst, vReg src1, vReg src2) %{
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// ------------------------------------- SelectFromTwoVector ------------------------------------
|
||||
// The Neon and SVE2 tbl instruction for two vector lookup requires both the source vectors to be
|
||||
// consecutive. The match rules for SelectFromTwoVector reserve two consecutive vector registers
|
||||
// for src1 and src2.
|
||||
// Four combinations of vector registers for vselect_from_two_vectors are chosen at random
|
||||
// (two from volatile and two from non-volatile set) which gives more freedom to the register
|
||||
// allocator to choose the best pair of source registers at that point.
|
||||
|
||||
instruct vselect_from_two_vectors_10_11(vReg dst, vReg_V10 src1, vReg_V11 src2,
|
||||
vReg index, vReg tmp) %{
|
||||
effect(TEMP_DEF dst, TEMP tmp);
|
||||
match(Set dst (SelectFromTwoVector (Binary index src1) src2));
|
||||
format %{ "vselect_from_two_vectors_10_11 $dst, $src1, $src2, $index\t# KILL $tmp" %}
|
||||
ins_encode %{
|
||||
BasicType bt = Matcher::vector_element_basic_type(this);
|
||||
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
|
||||
__ select_from_two_vectors($dst$$FloatRegister, $src1$$FloatRegister,
|
||||
$src2$$FloatRegister, $index$$FloatRegister,
|
||||
$tmp$$FloatRegister, bt, length_in_bytes);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct vselect_from_two_vectors_12_13(vReg dst, vReg_V12 src1, vReg_V13 src2,
|
||||
vReg index, vReg tmp) %{
|
||||
effect(TEMP_DEF dst, TEMP tmp);
|
||||
match(Set dst (SelectFromTwoVector (Binary index src1) src2));
|
||||
format %{ "vselect_from_two_vectors_12_13 $dst, $src1, $src2, $index\t# KILL $tmp" %}
|
||||
ins_encode %{
|
||||
BasicType bt = Matcher::vector_element_basic_type(this);
|
||||
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
|
||||
__ select_from_two_vectors($dst$$FloatRegister, $src1$$FloatRegister,
|
||||
$src2$$FloatRegister, $index$$FloatRegister,
|
||||
$tmp$$FloatRegister, bt, length_in_bytes);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct vselect_from_two_vectors_17_18(vReg dst, vReg_V17 src1, vReg_V18 src2,
|
||||
vReg index, vReg tmp) %{
|
||||
effect(TEMP_DEF dst, TEMP tmp);
|
||||
match(Set dst (SelectFromTwoVector (Binary index src1) src2));
|
||||
format %{ "vselect_from_two_vectors_17_18 $dst, $src1, $src2, $index\t# KILL $tmp" %}
|
||||
ins_encode %{
|
||||
BasicType bt = Matcher::vector_element_basic_type(this);
|
||||
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
|
||||
__ select_from_two_vectors($dst$$FloatRegister, $src1$$FloatRegister,
|
||||
$src2$$FloatRegister, $index$$FloatRegister,
|
||||
$tmp$$FloatRegister, bt, length_in_bytes);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct vselect_from_two_vectors_23_24(vReg dst, vReg_V23 src1, vReg_V24 src2,
|
||||
vReg index, vReg tmp) %{
|
||||
effect(TEMP_DEF dst, TEMP tmp);
|
||||
match(Set dst (SelectFromTwoVector (Binary index src1) src2));
|
||||
format %{ "vselect_from_two_vectors_23_24 $dst, $src1, $src2, $index\t# KILL $tmp" %}
|
||||
ins_encode %{
|
||||
BasicType bt = Matcher::vector_element_basic_type(this);
|
||||
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
|
||||
__ select_from_two_vectors($dst$$FloatRegister, $src1$$FloatRegister,
|
||||
$src2$$FloatRegister, $index$$FloatRegister,
|
||||
$tmp$$FloatRegister, bt, length_in_bytes);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
@@ -247,6 +247,28 @@ source %{
|
||||
return false;
|
||||
}
|
||||
break;
|
||||
case Op_SelectFromTwoVector:
|
||||
// The "tbl" instruction for two vector table is supported only in Neon and SVE2. Return
|
||||
// false if vector length > 16B but supported SVE version < 2.
|
||||
// For vector length of 16B, generate SVE2 "tbl" instruction if SVE2 is supported, else
|
||||
// generate Neon "tbl" instruction to select from two vectors.
|
||||
// This operation is disabled for doubles and longs on machines with SVE < 2 and instead
|
||||
// the default VectorRearrange + VectorBlend is generated because the performance of the default
|
||||
// implementation was better than or equal to the implementation for SelectFromTwoVector.
|
||||
if (UseSVE < 2 && (type2aelembytes(bt) == 8 || length_in_bytes > 16)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Because the SVE2 "tbl" instruction is unpredicated and partial operations cannot be generated
|
||||
// using masks, we disable this operation on machines where length_in_bytes < MaxVectorSize
|
||||
// on that machine with the only exception of 8B vector length. This is because at the time of
|
||||
// writing this, there is no SVE2 machine available with length_in_bytes > 8 and
|
||||
// length_in_bytes < MaxVectorSize to test this operation on (for example - there isn't an
|
||||
// SVE2 machine available with MaxVectorSize = 32 to test a case with length_in_bytes = 16).
|
||||
if (UseSVE == 2 && length_in_bytes > 8 && length_in_bytes < MaxVectorSize) {
|
||||
return false;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@@ -3085,7 +3107,7 @@ instruct replicateB_imm8_gt128b(vReg dst, immI8 con) %{
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct replicateI_imm8_gt128b(vReg dst, immI8_shift8 con) %{
|
||||
instruct replicateI_imm8_gt128b(vReg dst, immIDupV con) %{
|
||||
predicate(Matcher::vector_length_in_bytes(n) > 16 &&
|
||||
(Matcher::vector_element_basic_type(n) == T_SHORT ||
|
||||
Matcher::vector_element_basic_type(n) == T_INT));
|
||||
@@ -3108,7 +3130,7 @@ instruct replicateL_imm_128b(vReg dst, immL con) %{
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct replicateL_imm8_gt128b(vReg dst, immL8_shift8 con) %{
|
||||
instruct replicateL_imm8_gt128b(vReg dst, immLDupV con) %{
|
||||
predicate(Matcher::vector_length_in_bytes(n) > 16);
|
||||
match(Set dst (Replicate con));
|
||||
format %{ "replicateL_imm8_gt128b $dst, $con\t# vector > 128 bits" %}
|
||||
@@ -3119,19 +3141,27 @@ instruct replicateL_imm8_gt128b(vReg dst, immL8_shift8 con) %{
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// Replicate a 16-bit half precision float value
|
||||
instruct replicateHF_imm(vReg dst, immH con) %{
|
||||
// Replicate an immediate 16-bit half precision float value
|
||||
instruct replicateHF_imm_le128b(vReg dst, immH con) %{
|
||||
predicate(Matcher::vector_length_in_bytes(n) <= 16);
|
||||
match(Set dst (Replicate con));
|
||||
format %{ "replicateHF_imm $dst, $con\t# replicate immediate half-precision float" %}
|
||||
format %{ "replicateHF_imm_le128b $dst, $con\t# vector <= 128 bits" %}
|
||||
ins_encode %{
|
||||
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
|
||||
int imm = (int)($con$$constant) & 0xffff;
|
||||
if (VM_Version::use_neon_for_vector(length_in_bytes)) {
|
||||
__ mov($dst$$FloatRegister, get_arrangement(this), imm);
|
||||
} else { // length_in_bytes must be > 16 and SVE should be enabled
|
||||
assert(UseSVE > 0, "must be sve");
|
||||
__ sve_dup($dst$$FloatRegister, __ H, imm);
|
||||
}
|
||||
__ mov($dst$$FloatRegister, get_arrangement(this), imm);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
// Replicate a 16-bit half precision float which is within the limits
|
||||
// for the operand - immHDupV
|
||||
instruct replicateHF_imm8_gt128b(vReg dst, immHDupV con) %{
|
||||
predicate(Matcher::vector_length_in_bytes(n) > 16);
|
||||
match(Set dst (Replicate con));
|
||||
format %{ "replicateHF_imm8_gt128b $dst, $con\t# vector > 128 bits" %}
|
||||
ins_encode %{
|
||||
assert(UseSVE > 0, "must be sve");
|
||||
__ sve_dup($dst$$FloatRegister, __ H, (int)($con$$constant));
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
@@ -5154,3 +5184,34 @@ BITPERM(vcompressBits, CompressBitsV, sve_bext)
|
||||
|
||||
// ----------------------------------- ExpandBitsV ---------------------------------
|
||||
BITPERM(vexpandBits, ExpandBitsV, sve_bdep)
|
||||
|
||||
// ------------------------------------- SelectFromTwoVector ------------------------------------
|
||||
// The Neon and SVE2 tbl instruction for two vector lookup requires both the source vectors to be
|
||||
// consecutive. The match rules for SelectFromTwoVector reserve two consecutive vector registers
|
||||
// for src1 and src2.
|
||||
// Four combinations of vector registers for vselect_from_two_vectors are chosen at random
|
||||
// (two from volatile and two from non-volatile set) which gives more freedom to the register
|
||||
// allocator to choose the best pair of source registers at that point.
|
||||
dnl
|
||||
dnl SELECT_FROM_TWO_VECTORS($1, $2 )
|
||||
dnl SELECT_FROM_TWO_VECTORS(first_reg, second_reg)
|
||||
define(`SELECT_FROM_TWO_VECTORS', `
|
||||
instruct vselect_from_two_vectors_$1_$2(vReg dst, vReg_V$1 src1, vReg_V$2 src2,
|
||||
vReg index, vReg tmp) %{
|
||||
effect(TEMP_DEF dst, TEMP tmp);
|
||||
match(Set dst (SelectFromTwoVector (Binary index src1) src2));
|
||||
format %{ "vselect_from_two_vectors_$1_$2 $dst, $src1, $src2, $index\t# KILL $tmp" %}
|
||||
ins_encode %{
|
||||
BasicType bt = Matcher::vector_element_basic_type(this);
|
||||
uint length_in_bytes = Matcher::vector_length_in_bytes(this);
|
||||
__ select_from_two_vectors($dst$$FloatRegister, $src1$$FloatRegister,
|
||||
$src2$$FloatRegister, $index$$FloatRegister,
|
||||
$tmp$$FloatRegister, bt, length_in_bytes);
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}')dnl
|
||||
dnl
|
||||
SELECT_FROM_TWO_VECTORS(10, 11)
|
||||
SELECT_FROM_TWO_VECTORS(12, 13)
|
||||
SELECT_FROM_TWO_VECTORS(17, 18)
|
||||
SELECT_FROM_TWO_VECTORS(23, 24)
|
||||
|
||||
@@ -434,6 +434,11 @@ int Assembler::operand_valid_for_movi_immediate(uint64_t imm64, SIMD_Arrangement
|
||||
return -1;
|
||||
}
|
||||
|
||||
bool Assembler::operand_valid_for_sve_dup_immediate(int64_t imm) {
|
||||
return ((imm >= -128 && imm <= 127) ||
|
||||
(((imm & 0xff) == 0) && imm >= -32768 && imm <= 32512));
|
||||
}
|
||||
|
||||
bool Assembler::operand_valid_for_sve_logical_immediate(unsigned elembits, uint64_t imm) {
|
||||
return encode_sve_logical_immediate(elembits, imm) != 0xffffffff;
|
||||
}
|
||||
|
||||
@@ -4231,12 +4231,29 @@ public:
|
||||
sf(imm1, 9, 5), rf(Zd, 0);
|
||||
}
|
||||
|
||||
// SVE programmable table lookup/permute using vector of element indices
|
||||
void sve_tbl(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, FloatRegister Zm) {
|
||||
private:
|
||||
void _sve_tbl(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, unsigned reg_count, FloatRegister Zm) {
|
||||
starti;
|
||||
assert(T != Q, "invalid size");
|
||||
// Only supports one or two vector lookup. One vector lookup was introduced in SVE1
|
||||
// and two vector lookup in SVE2
|
||||
assert(0 < reg_count && reg_count <= 2, "invalid number of registers");
|
||||
|
||||
int op11 = (reg_count == 1) ? 0b10 : 0b01;
|
||||
|
||||
f(0b00000101, 31, 24), f(T, 23, 22), f(0b1, 21), rf(Zm, 16);
|
||||
f(0b001100, 15, 10), rf(Zn, 5), rf(Zd, 0);
|
||||
f(0b001, 15, 13), f(op11, 12, 11), f(0b0, 10), rf(Zn, 5), rf(Zd, 0);
|
||||
}
|
||||
|
||||
public:
|
||||
// SVE/SVE2 Programmable table lookup in one or two vector table (zeroing)
|
||||
void sve_tbl(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn, FloatRegister Zm) {
|
||||
_sve_tbl(Zd, T, Zn, 1, Zm);
|
||||
}
|
||||
|
||||
void sve_tbl(FloatRegister Zd, SIMD_RegVariant T, FloatRegister Zn1, FloatRegister Zn2, FloatRegister Zm) {
|
||||
assert(Zn1->successor() == Zn2, "invalid order of registers");
|
||||
_sve_tbl(Zd, T, Zn1, 2, Zm);
|
||||
}
|
||||
|
||||
// Shuffle active elements of vector to the right and fill with zero
|
||||
@@ -4307,6 +4324,7 @@ public:
|
||||
static bool operand_valid_for_sve_add_sub_immediate(int64_t imm);
|
||||
static bool operand_valid_for_float_immediate(double imm);
|
||||
static int operand_valid_for_movi_immediate(uint64_t imm64, SIMD_Arrangement T);
|
||||
static bool operand_valid_for_sve_dup_immediate(int64_t imm);
|
||||
|
||||
void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
|
||||
void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);
|
||||
|
||||
@@ -410,11 +410,7 @@ int LIR_Assembler::emit_unwind_handler() {
|
||||
if (method()->is_synchronized()) {
|
||||
monitor_address(0, FrameMap::r0_opr);
|
||||
stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);
|
||||
if (LockingMode == LM_MONITOR) {
|
||||
__ b(*stub->entry());
|
||||
} else {
|
||||
__ unlock_object(r5, r4, r0, r6, *stub->entry());
|
||||
}
|
||||
__ unlock_object(r5, r4, r0, r6, *stub->entry());
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
@@ -2484,13 +2480,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
|
||||
Register hdr = op->hdr_opr()->as_register();
|
||||
Register lock = op->lock_opr()->as_register();
|
||||
Register temp = op->scratch_opr()->as_register();
|
||||
if (LockingMode == LM_MONITOR) {
|
||||
if (op->info() != nullptr) {
|
||||
add_debug_info_for_null_check_here(op->info());
|
||||
__ null_check(obj, -1);
|
||||
}
|
||||
__ b(*op->stub()->entry());
|
||||
} else if (op->code() == lir_lock) {
|
||||
if (op->code() == lir_lock) {
|
||||
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
|
||||
// add debug info for NullPointerException only if one is possible
|
||||
int null_check_offset = __ lock_object(hdr, obj, lock, temp, *op->stub()->entry());
|
||||
@@ -2823,7 +2813,7 @@ void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest, LIR_PatchCode patch_code, C
|
||||
return;
|
||||
}
|
||||
|
||||
__ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr()));
|
||||
__ lea(dest->as_pointer_register(), as_Address(addr->as_address_ptr()));
|
||||
}
|
||||
|
||||
|
||||
@@ -3133,7 +3123,9 @@ void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
__ membar(__ AnyAny);
|
||||
if(!UseLSE) {
|
||||
__ membar(__ AnyAny);
|
||||
}
|
||||
}
|
||||
|
||||
#undef __
|
||||
|
||||
@@ -981,7 +981,7 @@ void LIRGenerator::do_update_CRC32(Intrinsic* x) {
|
||||
CallingConvention* cc = frame_map()->c_calling_convention(&signature);
|
||||
const LIR_Opr result_reg = result_register_for(x->type());
|
||||
|
||||
LIR_Opr addr = new_pointer_register();
|
||||
LIR_Opr addr = new_register(T_ADDRESS);
|
||||
__ leal(LIR_OprFact::address(a), addr);
|
||||
|
||||
crc.load_item_force(cc->at(0));
|
||||
@@ -1058,7 +1058,7 @@ void LIRGenerator::do_update_CRC32C(Intrinsic* x) {
|
||||
CallingConvention* cc = frame_map()->c_calling_convention(&signature);
|
||||
const LIR_Opr result_reg = result_register_for(x->type());
|
||||
|
||||
LIR_Opr addr = new_pointer_register();
|
||||
LIR_Opr addr = new_register(T_ADDRESS);
|
||||
__ leal(LIR_OprFact::address(a), addr);
|
||||
|
||||
crc.load_item_force(cc->at(0));
|
||||
|
||||
@@ -60,8 +60,6 @@ void C1_MacroAssembler::float_cmp(bool is_float, int unordered_result,
|
||||
}
|
||||
|
||||
int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register temp, Label& slow_case) {
|
||||
const int aligned_mask = BytesPerWord -1;
|
||||
const int hdr_offset = oopDesc::mark_offset_in_bytes();
|
||||
assert_different_registers(hdr, obj, disp_hdr, temp, rscratch2);
|
||||
int null_check_offset = -1;
|
||||
|
||||
@@ -72,95 +70,20 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
|
||||
|
||||
null_check_offset = offset();
|
||||
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
lightweight_lock(disp_hdr, obj, hdr, temp, rscratch2, slow_case);
|
||||
} else if (LockingMode == LM_LEGACY) {
|
||||
lightweight_lock(disp_hdr, obj, hdr, temp, rscratch2, slow_case);
|
||||
|
||||
if (DiagnoseSyncOnValueBasedClasses != 0) {
|
||||
load_klass(hdr, obj);
|
||||
ldrb(hdr, Address(hdr, Klass::misc_flags_offset()));
|
||||
tst(hdr, KlassFlags::_misc_is_value_based_class);
|
||||
br(Assembler::NE, slow_case);
|
||||
}
|
||||
|
||||
Label done;
|
||||
// Load object header
|
||||
ldr(hdr, Address(obj, hdr_offset));
|
||||
// and mark it as unlocked
|
||||
orr(hdr, hdr, markWord::unlocked_value);
|
||||
// save unlocked object header into the displaced header location on the stack
|
||||
str(hdr, Address(disp_hdr, 0));
|
||||
// test if object header is still the same (i.e. unlocked), and if so, store the
|
||||
// displaced header address in the object header - if it is not the same, get the
|
||||
// object header instead
|
||||
lea(rscratch2, Address(obj, hdr_offset));
|
||||
cmpxchgptr(hdr, disp_hdr, rscratch2, rscratch1, done, /*fallthough*/nullptr);
|
||||
// if the object header was the same, we're done
|
||||
// if the object header was not the same, it is now in the hdr register
|
||||
// => test if it is a stack pointer into the same stack (recursive locking), i.e.:
|
||||
//
|
||||
// 1) (hdr & aligned_mask) == 0
|
||||
// 2) sp <= hdr
|
||||
// 3) hdr <= sp + page_size
|
||||
//
|
||||
// these 3 tests can be done by evaluating the following expression:
|
||||
//
|
||||
// (hdr - sp) & (aligned_mask - page_size)
|
||||
//
|
||||
// assuming both the stack pointer and page_size have their least
|
||||
// significant 2 bits cleared and page_size is a power of 2
|
||||
mov(rscratch1, sp);
|
||||
sub(hdr, hdr, rscratch1);
|
||||
ands(hdr, hdr, aligned_mask - (int)os::vm_page_size());
|
||||
// for recursive locking, the result is zero => save it in the displaced header
|
||||
// location (null in the displaced hdr location indicates recursive locking)
|
||||
str(hdr, Address(disp_hdr, 0));
|
||||
// otherwise we don't care about the result and handle locking via runtime call
|
||||
cbnz(hdr, slow_case);
|
||||
// done
|
||||
bind(done);
|
||||
inc_held_monitor_count(rscratch1);
|
||||
}
|
||||
return null_check_offset;
|
||||
}
|
||||
|
||||
|
||||
void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Register temp, Label& slow_case) {
|
||||
const int aligned_mask = BytesPerWord -1;
|
||||
const int hdr_offset = oopDesc::mark_offset_in_bytes();
|
||||
assert_different_registers(hdr, obj, disp_hdr, temp, rscratch2);
|
||||
Label done;
|
||||
|
||||
if (LockingMode != LM_LIGHTWEIGHT) {
|
||||
// load displaced header
|
||||
ldr(hdr, Address(disp_hdr, 0));
|
||||
// if the loaded hdr is null we had recursive locking
|
||||
// if we had recursive locking, we are done
|
||||
cbz(hdr, done);
|
||||
}
|
||||
|
||||
// load object
|
||||
ldr(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
|
||||
verify_oop(obj);
|
||||
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
lightweight_unlock(obj, hdr, temp, rscratch2, slow_case);
|
||||
} else if (LockingMode == LM_LEGACY) {
|
||||
// test if object header is pointing to the displaced header, and if so, restore
|
||||
// the displaced header in the object - if the object header is not pointing to
|
||||
// the displaced header, get the object header instead
|
||||
// if the object header was not pointing to the displaced header,
|
||||
// we do unlocking via runtime call
|
||||
if (hdr_offset) {
|
||||
lea(rscratch1, Address(obj, hdr_offset));
|
||||
cmpxchgptr(disp_hdr, hdr, rscratch1, rscratch2, done, &slow_case);
|
||||
} else {
|
||||
cmpxchgptr(disp_hdr, hdr, obj, rscratch2, done, &slow_case);
|
||||
}
|
||||
// done
|
||||
bind(done);
|
||||
dec_held_monitor_count(rscratch1);
|
||||
}
|
||||
lightweight_unlock(obj, hdr, temp, rscratch2, slow_case);
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -147,215 +147,8 @@ address C2_MacroAssembler::arrays_hashcode(Register ary, Register cnt, Register
|
||||
return pc();
|
||||
}
|
||||
|
||||
void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg, Register tmpReg,
|
||||
Register tmp2Reg, Register tmp3Reg) {
|
||||
Register oop = objectReg;
|
||||
Register box = boxReg;
|
||||
Register disp_hdr = tmpReg;
|
||||
Register tmp = tmp2Reg;
|
||||
Label cont;
|
||||
Label object_has_monitor;
|
||||
Label count, no_count;
|
||||
|
||||
assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_lock_lightweight");
|
||||
assert_different_registers(oop, box, tmp, disp_hdr, rscratch2);
|
||||
|
||||
// Load markWord from object into displaced_header.
|
||||
ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes()));
|
||||
|
||||
if (DiagnoseSyncOnValueBasedClasses != 0) {
|
||||
load_klass(tmp, oop);
|
||||
ldrb(tmp, Address(tmp, Klass::misc_flags_offset()));
|
||||
tst(tmp, KlassFlags::_misc_is_value_based_class);
|
||||
br(Assembler::NE, cont);
|
||||
}
|
||||
|
||||
// Check for existing monitor
|
||||
tbnz(disp_hdr, exact_log2(markWord::monitor_value), object_has_monitor);
|
||||
|
||||
if (LockingMode == LM_MONITOR) {
|
||||
tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
|
||||
b(cont);
|
||||
} else {
|
||||
assert(LockingMode == LM_LEGACY, "must be");
|
||||
// Set tmp to be (markWord of object | UNLOCK_VALUE).
|
||||
orr(tmp, disp_hdr, markWord::unlocked_value);
|
||||
|
||||
// Initialize the box. (Must happen before we update the object mark!)
|
||||
str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
|
||||
|
||||
// Compare object markWord with an unlocked value (tmp) and if
|
||||
// equal exchange the stack address of our box with object markWord.
|
||||
// On failure disp_hdr contains the possibly locked markWord.
|
||||
cmpxchg(oop, tmp, box, Assembler::xword, /*acquire*/ true,
|
||||
/*release*/ true, /*weak*/ false, disp_hdr);
|
||||
br(Assembler::EQ, cont);
|
||||
|
||||
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
|
||||
|
||||
// If the compare-and-exchange succeeded, then we found an unlocked
|
||||
// object, will have now locked it will continue at label cont
|
||||
|
||||
// Check if the owner is self by comparing the value in the
|
||||
// markWord of object (disp_hdr) with the stack pointer.
|
||||
mov(rscratch1, sp);
|
||||
sub(disp_hdr, disp_hdr, rscratch1);
|
||||
mov(tmp, (address) (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
|
||||
// If condition is true we are cont and hence we can store 0 as the
|
||||
// displaced header in the box, which indicates that it is a recursive lock.
|
||||
ands(tmp/*==0?*/, disp_hdr, tmp); // Sets flags for result
|
||||
str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
|
||||
b(cont);
|
||||
}
|
||||
|
||||
// Handle existing monitor.
|
||||
bind(object_has_monitor);
|
||||
|
||||
// Try to CAS owner (no owner => current thread's _monitor_owner_id).
|
||||
ldr(rscratch2, Address(rthread, JavaThread::monitor_owner_id_offset()));
|
||||
add(tmp, disp_hdr, (in_bytes(ObjectMonitor::owner_offset())-markWord::monitor_value));
|
||||
cmpxchg(tmp, zr, rscratch2, Assembler::xword, /*acquire*/ true,
|
||||
/*release*/ true, /*weak*/ false, tmp3Reg); // Sets flags for result
|
||||
|
||||
// Store a non-null value into the box to avoid looking like a re-entrant
|
||||
// lock. The fast-path monitor unlock code checks for
|
||||
// markWord::monitor_value so use markWord::unused_mark which has the
|
||||
// relevant bit set, and also matches ObjectSynchronizer::enter.
|
||||
mov(tmp, (address)markWord::unused_mark().value());
|
||||
str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
|
||||
|
||||
br(Assembler::EQ, cont); // CAS success means locking succeeded
|
||||
|
||||
cmp(tmp3Reg, rscratch2);
|
||||
br(Assembler::NE, cont); // Check for recursive locking
|
||||
|
||||
// Recursive lock case
|
||||
increment(Address(disp_hdr, in_bytes(ObjectMonitor::recursions_offset()) - markWord::monitor_value), 1);
|
||||
// flag == EQ still from the cmp above, checking if this is a reentrant lock
|
||||
|
||||
bind(cont);
|
||||
// flag == EQ indicates success
|
||||
// flag == NE indicates failure
|
||||
br(Assembler::NE, no_count);
|
||||
|
||||
bind(count);
|
||||
if (LockingMode == LM_LEGACY) {
|
||||
inc_held_monitor_count(rscratch1);
|
||||
}
|
||||
|
||||
bind(no_count);
|
||||
}
|
||||
|
||||
void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, Register tmpReg,
                                    Register tmp2Reg) {
  Register oop = objectReg;
  Register box = boxReg;
  Register disp_hdr = tmpReg;
  Register owner_addr = tmpReg;
  Register tmp = tmp2Reg;
  Label cont;
  Label object_has_monitor;
  Label count, no_count;
  Label unlocked;

  assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_unlock_lightweight");
  assert_different_registers(oop, box, tmp, disp_hdr);

  if (LockingMode == LM_LEGACY) {
    // Find the lock address and load the displaced header from the stack.
    ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes()));

    // If the displaced header is 0, we have a recursive unlock.
    cmp(disp_hdr, zr);
    br(Assembler::EQ, cont);
  }

  // Handle existing monitor.
  ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes()));
  tbnz(tmp, exact_log2(markWord::monitor_value), object_has_monitor);

  if (LockingMode == LM_MONITOR) {
    tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
    b(cont);
  } else {
    assert(LockingMode == LM_LEGACY, "must be");
    // Check if it is still a lightweight lock; this is true if we
    // see the stack address of the basicLock in the markWord of the
    // object.

    cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
            /*release*/ true, /*weak*/ false, tmp);
    b(cont);
  }

  assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");

  // Handle existing monitor.
  bind(object_has_monitor);
  STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
  add(tmp, tmp, -(int)markWord::monitor_value); // monitor

  ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));

  Label notRecursive;
  cbz(disp_hdr, notRecursive);

  // Recursive lock
  sub(disp_hdr, disp_hdr, 1u);
  str(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
  cmp(disp_hdr, disp_hdr); // Sets flags for result
  b(cont);

  bind(notRecursive);

  // Compute owner address.
  lea(owner_addr, Address(tmp, ObjectMonitor::owner_offset()));

  // Set owner to null.
  // Release to satisfy the JMM
  stlr(zr, owner_addr);
  // We need a full fence after clearing owner to avoid stranding.
  // StoreLoad achieves this.
  membar(StoreLoad);

  // Check if the entry_list is empty.
  ldr(rscratch1, Address(tmp, ObjectMonitor::entry_list_offset()));
  cmp(rscratch1, zr);
  br(Assembler::EQ, cont); // If so we are done.

  // Check if there is a successor.
  ldr(rscratch1, Address(tmp, ObjectMonitor::succ_offset()));
  cmp(rscratch1, zr);
  br(Assembler::NE, unlocked); // If so we are done.

  // Save the monitor pointer in the current thread, so we can try to
  // reacquire the lock in SharedRuntime::monitor_exit_helper().
  str(tmp, Address(rthread, JavaThread::unlocked_inflated_monitor_offset()));

  cmp(zr, rthread); // Set Flag to NE => slow path
  b(cont);

  bind(unlocked);
  cmp(zr, zr); // Set Flag to EQ => fast path

  // Intentional fall-through

  bind(cont);
  // flag == EQ indicates success
  // flag == NE indicates failure
  br(Assembler::NE, no_count);

  bind(count);
  if (LockingMode == LM_LEGACY) {
    dec_held_monitor_count(rscratch1);
  }

  bind(no_count);
}

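// A hedged C++ sketch (not HotSpot code) of the exit protocol the monitor path above
// implements: drop a recursion if one is outstanding, otherwise publish the unlock with
// a release store, issue a full fence, and fall back to the slow path only when there
// are queued waiters but no designated successor. The field names are simplified
// stand-ins for ObjectMonitor::_recursions, _owner, _entry_list and _succ.
#include <atomic>
#include <cstdint>

struct MonitorState {
  std::atomic<uint64_t> owner{0};
  uint64_t recursions = 0;
  std::atomic<void*> entry_list{nullptr};  // threads queued to enter
  std::atomic<void*> succ{nullptr};        // thread already picked to retry
};

// Returns true when the fast path may complete the unlock (flag == EQ).
bool fast_monitor_exit(MonitorState& m) {
  if (m.recursions != 0) {
    m.recursions--;                                     // recursive unlock, owner unchanged
    return true;
  }
  m.owner.store(0, std::memory_order_release);          // stlr(zr, owner_addr)
  std::atomic_thread_fence(std::memory_order_seq_cst);  // membar(StoreLoad)
  if (m.entry_list.load() == nullptr) return true;      // nobody is waiting
  if (m.succ.load() != nullptr) return true;            // a successor will retry the lock
  // Waiters exist but no successor: the slow path must wake one (or re-acquire)
  // so no thread is left stranded.
  return false;
}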
void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Register t1,
                                              Register t2, Register t3) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  assert_different_registers(obj, box, t1, t2, t3, rscratch2);

  // Handle inflated monitor.
@@ -512,7 +305,6 @@ void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Regist

void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register box, Register t1,
                                                Register t2, Register t3) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  assert_different_registers(obj, box, t1, t2, t3);

  // Handle inflated monitor.
@@ -2858,3 +2650,124 @@ void C2_MacroAssembler::reconstruct_frame_pointer(Register rtmp) {
    add(rfp, sp, framesize - 2 * wordSize);
  }
}

// Selects elements from two source vectors (src1, src2) based on index values in the index register
// using Neon instructions and places it in the destination vector element corresponding to the
// index vector element. Each index in the index register must be in the range - [0, 2 * NUM_ELEM),
// where NUM_ELEM is the number of BasicType elements per vector.
// If idx < NUM_ELEM --> selects src1[idx] (idx is an element of the index register)
// Otherwise, selects src2[idx - NUM_ELEM]
void C2_MacroAssembler::select_from_two_vectors_neon(FloatRegister dst, FloatRegister src1,
                                                     FloatRegister src2, FloatRegister index,
                                                     FloatRegister tmp, unsigned vector_length_in_bytes) {
  assert_different_registers(dst, src1, src2, tmp);
  SIMD_Arrangement size = vector_length_in_bytes == 16 ? T16B : T8B;

  if (vector_length_in_bytes == 16) {
    assert(UseSVE <= 1, "sve must be <= 1");
    assert(src1->successor() == src2, "Source registers must be ordered");
    // If the vector length is 16B, then use the Neon "tbl" instruction with two vector table
    tbl(dst, size, src1, 2, index);
  } else { // vector length == 8
    assert(UseSVE == 0, "must be Neon only");
    // We need to fit both the source vectors (src1, src2) in a 128-bit register because the
    // Neon "tbl" instruction supports only looking up 16B vectors. We then use the Neon "tbl"
    // instruction with one vector lookup
    ins(tmp, D, src1, 0, 0);
    ins(tmp, D, src2, 1, 0);
    tbl(dst, size, tmp, 1, index);
  }
}

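// A small scalar model (an assumption-level sketch, not HotSpot code) of what the
// Neon/SVE "tbl"-based selects compute: for every destination lane, an index below
// NUM_ELEM picks from src1 and an index in [NUM_ELEM, 2 * NUM_ELEM) picks from src2.
#include <cstddef>

template <typename T>
void select_from_two_vectors_ref(T* dst, const T* src1, const T* src2,
                                 const unsigned* index, size_t num_elem) {
  for (size_t i = 0; i < num_elem; i++) {
    unsigned idx = index[i];                  // required: idx < 2 * num_elem
    dst[i] = (idx < num_elem) ? src1[idx] : src2[idx - num_elem];
  }
}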
// Selects elements from two source vectors (src1, src2) based on index values in the index register
// using SVE/SVE2 instructions and places it in the destination vector element corresponding to the
// index vector element. Each index in the index register must be in the range - [0, 2 * NUM_ELEM),
// where NUM_ELEM is the number of BasicType elements per vector.
// If idx < NUM_ELEM --> selects src1[idx] (idx is an element of the index register)
// Otherwise, selects src2[idx - NUM_ELEM]
void C2_MacroAssembler::select_from_two_vectors_sve(FloatRegister dst, FloatRegister src1,
                                                    FloatRegister src2, FloatRegister index,
                                                    FloatRegister tmp, SIMD_RegVariant T,
                                                    unsigned vector_length_in_bytes) {
  assert_different_registers(dst, src1, src2, index, tmp);

  if (vector_length_in_bytes == 8) {
    // We need to fit both the source vectors (src1, src2) in a single vector register because the
    // SVE "tbl" instruction is unpredicated and works on the entire vector which can lead to
    // incorrect results if each source vector is only partially filled. We then use the SVE "tbl"
    // instruction with one vector lookup
    assert(UseSVE >= 1, "sve must be >= 1");
    ins(tmp, D, src1, 0, 0);
    ins(tmp, D, src2, 1, 0);
    sve_tbl(dst, T, tmp, index);
  } else { // UseSVE == 2 and vector_length_in_bytes > 8
    // If the vector length is > 8, then use the SVE2 "tbl" instruction with the two vector table.
    // The assertion - vector_length_in_bytes == MaxVectorSize ensures that this operation
    // is not executed on machines where vector_length_in_bytes < MaxVectorSize
    // with the only exception of 8B vector length.
    assert(UseSVE == 2 && vector_length_in_bytes == MaxVectorSize, "must be");
    assert(src1->successor() == src2, "Source registers must be ordered");
    sve_tbl(dst, T, src1, src2, index);
  }
}

void C2_MacroAssembler::select_from_two_vectors(FloatRegister dst, FloatRegister src1,
                                                FloatRegister src2, FloatRegister index,
                                                FloatRegister tmp, BasicType bt,
                                                unsigned vector_length_in_bytes) {

  assert_different_registers(dst, src1, src2, index, tmp);

  // The cases that can reach this method are -
  // - UseSVE = 0, vector_length_in_bytes = 8 or 16
  // - UseSVE = 1, vector_length_in_bytes = 8 or 16
  // - UseSVE = 2, vector_length_in_bytes >= 8
  //
  // SVE/SVE2 tbl instructions are generated when UseSVE = 1 with vector_length_in_bytes = 8
  // and UseSVE = 2 with vector_length_in_bytes >= 8
  //
  // Neon instructions are generated when UseSVE = 0 with vector_length_in_bytes = 8 or 16 and
  // UseSVE = 1 with vector_length_in_bytes = 16

  if ((UseSVE == 1 && vector_length_in_bytes == 8) || UseSVE == 2) {
    SIMD_RegVariant T = elemType_to_regVariant(bt);
    select_from_two_vectors_sve(dst, src1, src2, index, tmp, T, vector_length_in_bytes);
    return;
  }

  // The only BasicTypes that can reach here are T_SHORT, T_BYTE, T_INT and T_FLOAT
  assert(bt != T_DOUBLE && bt != T_LONG, "unsupported basic type");
  assert(vector_length_in_bytes <= 16, "length_in_bytes must be <= 16");

  bool isQ = vector_length_in_bytes == 16;

  SIMD_Arrangement size1 = isQ ? T16B : T8B;
  SIMD_Arrangement size2 = esize2arrangement((uint)type2aelembytes(bt), isQ);

  // Neon "tbl" instruction only supports byte tables, so we need to look at chunks of
  // 2B for selecting shorts or chunks of 4B for selecting ints/floats from the table.
  // The index values in "index" register are in the range of [0, 2 * NUM_ELEM) where NUM_ELEM
  // is the number of elements that can fit in a vector. For ex. for T_SHORT with 64-bit vector length,
  // the indices can range from [0, 8).
  // As an example with 64-bit vector length and T_SHORT type - let index = [2, 5, 1, 0]
  // Move a constant 0x02 in every byte of tmp - tmp = [0x0202, 0x0202, 0x0202, 0x0202]
  // Multiply index vector with tmp to yield - dst = [0x0404, 0x0a0a, 0x0202, 0x0000]
  // Move a constant 0x0100 in every 2B of tmp - tmp = [0x0100, 0x0100, 0x0100, 0x0100]
  // Add the multiplied result to the vector in tmp to obtain the byte level
  // offsets - dst = [0x0504, 0x0b0a, 0x0302, 0x0100]
  // Use these offsets in the "tbl" instruction to select chunks of 2B.

  if (bt == T_BYTE) {
    select_from_two_vectors_neon(dst, src1, src2, index, tmp, vector_length_in_bytes);
  } else {
    int elem_size = (bt == T_SHORT) ? 2 : 4;
    uint64_t tbl_offset = (bt == T_SHORT) ? 0x0100u : 0x03020100u;

    mov(tmp, size1, elem_size);
    mulv(dst, size2, index, tmp);
    mov(tmp, size2, tbl_offset);
    addv(dst, size1, dst, tmp); // "dst" now contains the processed index elements
                                // to select a set of 2B/4B
    select_from_two_vectors_neon(dst, src1, src2, dst, tmp, vector_length_in_bytes);
  }
}

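// A scalar sketch of the index expansion described in the comment above, for T_SHORT
// (illustration only; the values follow the worked example). Each 2-byte lane index i
// becomes the byte pair (2*i, 2*i + 1), which is exactly what the mov/mulv/mov/addv
// sequence builds before the byte-wise "tbl" lookup.
#include <cstdint>
#include <cstdio>

int main() {
  const uint16_t index[4] = {2, 5, 1, 0};     // lane indices, range [0, 8)
  uint16_t byte_offsets[4];
  for (int i = 0; i < 4; i++) {
    uint16_t scaled = (uint16_t)(index[i] * 0x0202u); // mulv with 0x02 replicated per byte
    byte_offsets[i] = (uint16_t)(scaled + 0x0100u);   // addv with 0x0100 replicated per 2B
  }
  // Prints 0x0504 0x0b0a 0x0302 0x0100, matching the example in the comment.
  for (int i = 0; i < 4; i++) printf("0x%04x ", byte_offsets[i]);
  printf("\n");
  return 0;
}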
@@ -34,6 +34,15 @@
  void neon_reduce_logical_helper(int opc, bool sf, Register Rd, Register Rn, Register Rm,
                                  enum shift_kind kind = Assembler::LSL, unsigned shift = 0);

  void select_from_two_vectors_neon(FloatRegister dst, FloatRegister src1,
                                    FloatRegister src2, FloatRegister index,
                                    FloatRegister tmp, unsigned vector_length_in_bytes);

  void select_from_two_vectors_sve(FloatRegister dst, FloatRegister src1,
                                   FloatRegister src2, FloatRegister index,
                                   FloatRegister tmp, SIMD_RegVariant T,
                                   unsigned vector_length_in_bytes);

 public:
  // jdk.internal.util.ArraysSupport.vectorizedHashCode
  address arrays_hashcode(Register ary, Register cnt, Register result, FloatRegister vdata0,
@@ -42,9 +51,6 @@
                          FloatRegister vmul3, FloatRegister vpow, FloatRegister vpowm,
                          BasicType eltype);

  // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
  void fast_lock(Register object, Register box, Register tmp, Register tmp2, Register tmp3);
  void fast_unlock(Register object, Register box, Register tmp, Register tmp2);
  // Code used by cmpFastLockLightweight and cmpFastUnlockLightweight mach instructions in .ad file.
  void fast_lock_lightweight(Register object, Register box, Register t1, Register t2, Register t3);
  void fast_unlock_lightweight(Register object, Register box, Register t1, Register t2, Register t3);
@@ -193,4 +199,9 @@

  void reconstruct_frame_pointer(Register rtmp);

  // Select from a table of two vectors
  void select_from_two_vectors(FloatRegister dst, FloatRegister src1, FloatRegister src2,
                               FloatRegister index, FloatRegister tmp, BasicType bt,
                               unsigned vector_length_in_bytes);

#endif // CPU_AARCH64_C2_MACROASSEMBLER_AARCH64_HPP

@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -101,9 +101,12 @@ frame FreezeBase::new_heap_frame(frame& f, frame& caller) {
|
||||
*hf.addr_at(frame::interpreter_frame_locals_offset) = locals_offset;
|
||||
return hf;
|
||||
} else {
|
||||
// We need to re-read fp out of the frame because it may be an oop and we might have
|
||||
// had a safepoint in finalize_freeze, after constructing f.
|
||||
fp = *(intptr_t**)(f.sp() - frame::sender_sp_offset);
|
||||
// For a compiled frame we need to re-read fp out of the frame because it may be an
|
||||
// oop and we might have had a safepoint in finalize_freeze, after constructing f.
|
||||
// For stub/native frames the value is not used while frozen, and will be constructed again
|
||||
// when thawing the frame (see ThawBase::new_stack_frame). We use a special bad address to
|
||||
// help with debugging, particularly when inspecting frames and identifying invalid accesses.
|
||||
fp = FKind::compiled ? *(intptr_t**)(f.sp() - frame::sender_sp_offset) : (intptr_t*)badAddressVal;
|
||||
|
||||
int fsize = FKind::size(f);
|
||||
sp = caller.unextended_sp() - fsize;
|
||||
@@ -192,6 +195,11 @@ inline void FreezeBase::patch_pd(frame& hf, const frame& caller) {
|
||||
}
|
||||
}
|
||||
|
||||
inline void FreezeBase::patch_pd_unused(intptr_t* sp) {
|
||||
intptr_t* fp_addr = sp - frame::sender_sp_offset;
|
||||
*fp_addr = badAddressVal;
|
||||
}
|
||||
|
||||
//////// Thaw
|
||||
|
||||
// Fast path
|
||||
|
||||
@@ -702,10 +702,10 @@ static void printbc(Method *m, intptr_t bcx) {
|
||||
if (m->validate_bci_from_bcp((address)bcx) < 0
|
||||
|| !m->contains((address)bcx)) {
|
||||
name = "???";
|
||||
snprintf(buf, sizeof buf, "(bad)");
|
||||
os::snprintf_checked(buf, sizeof buf, "(bad)");
|
||||
} else {
|
||||
int bci = m->bci_from((address)bcx);
|
||||
snprintf(buf, sizeof buf, "%d", bci);
|
||||
os::snprintf_checked(buf, sizeof buf, "%d", bci);
|
||||
name = Bytecodes::name(m->code_at(bci));
|
||||
}
|
||||
ResourceMark rm;
|
||||
|
||||
@@ -172,9 +172,9 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
|
||||
|
||||
if (expand_call) {
|
||||
assert(pre_val != c_rarg1, "smashed arg");
|
||||
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), pre_val, thread);
|
||||
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), pre_val);
|
||||
} else {
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), pre_val, thread);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), pre_val);
|
||||
}
|
||||
|
||||
__ pop(saved, sp);
|
||||
@@ -292,7 +292,8 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm,
|
||||
} else {
|
||||
assert(is_phantom, "only remaining strength");
|
||||
assert(!is_narrow, "phantom access cannot be narrow");
|
||||
__ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom));
|
||||
// AOT saved adapters need relocation for this call.
|
||||
__ lea(lr, RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom)));
|
||||
}
|
||||
__ blr(lr);
|
||||
__ mov(rscratch1, r0);
|
||||
@@ -752,7 +753,7 @@ void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAss
|
||||
__ bind(runtime);
|
||||
__ push_call_clobbered_registers();
|
||||
__ load_parameter(0, pre_val);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), pre_val, thread);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), pre_val);
|
||||
__ pop_call_clobbered_registers();
|
||||
__ bind(done);
|
||||
|
||||
|
||||
@@ -691,104 +691,27 @@ void InterpreterMacroAssembler::leave_jfr_critical_section() {
|
||||
void InterpreterMacroAssembler::lock_object(Register lock_reg)
|
||||
{
|
||||
assert(lock_reg == c_rarg1, "The argument is only for looks. It must be c_rarg1");
|
||||
if (LockingMode == LM_MONITOR) {
|
||||
call_VM_preemptable(noreg,
|
||||
CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
|
||||
lock_reg);
|
||||
} else {
|
||||
Label count, done;
|
||||
|
||||
const Register swap_reg = r0;
|
||||
const Register tmp = c_rarg2;
|
||||
const Register obj_reg = c_rarg3; // Will contain the oop
|
||||
const Register tmp2 = c_rarg4;
|
||||
const Register tmp3 = c_rarg5;
|
||||
const Register tmp = c_rarg2;
|
||||
const Register obj_reg = c_rarg3; // Will contain the oop
|
||||
const Register tmp2 = c_rarg4;
|
||||
const Register tmp3 = c_rarg5;
|
||||
|
||||
const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
|
||||
const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
|
||||
const int mark_offset = lock_offset +
|
||||
BasicLock::displaced_header_offset_in_bytes();
|
||||
// Load object pointer into obj_reg %c_rarg3
|
||||
ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
|
||||
|
||||
Label slow_case;
|
||||
Label slow_case, done;
|
||||
lightweight_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
|
||||
b(done);
|
||||
|
||||
// Load object pointer into obj_reg %c_rarg3
|
||||
ldr(obj_reg, Address(lock_reg, obj_offset));
|
||||
bind(slow_case);
|
||||
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
lightweight_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
|
||||
b(done);
|
||||
} else if (LockingMode == LM_LEGACY) {
|
||||
// Call the runtime routine for slow case
|
||||
call_VM_preemptable(noreg,
|
||||
CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
|
||||
lock_reg);
|
||||
|
||||
if (DiagnoseSyncOnValueBasedClasses != 0) {
|
||||
load_klass(tmp, obj_reg);
|
||||
ldrb(tmp, Address(tmp, Klass::misc_flags_offset()));
|
||||
tst(tmp, KlassFlags::_misc_is_value_based_class);
|
||||
br(Assembler::NE, slow_case);
|
||||
}
|
||||
|
||||
// Load (object->mark() | 1) into swap_reg
|
||||
ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
|
||||
orr(swap_reg, rscratch1, 1);
|
||||
|
||||
// Save (object->mark() | 1) into BasicLock's displaced header
|
||||
str(swap_reg, Address(lock_reg, mark_offset));
|
||||
|
||||
assert(lock_offset == 0,
|
||||
"displached header must be first word in BasicObjectLock");
|
||||
|
||||
Label fail;
|
||||
cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
|
||||
|
||||
// Fast check for recursive lock.
|
||||
//
|
||||
// Can apply the optimization only if this is a stack lock
|
||||
// allocated in this thread. For efficiency, we can focus on
|
||||
// recently allocated stack locks (instead of reading the stack
|
||||
// base and checking whether 'mark' points inside the current
|
||||
// thread stack):
|
||||
// 1) (mark & 7) == 0, and
|
||||
// 2) sp <= mark < mark + os::pagesize()
|
||||
//
|
||||
// Warning: sp + os::pagesize can overflow the stack base. We must
|
||||
// neither apply the optimization for an inflated lock allocated
|
||||
// just above the thread stack (this is why condition 1 matters)
|
||||
// nor apply the optimization if the stack lock is inside the stack
|
||||
// of another thread. The latter is avoided even in case of overflow
|
||||
// because we have guard pages at the end of all stacks. Hence, if
|
||||
// we go over the stack base and hit the stack of another thread,
|
||||
// this should not be in a writeable area that could contain a
|
||||
// stack lock allocated by that thread. As a consequence, a stack
|
||||
// lock less than page size away from sp is guaranteed to be
|
||||
// owned by the current thread.
|
||||
//
|
||||
// These 3 tests can be done by evaluating the following
|
||||
// expression: ((mark - sp) & (7 - os::vm_page_size())),
|
||||
// assuming both stack pointer and pagesize have their
|
||||
// least significant 3 bits clear.
|
||||
// NOTE: the mark is in swap_reg %r0 as the result of cmpxchg
|
||||
// NOTE2: aarch64 does not like to subtract sp from rn so take a
|
||||
// copy
|
||||
mov(rscratch1, sp);
|
||||
sub(swap_reg, swap_reg, rscratch1);
|
||||
ands(swap_reg, swap_reg, (uint64_t)(7 - (int)os::vm_page_size()));
|
||||
|
||||
// Save the test result, for recursive case, the result is zero
|
||||
str(swap_reg, Address(lock_reg, mark_offset));
|
||||
br(Assembler::NE, slow_case);
|
||||
|
||||
bind(count);
|
||||
inc_held_monitor_count(rscratch1);
|
||||
b(done);
|
||||
}
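// A worked C++ illustration (an assumption-level sketch, not VM code) of the
// ((mark - sp) & (7 - os::vm_page_size())) test described in the comment above: for a
// power-of-two page size, 7 - page_size wraps to a mask that keeps the low three bits
// and every bit at or above the page-size bit, so the result is zero exactly when the
// mark is 8-aligned and lies in [sp, sp + page_size). The constants are examples only.
#include <cstdint>
#include <cassert>

int main() {
  const uintptr_t page_size = 4096;                   // stand-in for os::vm_page_size()
  const uintptr_t sp        = 0x0000700000010000ULL;  // pretend stack pointer
  const uintptr_t mask      = 7 - page_size;          // 0xfffffffffffff007 on 64-bit
  uintptr_t mark_recursive  = sp + 0x40;              // 8-aligned, within one page of sp
  uintptr_t mark_elsewhere  = sp + 2 * page_size;     // outside the current page
  assert(((mark_recursive - sp) & mask) == 0);        // looks like our own stack lock
  assert(((mark_elsewhere - sp) & mask) != 0);        // anything else takes the slow path
  return 0;
}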
|
||||
bind(slow_case);
|
||||
|
||||
// Call the runtime routine for slow case
|
||||
call_VM_preemptable(noreg,
|
||||
CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
|
||||
lock_reg);
|
||||
|
||||
bind(done);
|
||||
}
|
||||
bind(done);
|
||||
}
|
||||
|
||||
|
||||
@@ -807,57 +730,29 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
|
||||
{
|
||||
assert(lock_reg == c_rarg1, "The argument is only for looks. It must be rarg1");
|
||||
|
||||
if (LockingMode == LM_MONITOR) {
|
||||
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
|
||||
} else {
|
||||
Label count, done;
|
||||
const Register swap_reg = r0;
|
||||
const Register header_reg = c_rarg2; // Will contain the old oopMark
|
||||
const Register obj_reg = c_rarg3; // Will contain the oop
|
||||
const Register tmp_reg = c_rarg4; // Temporary used by lightweight_unlock
|
||||
|
||||
const Register swap_reg = r0;
|
||||
const Register header_reg = c_rarg2; // Will contain the old oopMark
|
||||
const Register obj_reg = c_rarg3; // Will contain the oop
|
||||
const Register tmp_reg = c_rarg4; // Temporary used by lightweight_unlock
|
||||
save_bcp(); // Save in case of exception
|
||||
|
||||
save_bcp(); // Save in case of exception
|
||||
// Load oop into obj_reg(%c_rarg3)
|
||||
ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
|
||||
|
||||
if (LockingMode != LM_LIGHTWEIGHT) {
|
||||
// Convert from BasicObjectLock structure to object and BasicLock
|
||||
// structure Store the BasicLock address into %r0
|
||||
lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset()));
|
||||
}
|
||||
// Free entry
|
||||
str(zr, Address(lock_reg, BasicObjectLock::obj_offset()));
|
||||
|
||||
// Load oop into obj_reg(%c_rarg3)
|
||||
ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset()));
|
||||
Label slow_case, done;
|
||||
lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
|
||||
b(done);
|
||||
|
||||
// Free entry
|
||||
str(zr, Address(lock_reg, BasicObjectLock::obj_offset()));
|
||||
|
||||
Label slow_case;
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
|
||||
b(done);
|
||||
} else if (LockingMode == LM_LEGACY) {
|
||||
// Load the old header from BasicLock structure
|
||||
ldr(header_reg, Address(swap_reg,
|
||||
BasicLock::displaced_header_offset_in_bytes()));
|
||||
|
||||
// Test for recursion
|
||||
cbz(header_reg, count);
|
||||
|
||||
// Atomic swap back the old header
|
||||
cmpxchg_obj_header(swap_reg, header_reg, obj_reg, rscratch1, count, &slow_case);
|
||||
|
||||
bind(count);
|
||||
dec_held_monitor_count(rscratch1);
|
||||
b(done);
|
||||
}
|
||||
|
||||
bind(slow_case);
|
||||
// Call the runtime routine for slow case.
|
||||
str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset())); // restore obj
|
||||
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
|
||||
bind(done);
|
||||
restore_bcp();
|
||||
}
|
||||
bind(slow_case);
|
||||
// Call the runtime routine for slow case.
|
||||
str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset())); // restore obj
|
||||
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
|
||||
bind(done);
|
||||
restore_bcp();
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
|
||||
|
||||
@@ -2259,7 +2259,7 @@ void MacroAssembler::movptr(Register r, uintptr_t imm64) {
|
||||
#ifndef PRODUCT
|
||||
{
|
||||
char buffer[64];
|
||||
snprintf(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64);
|
||||
os::snprintf_checked(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64);
|
||||
block_comment(buffer);
|
||||
}
|
||||
#endif
|
||||
@@ -2317,7 +2317,7 @@ void MacroAssembler::mov_immediate64(Register dst, uint64_t imm64)
|
||||
#ifndef PRODUCT
|
||||
{
|
||||
char buffer[64];
|
||||
snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
|
||||
os::snprintf_checked(buffer, sizeof(buffer), "0x%" PRIX64, imm64);
|
||||
block_comment(buffer);
|
||||
}
|
||||
#endif
|
||||
@@ -2430,7 +2430,7 @@ void MacroAssembler::mov_immediate32(Register dst, uint32_t imm32)
|
||||
#ifndef PRODUCT
|
||||
{
|
||||
char buffer[64];
|
||||
snprintf(buffer, sizeof(buffer), "0x%" PRIX32, imm32);
|
||||
os::snprintf_checked(buffer, sizeof(buffer), "0x%" PRIX32, imm32);
|
||||
block_comment(buffer);
|
||||
}
|
||||
#endif
|
||||
@@ -2902,11 +2902,11 @@ int MacroAssembler::push_fp(unsigned int bitset, Register stack, FpPushPopMode m
|
||||
{
|
||||
char buffer[48];
|
||||
if (mode == PushPopSVE) {
|
||||
snprintf(buffer, sizeof(buffer), "push_fp: %d SVE registers", count);
|
||||
os::snprintf_checked(buffer, sizeof(buffer), "push_fp: %d SVE registers", count);
|
||||
} else if (mode == PushPopNeon) {
|
||||
snprintf(buffer, sizeof(buffer), "push_fp: %d Neon registers", count);
|
||||
os::snprintf_checked(buffer, sizeof(buffer), "push_fp: %d Neon registers", count);
|
||||
} else {
|
||||
snprintf(buffer, sizeof(buffer), "push_fp: %d fp registers", count);
|
||||
os::snprintf_checked(buffer, sizeof(buffer), "push_fp: %d fp registers", count);
|
||||
}
|
||||
block_comment(buffer);
|
||||
}
|
||||
@@ -3014,11 +3014,11 @@ int MacroAssembler::pop_fp(unsigned int bitset, Register stack, FpPushPopMode mo
|
||||
{
|
||||
char buffer[48];
|
||||
if (mode == PushPopSVE) {
|
||||
snprintf(buffer, sizeof(buffer), "pop_fp: %d SVE registers", count);
|
||||
os::snprintf_checked(buffer, sizeof(buffer), "pop_fp: %d SVE registers", count);
|
||||
} else if (mode == PushPopNeon) {
|
||||
snprintf(buffer, sizeof(buffer), "pop_fp: %d Neon registers", count);
|
||||
os::snprintf_checked(buffer, sizeof(buffer), "pop_fp: %d Neon registers", count);
|
||||
} else {
|
||||
snprintf(buffer, sizeof(buffer), "pop_fp: %d fp registers", count);
|
||||
os::snprintf_checked(buffer, sizeof(buffer), "pop_fp: %d fp registers", count);
|
||||
}
|
||||
block_comment(buffer);
|
||||
}
|
||||
@@ -5920,7 +5920,7 @@ address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3,
|
||||
{
|
||||
const char kind = (elem_size == 2) ? 'U' : 'L';
|
||||
char comment[64];
|
||||
snprintf(comment, sizeof comment, "array_equals%c{", kind);
|
||||
os::snprintf_checked(comment, sizeof comment, "array_equals%c{", kind);
|
||||
BLOCK_COMMENT(comment);
|
||||
}
|
||||
#endif
|
||||
@@ -6118,7 +6118,7 @@ void MacroAssembler::string_equals(Register a1, Register a2,
|
||||
#ifndef PRODUCT
|
||||
{
|
||||
char comment[64];
|
||||
snprintf(comment, sizeof comment, "{string_equalsL");
|
||||
os::snprintf_checked(comment, sizeof comment, "{string_equalsL");
|
||||
BLOCK_COMMENT(comment);
|
||||
}
|
||||
#endif
|
||||
@@ -6266,7 +6266,7 @@ address MacroAssembler::zero_words(Register base, uint64_t cnt)
|
||||
#ifndef PRODUCT
|
||||
{
|
||||
char buf[64];
|
||||
snprintf(buf, sizeof buf, "zero_words (count = %" PRIu64 ") {", cnt);
|
||||
os::snprintf_checked(buf, sizeof buf, "zero_words (count = %" PRIu64 ") {", cnt);
|
||||
BLOCK_COMMENT(buf);
|
||||
}
|
||||
#endif
|
||||
@@ -6421,10 +6421,14 @@ void MacroAssembler::fill_words(Register base, Register cnt, Register value)
|
||||
|
||||
// Intrinsic for
|
||||
//
|
||||
// - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray
|
||||
// return the number of characters copied.
|
||||
// - java/lang/StringUTF16.compress
|
||||
// return index of non-latin1 character if copy fails, otherwise 'len'.
|
||||
// - sun.nio.cs.ISO_8859_1.Encoder#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
|
||||
// Encodes char[] to byte[] in ISO-8859-1
|
||||
//
|
||||
// - java.lang.StringCoding#encodeISOArray0(byte[] sa, int sp, byte[] da, int dp, int len)
|
||||
// Encodes byte[] (containing UTF-16) to byte[] in ISO-8859-1
|
||||
//
|
||||
// - java.lang.StringCoding#encodeAsciiArray0(char[] sa, int sp, byte[] da, int dp, int len)
|
||||
// Encodes char[] to byte[] in ASCII
|
||||
//
|
||||
// This version always returns the number of characters copied, and does not
|
||||
// clobber the 'len' register. A successful copy will complete with the post-
|
||||
@@ -7097,7 +7101,6 @@ void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
|
||||
// - t1, t2, t3: temporary registers, will be destroyed
|
||||
// - slow: branched to if locking fails, absolute offset may be larger than 32KB (imm14 encoding).
|
||||
void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow) {
|
||||
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
|
||||
assert_different_registers(basic_lock, obj, t1, t2, t3, rscratch1);
|
||||
|
||||
Label push;
|
||||
@@ -7157,7 +7160,6 @@ void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Registe
|
||||
// - t1, t2, t3: temporary registers
|
||||
// - slow: branched to if unlocking fails, absolute offset may be larger than 32KB (imm14 encoding).
|
||||
void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
|
||||
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
|
||||
// cmpxchg clobbers rscratch1.
|
||||
assert_different_registers(obj, t1, t2, t3, rscratch1);
|
||||
|
||||
|
||||
@@ -1721,7 +1721,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
||||
// We use the same pc/oopMap repeatedly when we call out.
|
||||
|
||||
Label native_return;
|
||||
if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
|
||||
if (method->is_object_wait0()) {
|
||||
// For convenience we use the pc we want to resume to in case of preemption on Object.wait.
|
||||
__ set_last_Java_frame(sp, noreg, native_return, rscratch1);
|
||||
} else {
|
||||
@@ -1776,44 +1776,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
||||
// Load the oop from the handle
|
||||
__ ldr(obj_reg, Address(oop_handle_reg, 0));
|
||||
|
||||
if (LockingMode == LM_MONITOR) {
|
||||
__ b(slow_path_lock);
|
||||
} else if (LockingMode == LM_LEGACY) {
|
||||
// Load (object->mark() | 1) into swap_reg %r0
|
||||
__ ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
|
||||
__ orr(swap_reg, rscratch1, 1);
|
||||
|
||||
// Save (object->mark() | 1) into BasicLock's displaced header
|
||||
__ str(swap_reg, Address(lock_reg, mark_word_offset));
|
||||
|
||||
// src -> dest iff dest == r0 else r0 <- dest
|
||||
__ cmpxchg_obj_header(r0, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
|
||||
|
||||
// Hmm should this move to the slow path code area???
|
||||
|
||||
// Test if the oopMark is an obvious stack pointer, i.e.,
|
||||
// 1) (mark & 3) == 0, and
|
||||
// 2) sp <= mark < mark + os::pagesize()
|
||||
// These 3 tests can be done by evaluating the following
|
||||
// expression: ((mark - sp) & (3 - os::vm_page_size())),
|
||||
// assuming both stack pointer and pagesize have their
|
||||
// least significant 2 bits clear.
|
||||
// NOTE: the oopMark is in swap_reg %r0 as the result of cmpxchg
|
||||
|
||||
__ sub(swap_reg, sp, swap_reg);
|
||||
__ neg(swap_reg, swap_reg);
|
||||
__ ands(swap_reg, swap_reg, 3 - (int)os::vm_page_size());
|
||||
|
||||
// Save the test result, for recursive case, the result is zero
|
||||
__ str(swap_reg, Address(lock_reg, mark_word_offset));
|
||||
__ br(Assembler::NE, slow_path_lock);
|
||||
|
||||
__ bind(count);
|
||||
__ inc_held_monitor_count(rscratch1);
|
||||
} else {
|
||||
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
|
||||
__ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
|
||||
}
|
||||
__ lightweight_lock(lock_reg, obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
|
||||
|
||||
// Slow path will re-enter here
|
||||
__ bind(lock_done);
|
||||
@@ -1888,7 +1851,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
||||
__ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
|
||||
__ stlrw(rscratch1, rscratch2);
|
||||
|
||||
if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
|
||||
if (method->is_object_wait0()) {
|
||||
// Check preemption for Object.wait()
|
||||
__ ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset()));
|
||||
__ cbz(rscratch1, native_return);
|
||||
@@ -1917,48 +1880,18 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
||||
// Get locked oop from the handle we passed to jni
|
||||
__ ldr(obj_reg, Address(oop_handle_reg, 0));
|
||||
|
||||
Label done, not_recursive;
|
||||
|
||||
if (LockingMode == LM_LEGACY) {
|
||||
// Simple recursive lock?
|
||||
__ ldr(rscratch1, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
|
||||
__ cbnz(rscratch1, not_recursive);
|
||||
__ dec_held_monitor_count(rscratch1);
|
||||
__ b(done);
|
||||
}
|
||||
|
||||
__ bind(not_recursive);
|
||||
|
||||
// Must save r0 if it is live now because cmpxchg must use it
|
||||
if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
|
||||
save_native_result(masm, ret_type, stack_slots);
|
||||
}
|
||||
|
||||
if (LockingMode == LM_MONITOR) {
|
||||
__ b(slow_path_unlock);
|
||||
} else if (LockingMode == LM_LEGACY) {
|
||||
// get address of the stack lock
|
||||
__ lea(r0, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
|
||||
// get old displaced header
|
||||
__ ldr(old_hdr, Address(r0, 0));
|
||||
|
||||
// Atomic swap old header if oop still contains the stack lock
|
||||
Label count;
|
||||
__ cmpxchg_obj_header(r0, old_hdr, obj_reg, rscratch1, count, &slow_path_unlock);
|
||||
__ bind(count);
|
||||
__ dec_held_monitor_count(rscratch1);
|
||||
} else {
|
||||
assert(LockingMode == LM_LIGHTWEIGHT, "");
|
||||
__ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
|
||||
}
|
||||
__ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
|
||||
|
||||
// slow path re-enters here
|
||||
__ bind(unlock_done);
|
||||
if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
|
||||
restore_native_result(masm, ret_type, stack_slots);
|
||||
}
|
||||
|
||||
__ bind(done);
|
||||
}
|
||||
|
||||
Label dtrace_method_exit, dtrace_method_exit_done;
|
||||
|
||||
@@ -2732,8 +2732,11 @@ class StubGenerator: public StubCodeGenerator {
|
||||
address entry_jlong_arraycopy;
|
||||
address entry_checkcast_arraycopy;
|
||||
|
||||
address ucm_common_error_exit = generate_unsafecopy_common_error_exit();
|
||||
UnsafeMemoryAccess::set_common_exit_stub_pc(ucm_common_error_exit);
|
||||
// generate the common exit first so later stubs can rely on it if
|
||||
// they want an UnsafeMemoryAccess exit non-local to the stub
|
||||
StubRoutines::_unsafecopy_common_exit = generate_unsafecopy_common_error_exit();
|
||||
// register the stub as the default exit with class UnsafeMemoryAccess
|
||||
UnsafeMemoryAccess::set_common_exit_stub_pc(StubRoutines::_unsafecopy_common_exit);
|
||||
|
||||
generate_copy_longs(StubId::stubgen_copy_byte_f_id, IN_HEAP | IS_ARRAY, copy_f, r0, r1, r15);
|
||||
generate_copy_longs(StubId::stubgen_copy_byte_b_id, IN_HEAP | IS_ARRAY, copy_b, r0, r1, r15);
|
||||
@@ -11680,8 +11683,6 @@ class StubGenerator: public StubCodeGenerator {
|
||||
}
|
||||
|
||||
if (UseCRC32Intrinsics) {
|
||||
// set table address before stub generation which use it
|
||||
StubRoutines::_crc_table_adr = (address)StubRoutines::aarch64::_crc_table;
|
||||
StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32();
|
||||
}
|
||||
|
||||
|
||||
@@ -71,6 +71,10 @@ ATTRIBUTE_ALIGNED(64) uint32_t StubRoutines::aarch64::_dilithiumConsts[] =
|
||||
/**
|
||||
* crc_table[] from jdk/src/share/native/java/util/zip/zlib-1.2.5/crc32.h
|
||||
*/
|
||||
|
||||
address StubRoutines::crc_table_addr() { return (address)StubRoutines::aarch64::_crc_table; }
|
||||
address StubRoutines::crc32c_table_addr() { ShouldNotCallThis(); return nullptr; }
|
||||
|
||||
ATTRIBUTE_ALIGNED(4096) juint StubRoutines::aarch64::_crc_table[] =
|
||||
{
|
||||
// Table 0
|
||||
|
||||
@@ -47,6 +47,7 @@ enum platform_dependent_constants {
|
||||
|
||||
class aarch64 {
|
||||
friend class StubGenerator;
|
||||
friend class StubRoutines;
|
||||
#if INCLUDE_JVMCI
|
||||
friend class JVMCIVMStructs;
|
||||
#endif
|
||||
|
||||
@@ -1478,22 +1478,17 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
__ lea(rscratch2, Address(rthread, JavaThread::thread_state_offset()));
|
||||
__ stlrw(rscratch1, rscratch2);
|
||||
|
||||
if (LockingMode != LM_LEGACY) {
|
||||
// Check preemption for Object.wait()
|
||||
Label not_preempted;
|
||||
__ ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset()));
|
||||
__ cbz(rscratch1, not_preempted);
|
||||
__ str(zr, Address(rthread, JavaThread::preempt_alternate_return_offset()));
|
||||
__ br(rscratch1);
|
||||
__ bind(native_return);
|
||||
__ restore_after_resume(true /* is_native */);
|
||||
// reload result_handler
|
||||
__ ldr(result_handler, Address(rfp, frame::interpreter_frame_result_handler_offset*wordSize));
|
||||
__ bind(not_preempted);
|
||||
} else {
|
||||
// any pc will do so just use this one for LM_LEGACY to keep code together.
|
||||
__ bind(native_return);
|
||||
}
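// A hedged sketch (plain C++, not the generated assembly) of the preemption-check
// pattern used in this hunk: after the thread returns to Java state it inspects a
// per-thread "alternate return pc" slot; a non-zero value means the frame was frozen
// while preempted, so the slot is cleared and control continues at that saved pc
// (modelled here as a callback), otherwise execution simply falls through. The struct
// and callback names are illustrative stand-ins for the JavaThread field and branch.
#include <cstdint>

struct ThreadStub {
  uintptr_t preempt_alternate_return = 0;   // stand-in for the JavaThread offset above
};

using resume_fn = void (*)();

void check_preempt_and_resume(ThreadStub& thread, resume_fn resume_at_saved_pc) {
  uintptr_t pc = thread.preempt_alternate_return;
  if (pc != 0) {
    thread.preempt_alternate_return = 0;    // str(zr, ...) in the generated code
    (void)pc;                               // the real code branches to this saved pc
    resume_at_saved_pc();                   // modelled as a call here
  }
  // fall through: the native call was not preempted
}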
|
||||
// Check preemption for Object.wait()
|
||||
Label not_preempted;
|
||||
__ ldr(rscratch1, Address(rthread, JavaThread::preempt_alternate_return_offset()));
|
||||
__ cbz(rscratch1, not_preempted);
|
||||
__ str(zr, Address(rthread, JavaThread::preempt_alternate_return_offset()));
|
||||
__ br(rscratch1);
|
||||
__ bind(native_return);
|
||||
__ restore_after_resume(true /* is_native */);
|
||||
// reload result_handler
|
||||
__ ldr(result_handler, Address(rfp, frame::interpreter_frame_result_handler_offset*wordSize));
|
||||
__ bind(not_preempted);
|
||||
|
||||
// reset_last_Java_frame
|
||||
__ reset_last_Java_frame(true);
|
||||
|
||||
@@ -32,6 +32,7 @@
|
||||
#include "runtime/vm_version.hpp"
|
||||
#include "utilities/formatBuffer.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#include "utilities/ostream.hpp"
|
||||
|
||||
int VM_Version::_cpu;
|
||||
int VM_Version::_model;
|
||||
@@ -50,6 +51,8 @@ uintptr_t VM_Version::_pac_mask;
|
||||
|
||||
SpinWait VM_Version::_spin_wait;
|
||||
|
||||
const char* VM_Version::_features_names[MAX_CPU_FEATURES] = { nullptr };
|
||||
|
||||
static SpinWait get_spin_wait_desc() {
|
||||
SpinWait spin_wait(OnSpinWaitInst, OnSpinWaitInstCount);
|
||||
if (spin_wait.inst() == SpinWait::SB && !VM_Version::supports_sb()) {
|
||||
@@ -60,6 +63,11 @@ static SpinWait get_spin_wait_desc() {
|
||||
}
|
||||
|
||||
void VM_Version::initialize() {
|
||||
#define SET_CPU_FEATURE_NAME(id, name, bit) \
|
||||
_features_names[bit] = XSTR(name);
|
||||
CPU_FEATURE_FLAGS(SET_CPU_FEATURE_NAME)
|
||||
#undef SET_CPU_FEATURE_NAME
|
||||
|
||||
_supports_atomic_getset4 = true;
|
||||
_supports_atomic_getadd4 = true;
|
||||
_supports_atomic_getset8 = true;
|
||||
@@ -194,7 +202,7 @@ void VM_Version::initialize() {
|
||||
|
||||
// Cortex A53
|
||||
if (_cpu == CPU_ARM && model_is(0xd03)) {
|
||||
_features |= CPU_A53MAC;
|
||||
set_feature(CPU_A53MAC);
|
||||
if (FLAG_IS_DEFAULT(UseSIMDForArrayEquals)) {
|
||||
FLAG_SET_DEFAULT(UseSIMDForArrayEquals, false);
|
||||
}
|
||||
@@ -234,7 +242,7 @@ void VM_Version::initialize() {
|
||||
}
|
||||
}
|
||||
|
||||
if (_features & (CPU_FP | CPU_ASIMD)) {
|
||||
if (supports_feature(CPU_FP) || supports_feature(CPU_ASIMD)) {
|
||||
if (FLAG_IS_DEFAULT(UseSignumIntrinsic)) {
|
||||
FLAG_SET_DEFAULT(UseSignumIntrinsic, true);
|
||||
}
|
||||
@@ -397,7 +405,7 @@ void VM_Version::initialize() {
|
||||
FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
|
||||
}
|
||||
|
||||
if (_features & CPU_ASIMD) {
|
||||
if (supports_feature(CPU_ASIMD)) {
|
||||
if (FLAG_IS_DEFAULT(UseChaCha20Intrinsics)) {
|
||||
UseChaCha20Intrinsics = true;
|
||||
}
|
||||
@@ -408,7 +416,7 @@ void VM_Version::initialize() {
|
||||
FLAG_SET_DEFAULT(UseChaCha20Intrinsics, false);
|
||||
}
|
||||
|
||||
if (_features & CPU_ASIMD) {
|
||||
if (supports_feature(CPU_ASIMD)) {
|
||||
if (FLAG_IS_DEFAULT(UseKyberIntrinsics)) {
|
||||
UseKyberIntrinsics = true;
|
||||
}
|
||||
@@ -419,7 +427,7 @@ void VM_Version::initialize() {
|
||||
FLAG_SET_DEFAULT(UseKyberIntrinsics, false);
|
||||
}
|
||||
|
||||
if (_features & CPU_ASIMD) {
|
||||
if (supports_feature(CPU_ASIMD)) {
|
||||
if (FLAG_IS_DEFAULT(UseDilithiumIntrinsics)) {
|
||||
UseDilithiumIntrinsics = true;
|
||||
}
|
||||
@@ -620,32 +628,38 @@ void VM_Version::initialize() {
|
||||
|
||||
// Sync SVE related CPU features with flags
|
||||
if (UseSVE < 2) {
|
||||
_features &= ~CPU_SVE2;
|
||||
_features &= ~CPU_SVEBITPERM;
|
||||
clear_feature(CPU_SVE2);
|
||||
clear_feature(CPU_SVEBITPERM);
|
||||
}
|
||||
if (UseSVE < 1) {
|
||||
_features &= ~CPU_SVE;
|
||||
clear_feature(CPU_SVE);
|
||||
}
|
||||
|
||||
// Construct the "features" string
|
||||
char buf[512];
|
||||
int buf_used_len = os::snprintf_checked(buf, sizeof(buf), "0x%02x:0x%x:0x%03x:%d", _cpu, _variant, _model, _revision);
|
||||
stringStream ss(512);
|
||||
ss.print("0x%02x:0x%x:0x%03x:%d", _cpu, _variant, _model, _revision);
|
||||
if (_model2) {
|
||||
os::snprintf_checked(buf + buf_used_len, sizeof(buf) - buf_used_len, "(0x%03x)", _model2);
|
||||
ss.print("(0x%03x)", _model2);
|
||||
}
|
||||
size_t features_offset = strnlen(buf, sizeof(buf));
|
||||
#define ADD_FEATURE_IF_SUPPORTED(id, name, bit) \
|
||||
do { \
|
||||
if (VM_Version::supports_##name()) strcat(buf, ", " #name); \
|
||||
} while(0);
|
||||
CPU_FEATURE_FLAGS(ADD_FEATURE_IF_SUPPORTED)
|
||||
#undef ADD_FEATURE_IF_SUPPORTED
|
||||
ss.print(", ");
|
||||
int features_offset = (int)ss.size();
|
||||
insert_features_names(_features, ss);
|
||||
|
||||
_cpu_info_string = os::strdup(buf);
|
||||
_cpu_info_string = ss.as_string(true);
|
||||
_features_string = _cpu_info_string + features_offset;
|
||||
}
|
||||
|
||||
_features_string = extract_features_string(_cpu_info_string,
|
||||
strnlen(_cpu_info_string, sizeof(buf)),
|
||||
features_offset);
|
||||
void VM_Version::insert_features_names(uint64_t features, stringStream& ss) {
|
||||
int i = 0;
|
||||
ss.join([&]() {
|
||||
while (i < MAX_CPU_FEATURES) {
|
||||
if (supports_feature((VM_Version::Feature_Flag)i)) {
|
||||
return _features_names[i++];
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
return (const char*)nullptr;
|
||||
}, ", ");
|
||||
}
|
||||
|
||||
#if defined(LINUX)
|
||||
@@ -707,12 +721,12 @@ void VM_Version::initialize_cpu_information(void) {
|
||||
_no_of_cores = os::processor_count();
|
||||
_no_of_threads = _no_of_cores;
|
||||
_no_of_sockets = _no_of_cores;
|
||||
snprintf(_cpu_name, CPU_TYPE_DESC_BUF_SIZE - 1, "AArch64");
|
||||
os::snprintf_checked(_cpu_name, CPU_TYPE_DESC_BUF_SIZE - 1, "AArch64");
|
||||
|
||||
int desc_len = snprintf(_cpu_desc, CPU_DETAILED_DESC_BUF_SIZE, "AArch64 ");
|
||||
int desc_len = os::snprintf(_cpu_desc, CPU_DETAILED_DESC_BUF_SIZE, "AArch64 ");
|
||||
get_compatible_board(_cpu_desc + desc_len, CPU_DETAILED_DESC_BUF_SIZE - desc_len);
|
||||
desc_len = (int)strlen(_cpu_desc);
|
||||
snprintf(_cpu_desc + desc_len, CPU_DETAILED_DESC_BUF_SIZE - desc_len, " %s", _cpu_info_string);
|
||||
os::snprintf_checked(_cpu_desc + desc_len, CPU_DETAILED_DESC_BUF_SIZE - desc_len, " %s", _cpu_info_string);
|
||||
|
||||
_initialized = true;
|
||||
}
|
||||
|
||||
@@ -30,6 +30,10 @@
|
||||
#include "runtime/abstract_vm_version.hpp"
|
||||
#include "utilities/sizes.hpp"
|
||||
|
||||
class stringStream;
|
||||
|
||||
#define BIT_MASK(flag) (1ULL<<(flag))
|
||||
|
||||
class VM_Version : public Abstract_VM_Version {
|
||||
friend class VMStructs;
|
||||
friend class JVMCIVMStructs;
|
||||
@@ -66,6 +70,8 @@ public:
|
||||
static void initialize();
|
||||
static void check_virtualizations();
|
||||
|
||||
static void insert_features_names(uint64_t features, stringStream& ss);
|
||||
|
||||
static void print_platform_virtualization_info(outputStream*);
|
||||
|
||||
// Asserts
|
||||
@@ -139,17 +145,32 @@ enum Ampere_CPU_Model {
|
||||
decl(A53MAC, a53mac, 31)
|
||||
|
||||
enum Feature_Flag {
|
||||
#define DECLARE_CPU_FEATURE_FLAG(id, name, bit) CPU_##id = (1 << bit),
|
||||
#define DECLARE_CPU_FEATURE_FLAG(id, name, bit) CPU_##id = bit,
|
||||
CPU_FEATURE_FLAGS(DECLARE_CPU_FEATURE_FLAG)
|
||||
#undef DECLARE_CPU_FEATURE_FLAG
|
||||
MAX_CPU_FEATURES
|
||||
};
|
||||
|
||||
STATIC_ASSERT(sizeof(_features) * BitsPerByte >= MAX_CPU_FEATURES);
|
||||
|
||||
static const char* _features_names[MAX_CPU_FEATURES];
|
||||
|
||||
// Feature identification
|
||||
#define CPU_FEATURE_DETECTION(id, name, bit) \
|
||||
static bool supports_##name() { return (_features & CPU_##id) != 0; };
|
||||
static bool supports_##name() { return supports_feature(CPU_##id); }
|
||||
CPU_FEATURE_FLAGS(CPU_FEATURE_DETECTION)
|
||||
#undef CPU_FEATURE_DETECTION
|
||||
|
||||
static void set_feature(Feature_Flag flag) {
|
||||
_features |= BIT_MASK(flag);
|
||||
}
|
||||
static void clear_feature(Feature_Flag flag) {
|
||||
_features &= (~BIT_MASK(flag));
|
||||
}
|
||||
static bool supports_feature(Feature_Flag flag) {
|
||||
return (_features & BIT_MASK(flag)) != 0;
|
||||
}
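// A standalone sketch of the bit-index scheme this hunk moves to: each Feature_Flag is
// now a bit position rather than a precomputed mask, and BIT_MASK(flag) builds the mask
// on use, so more than 32 features fit in a 64-bit word. The enum values and names below
// are illustrative only, not the real flag list.
#include <cstdint>
#include <cassert>

enum Feature_Flag_Demo { DEMO_FP = 0, DEMO_ASIMD = 1, DEMO_SVE = 20, DEMO_MAX = 21 };
#define DEMO_BIT_MASK(flag) (1ULL << (flag))

static uint64_t demo_features = 0;

static void demo_set_feature(Feature_Flag_Demo f)      { demo_features |= DEMO_BIT_MASK(f); }
static void demo_clear_feature(Feature_Flag_Demo f)    { demo_features &= ~DEMO_BIT_MASK(f); }
static bool demo_supports_feature(Feature_Flag_Demo f) { return (demo_features & DEMO_BIT_MASK(f)) != 0; }

int main() {
  demo_set_feature(DEMO_SVE);
  assert(demo_supports_feature(DEMO_SVE));
  demo_clear_feature(DEMO_SVE);
  assert(!demo_supports_feature(DEMO_SVE));
  return 0;
}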
|
||||
|
||||
static int cpu_family() { return _cpu; }
|
||||
static int cpu_model() { return _model; }
|
||||
static int cpu_model2() { return _model2; }
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -95,8 +95,6 @@
|
||||
}
|
||||
|
||||
static int adjust_reg_range(int range) {
|
||||
// Reduce the number of available regs (to free Rheap_base) in case of compressed oops
|
||||
if (UseCompressedOops || UseCompressedClassPointers) return range - 1;
|
||||
return range;
|
||||
}
|
||||
|
||||
|
||||
@@ -2229,16 +2229,9 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
// We don't know the array types are compatible
|
||||
if (basic_type != T_OBJECT) {
|
||||
// Simple test for basic type arrays
|
||||
if (UseCompressedClassPointers) {
|
||||
// We don't need decode because we just need to compare
|
||||
__ ldr_u32(tmp, Address(src, oopDesc::klass_offset_in_bytes()));
|
||||
__ ldr_u32(tmp2, Address(dst, oopDesc::klass_offset_in_bytes()));
|
||||
__ cmp_32(tmp, tmp2);
|
||||
} else {
|
||||
__ load_klass(tmp, src);
|
||||
__ load_klass(tmp2, dst);
|
||||
__ cmp(tmp, tmp2);
|
||||
}
|
||||
__ load_klass(tmp, src);
|
||||
__ load_klass(tmp2, dst);
|
||||
__ cmp(tmp, tmp2);
|
||||
__ b(*stub->entry(), ne);
|
||||
} else {
|
||||
// For object arrays, if src is a sub class of dst then we can
|
||||
@@ -2433,13 +2426,7 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
|
||||
Register hdr = op->hdr_opr()->as_pointer_register();
|
||||
Register lock = op->lock_opr()->as_pointer_register();
|
||||
|
||||
if (LockingMode == LM_MONITOR) {
|
||||
if (op->info() != nullptr) {
|
||||
add_debug_info_for_null_check_here(op->info());
|
||||
__ null_check(obj);
|
||||
}
|
||||
__ b(*op->stub()->entry());
|
||||
} else if (op->code() == lir_lock) {
|
||||
if (op->code() == lir_lock) {
|
||||
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
|
||||
int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry());
|
||||
if (op->info() != nullptr) {
|
||||
@@ -2461,12 +2448,7 @@ void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
|
||||
if (info != nullptr) {
|
||||
add_debug_info_for_null_check_here(info);
|
||||
}
|
||||
|
||||
if (UseCompressedClassPointers) { // On 32 bit arm??
|
||||
__ ldr_u32(result, Address(obj, oopDesc::klass_offset_in_bytes()));
|
||||
} else {
|
||||
__ ldr(result, Address(obj, oopDesc::klass_offset_in_bytes()));
|
||||
}
|
||||
__ ldr(result, Address(obj, oopDesc::klass_offset_in_bytes()));
|
||||
}
|
||||
|
||||
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
|
||||
|
||||
@@ -177,18 +177,16 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len,
|
||||
}
|
||||
|
||||
int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
|
||||
Label done, fast_lock, fast_lock_done;
|
||||
int null_check_offset = 0;
|
||||
|
||||
const Register tmp2 = Rtemp; // Rtemp should be free at c1 LIR level
|
||||
assert_different_registers(hdr, obj, disp_hdr, tmp2);
|
||||
|
||||
assert(BasicObjectLock::lock_offset() == 0, "adjust this code");
|
||||
const ByteSize obj_offset = BasicObjectLock::obj_offset();
|
||||
const int mark_offset = BasicLock::displaced_header_offset_in_bytes();
|
||||
assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions");
|
||||
|
||||
// save object being locked into the BasicObjectLock
|
||||
str(obj, Address(disp_hdr, obj_offset));
|
||||
str(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
|
||||
|
||||
null_check_offset = offset();
|
||||
|
||||
@@ -199,95 +197,29 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
|
||||
b(slow_case, ne);
|
||||
}
|
||||
|
||||
assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions");
|
||||
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
|
||||
Register t1 = disp_hdr; // Needs saving, probably
|
||||
Register t2 = hdr; // blow
|
||||
Register t3 = Rtemp; // blow
|
||||
|
||||
lightweight_lock(obj /* obj */, t1, t2, t3, 1 /* savemask - save t1 */, slow_case);
|
||||
// Success: fall through
|
||||
|
||||
} else if (LockingMode == LM_LEGACY) {
|
||||
|
||||
// On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
|
||||
// That would be acceptable as either CAS or slow case path is taken in that case.
|
||||
|
||||
// Must be the first instruction here, because implicit null check relies on it
|
||||
ldr(hdr, Address(obj, oopDesc::mark_offset_in_bytes()));
|
||||
|
||||
tst(hdr, markWord::unlocked_value);
|
||||
b(fast_lock, ne);
|
||||
|
||||
// Check for recursive locking
|
||||
// See comments in InterpreterMacroAssembler::lock_object for
|
||||
// explanations on the fast recursive locking check.
|
||||
// -1- test low 2 bits
|
||||
movs(tmp2, AsmOperand(hdr, lsl, 30));
|
||||
// -2- test (hdr - SP) if the low two bits are 0
|
||||
sub(tmp2, hdr, SP, eq);
|
||||
movs(tmp2, AsmOperand(tmp2, lsr, exact_log2(os::vm_page_size())), eq);
|
||||
// If still 'eq' then recursive locking OK
|
||||
// set to zero if recursive lock, set to non zero otherwise (see discussion in JDK-8267042)
|
||||
str(tmp2, Address(disp_hdr, mark_offset));
|
||||
b(fast_lock_done, eq);
|
||||
// else need slow case
|
||||
b(slow_case);
|
||||
|
||||
|
||||
bind(fast_lock);
|
||||
// Save previous object header in BasicLock structure and update the header
|
||||
str(hdr, Address(disp_hdr, mark_offset));
|
||||
|
||||
cas_for_lock_acquire(hdr, disp_hdr, obj, tmp2, slow_case);
|
||||
|
||||
bind(fast_lock_done);
|
||||
}
|
||||
bind(done);
|
||||
Register t1 = disp_hdr; // Needs saving, probably
|
||||
Register t2 = hdr; // blow
|
||||
Register t3 = Rtemp; // blow
|
||||
|
||||
lightweight_lock(obj, t1, t2, t3, 1 /* savemask - save t1 */, slow_case);
|
||||
// Success: fall through
|
||||
return null_check_offset;
|
||||
}
|
||||
|
||||
void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
|
||||
assert_different_registers(hdr, obj, disp_hdr, Rtemp);
|
||||
Register tmp2 = Rtemp;
|
||||
|
||||
assert(BasicObjectLock::lock_offset() == 0, "adjust this code");
|
||||
const ByteSize obj_offset = BasicObjectLock::obj_offset();
|
||||
const int mark_offset = BasicLock::displaced_header_offset_in_bytes();
|
||||
|
||||
Label done;
|
||||
|
||||
assert(oopDesc::mark_offset_in_bytes() == 0, "Required by atomic instructions");
|
||||
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
ldr(obj, Address(disp_hdr, BasicObjectLock::obj_offset()));
|
||||
|
||||
ldr(obj, Address(disp_hdr, obj_offset));
|
||||
Register t1 = disp_hdr; // Needs saving, probably
|
||||
Register t2 = hdr; // blow
|
||||
Register t3 = Rtemp; // blow
|
||||
|
||||
Register t1 = disp_hdr; // Needs saving, probably
|
||||
Register t2 = hdr; // blow
|
||||
Register t3 = Rtemp; // blow
|
||||
|
||||
lightweight_unlock(obj /* object */, t1, t2, t3, 1 /* savemask (save t1) */,
|
||||
slow_case);
|
||||
// Success: Fall through
|
||||
|
||||
} else if (LockingMode == LM_LEGACY) {
|
||||
|
||||
// Load displaced header and object from the lock
|
||||
ldr(hdr, Address(disp_hdr, mark_offset));
|
||||
// If hdr is null, we've got recursive locking and there's nothing more to do
|
||||
cbz(hdr, done);
|
||||
|
||||
// load object
|
||||
ldr(obj, Address(disp_hdr, obj_offset));
|
||||
|
||||
// Restore the object header
|
||||
cas_for_lock_release(disp_hdr, hdr, obj, tmp2, slow_case);
|
||||
}
|
||||
bind(done);
|
||||
lightweight_unlock(obj, t1, t2, t3, 1 /* savemask - save t1 */, slow_case);
|
||||
// Success: fall through
|
||||
}
|
||||
|
||||
#ifndef PRODUCT
|
||||
|
||||
@@ -81,7 +81,7 @@ void C2_MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratc
|
||||
assert(VM_Version::supports_ldrex(), "unsupported, yet?");
|
||||
assert_different_registers(Roop, Rbox, Rscratch, Rscratch2);
|
||||
|
||||
Label fast_lock, done;
|
||||
Label done;
|
||||
|
||||
if (DiagnoseSyncOnValueBasedClasses != 0) {
|
||||
load_klass(Rscratch, Roop);
|
||||
@@ -90,43 +90,10 @@ void C2_MacroAssembler::fast_lock(Register Roop, Register Rbox, Register Rscratc
|
||||
b(done, ne);
|
||||
}
|
||||
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
|
||||
lightweight_lock(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
|
||||
1 /* savemask (save t1) */, done);
|
||||
|
||||
// Success: set Z
|
||||
cmp(Roop, Roop);
|
||||
|
||||
} else if (LockingMode == LM_LEGACY) {
|
||||
|
||||
Register Rmark = Rscratch2;
|
||||
|
||||
ldr(Rmark, Address(Roop, oopDesc::mark_offset_in_bytes()));
|
||||
tst(Rmark, markWord::unlocked_value);
|
||||
b(fast_lock, ne);
|
||||
|
||||
// Check for recursive lock
|
||||
// See comments in InterpreterMacroAssembler::lock_object for
|
||||
// explanations on the fast recursive locking check.
|
||||
// -1- test low 2 bits
|
||||
movs(Rscratch, AsmOperand(Rmark, lsl, 30));
|
||||
// -2- test (hdr - SP) if the low two bits are 0
|
||||
sub(Rscratch, Rmark, SP, eq);
|
||||
movs(Rscratch, AsmOperand(Rscratch, lsr, exact_log2(os::vm_page_size())), eq);
|
||||
// If still 'eq' then recursive locking OK
|
||||
// set to zero if recursive lock, set to non zero otherwise (see discussion in JDK-8153107)
|
||||
str(Rscratch, Address(Rbox, BasicLock::displaced_header_offset_in_bytes()));
|
||||
b(done);
|
||||
|
||||
bind(fast_lock);
|
||||
str(Rmark, Address(Rbox, BasicLock::displaced_header_offset_in_bytes()));
|
||||
|
||||
bool allow_fallthrough_on_failure = true;
|
||||
bool one_shot = true;
|
||||
cas_for_lock_acquire(Rmark, Rbox, Roop, Rscratch, done, allow_fallthrough_on_failure, one_shot);
|
||||
}
|
||||
lightweight_lock(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
|
||||
1 /* savemask (save t1) */, done);
|
||||
|
||||
cmp(Roop, Roop); // Success: set Z
|
||||
bind(done);
|
||||
|
||||
// At this point flags are set as follows:
|
||||
@@ -140,29 +107,12 @@ void C2_MacroAssembler::fast_unlock(Register Roop, Register Rbox, Register Rscra
|
||||
|
||||
Label done;
|
||||
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
lightweight_unlock(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
|
||||
1 /* savemask (save t1) */, done);
|
||||
|
||||
lightweight_unlock(Roop /* obj */, Rbox /* t1 */, Rscratch /* t2 */, Rscratch2 /* t3 */,
|
||||
1 /* savemask (save t1) */, done);
|
||||
cmp(Roop, Roop); // Success: Set Z
|
||||
// Fall through
|
||||
|
||||
cmp(Roop, Roop); // Success: Set Z
|
||||
// Fall through
|
||||
|
||||
} else if (LockingMode == LM_LEGACY) {
|
||||
|
||||
Register Rmark = Rscratch2;
|
||||
|
||||
// Find the lock address and load the displaced header from the stack.
|
||||
ldr(Rmark, Address(Rbox, BasicLock::displaced_header_offset_in_bytes()));
|
||||
// If hdr is null, we've got recursive locking and there's nothing more to do
|
||||
cmp(Rmark, 0);
|
||||
b(done, eq);
|
||||
|
||||
// Restore the object header
|
||||
bool allow_fallthrough_on_failure = true;
|
||||
bool one_shot = true;
|
||||
cas_for_lock_release(Rbox, Rmark, Roop, Rscratch, done, allow_fallthrough_on_failure, one_shot);
|
||||
}
|
||||
bind(done);
|
||||
|
||||
// At this point flags are set as follows:
|
||||
|
||||
@@ -60,6 +60,10 @@ inline void FreezeBase::patch_pd(frame& hf, const frame& caller) {
|
||||
Unimplemented();
|
||||
}
|
||||
|
||||
inline void FreezeBase::patch_pd_unused(intptr_t* sp) {
|
||||
Unimplemented();
|
||||
}
|
||||
|
||||
inline void FreezeBase::patch_stack_pd(intptr_t* frame_sp, intptr_t* heap_sp) {
|
||||
Unimplemented();
|
||||
}
|
||||
|
||||
@@ -888,105 +888,30 @@ void InterpreterMacroAssembler::set_do_not_unlock_if_synchronized(bool flag, Reg
|
||||
void InterpreterMacroAssembler::lock_object(Register Rlock) {
|
||||
assert(Rlock == R1, "the second argument");
|
||||
|
||||
if (LockingMode == LM_MONITOR) {
|
||||
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), Rlock);
|
||||
} else {
|
||||
Label done;
|
||||
const Register Robj = R2;
|
||||
const Register Rmark = R3;
|
||||
assert_different_registers(Robj, Rmark, Rlock, R0, Rtemp);
|
||||
|
||||
const Register Robj = R2;
|
||||
const Register Rmark = R3;
|
||||
assert_different_registers(Robj, Rmark, Rlock, R0, Rtemp);
|
||||
Label done, slow_case;
|
||||
|
||||
const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
|
||||
const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
|
||||
const int mark_offset = lock_offset + BasicLock::displaced_header_offset_in_bytes();
|
||||
// Load object pointer
|
||||
ldr(Robj, Address(Rlock, BasicObjectLock::obj_offset()));
|
||||
|
||||
Label already_locked, slow_case;
|
||||
|
||||
// Load object pointer
|
||||
ldr(Robj, Address(Rlock, obj_offset));
|
||||
|
||||
if (DiagnoseSyncOnValueBasedClasses != 0) {
|
||||
load_klass(R0, Robj);
|
||||
ldrb(R0, Address(R0, Klass::misc_flags_offset()));
|
||||
tst(R0, KlassFlags::_misc_is_value_based_class);
|
||||
b(slow_case, ne);
|
||||
}
|
||||
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
lightweight_lock(Robj, R0 /* t1 */, Rmark /* t2 */, Rtemp /* t3 */, 0 /* savemask */, slow_case);
|
||||
b(done);
|
||||
} else if (LockingMode == LM_LEGACY) {
|
||||
// On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
|
||||
// That would be acceptable as ether CAS or slow case path is taken in that case.
|
||||
// Exception to that is if the object is locked by the calling thread, then the recursive test will pass (guaranteed as
|
||||
// loads are satisfied from a store queue if performed on the same processor).
|
||||
|
||||
assert(oopDesc::mark_offset_in_bytes() == 0, "must be");
|
||||
ldr(Rmark, Address(Robj, oopDesc::mark_offset_in_bytes()));
|
||||
|
||||
// Test if object is already locked
|
||||
tst(Rmark, markWord::unlocked_value);
|
||||
b(already_locked, eq);
|
||||
|
||||
// Save old object->mark() into BasicLock's displaced header
|
||||
str(Rmark, Address(Rlock, mark_offset));
|
||||
|
||||
cas_for_lock_acquire(Rmark, Rlock, Robj, Rtemp, slow_case);
|
||||
|
||||
b(done);
|
||||
|
||||
// If we got here that means the object is locked by ether calling thread or another thread.
|
||||
bind(already_locked);
|
||||
// Handling of locked objects: recursive locks and slow case.
|
||||
|
||||
// Fast check for recursive lock.
|
||||
//
|
||||
// Can apply the optimization only if this is a stack lock
|
||||
// allocated in this thread. For efficiency, we can focus on
|
||||
// recently allocated stack locks (instead of reading the stack
|
||||
// base and checking whether 'mark' points inside the current
|
||||
// thread stack):
|
||||
// 1) (mark & 3) == 0
|
||||
// 2) SP <= mark < SP + os::pagesize()
|
||||
//
|
||||
// Warning: SP + os::pagesize can overflow the stack base. We must
|
||||
// neither apply the optimization for an inflated lock allocated
|
||||
// just above the thread stack (this is why condition 1 matters)
|
||||
// nor apply the optimization if the stack lock is inside the stack
|
||||
// of another thread. The latter is avoided even in case of overflow
|
||||
// because we have guard pages at the end of all stacks. Hence, if
|
||||
// we go over the stack base and hit the stack of another thread,
|
||||
// this should not be in a writeable area that could contain a
|
||||
// stack lock allocated by that thread. As a consequence, a stack
|
||||
// lock less than page size away from SP is guaranteed to be
|
||||
// owned by the current thread.
|
||||
//
|
||||
// Note: assuming SP is aligned, we can check the low bits of
|
||||
// (mark-SP) instead of the low bits of mark. In that case,
|
||||
// assuming page size is a power of 2, we can merge the two
|
||||
// conditions into a single test:
|
||||
// => ((mark - SP) & (3 - os::pagesize())) == 0
|
||||
|
||||
// (3 - os::pagesize()) cannot be encoded as an ARM immediate operand.
|
||||
// Check independently the low bits and the distance to SP.
|
||||
// -1- test low 2 bits
|
||||
movs(R0, AsmOperand(Rmark, lsl, 30));
|
||||
// -2- test (mark - SP) if the low two bits are 0
|
||||
sub(R0, Rmark, SP, eq);
|
||||
movs(R0, AsmOperand(R0, lsr, exact_log2(os::vm_page_size())), eq);
|
||||
// If still 'eq' then recursive locking OK: store 0 into lock record
|
||||
str(R0, Address(Rlock, mark_offset), eq);
|
||||
|
||||
b(done, eq);
|
||||
}
|
||||
|
||||
bind(slow_case);
|
||||
|
||||
// Call the runtime routine for slow case
|
||||
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), Rlock);
|
||||
bind(done);
|
||||
if (DiagnoseSyncOnValueBasedClasses != 0) {
|
||||
load_klass(R0, Robj);
|
||||
ldrb(R0, Address(R0, Klass::misc_flags_offset()));
|
||||
tst(R0, KlassFlags::_misc_is_value_based_class);
|
||||
b(slow_case, ne);
|
||||
}
|
||||
|
||||
lightweight_lock(Robj, R0 /* t1 */, Rmark /* t2 */, Rtemp /* t3 */, 0 /* savemask */, slow_case);
|
||||
b(done);
|
||||
|
||||
bind(slow_case);
|
||||
|
||||
// Call the runtime routine for slow case
|
||||
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), Rlock);
|
||||
bind(done);
|
||||
}
|
||||
|
||||
// Unlocks an object. Used in monitorexit bytecode and remove_activation.
|
||||
@@ -997,65 +922,39 @@ void InterpreterMacroAssembler::lock_object(Register Rlock) {
|
||||
void InterpreterMacroAssembler::unlock_object(Register Rlock) {
|
||||
assert(Rlock == R0, "the first argument");
|
||||
|
||||
if (LockingMode == LM_MONITOR) {
|
||||
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), Rlock);
|
||||
} else {
|
||||
Label done, slow_case;
|
||||
Label done, slow_case;
|
||||
|
||||
const Register Robj = R2;
|
||||
const Register Rmark = R3;
|
||||
assert_different_registers(Robj, Rmark, Rlock, Rtemp);
|
||||
const Register Robj = R2;
|
||||
const Register Rmark = R3;
|
||||
assert_different_registers(Robj, Rmark, Rlock, Rtemp);
|
||||
|
||||
const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
|
||||
const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
|
||||
const int mark_offset = lock_offset + BasicLock::displaced_header_offset_in_bytes();
|
||||
const int obj_offset = in_bytes(BasicObjectLock::obj_offset());
|
||||
const Register Rzero = zero_register(Rtemp);
|
||||
|
||||
const Register Rzero = zero_register(Rtemp);
|
||||
// Load oop into Robj
|
||||
ldr(Robj, Address(Rlock, obj_offset));
|
||||
|
||||
// Load oop into Robj
|
||||
ldr(Robj, Address(Rlock, obj_offset));
|
||||
// Free entry
|
||||
str(Rzero, Address(Rlock, obj_offset));
|
||||
|
||||
// Free entry
|
||||
str(Rzero, Address(Rlock, obj_offset));
|
||||
// Check for non-symmetric locking. This is allowed by the spec and the interpreter
|
||||
// must handle it.
|
||||
ldr(Rtemp, Address(Rthread, JavaThread::lock_stack_top_offset()));
|
||||
sub(Rtemp, Rtemp, oopSize);
|
||||
ldr(Rtemp, Address(Rthread, Rtemp));
|
||||
cmpoop(Rtemp, Robj);
|
||||
b(slow_case, ne);
|
||||
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
lightweight_unlock(Robj /* obj */, Rlock /* t1 */, Rmark /* t2 */, Rtemp /* t3 */,
|
||||
1 /* savemask (save t1) */, slow_case);
|
||||
b(done);
|
||||
|
||||
// Check for non-symmetric locking. This is allowed by the spec and the interpreter
|
||||
// must handle it.
|
||||
ldr(Rtemp, Address(Rthread, JavaThread::lock_stack_top_offset()));
|
||||
sub(Rtemp, Rtemp, oopSize);
|
||||
ldr(Rtemp, Address(Rthread, Rtemp));
|
||||
cmpoop(Rtemp, Robj);
|
||||
b(slow_case, ne);
|
||||
bind(slow_case);
|
||||
// Call the runtime routine for slow case.
|
||||
str(Robj, Address(Rlock, obj_offset)); // restore obj
|
||||
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), Rlock);
|
||||
|
||||
lightweight_unlock(Robj /* obj */, Rlock /* t1 */, Rmark /* t2 */, Rtemp /* t3 */,
|
||||
1 /* savemask (save t1) */, slow_case);
|
||||
|
||||
b(done);
|
||||
|
||||
} else if (LockingMode == LM_LEGACY) {
|
||||
|
||||
// Load the old header from BasicLock structure
|
||||
ldr(Rmark, Address(Rlock, mark_offset));
|
||||
|
||||
// Test for recursion (zero mark in BasicLock)
|
||||
cbz(Rmark, done);
|
||||
|
||||
bool allow_fallthrough_on_failure = true;
|
||||
|
||||
cas_for_lock_release(Rlock, Rmark, Robj, Rtemp, slow_case, allow_fallthrough_on_failure);
|
||||
|
||||
b(done, eq);
|
||||
|
||||
}
|
||||
bind(slow_case);
|
||||
|
||||
// Call the runtime routine for slow case.
|
||||
str(Robj, Address(Rlock, obj_offset)); // restore obj
|
||||
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), Rlock);
|
||||
|
||||
bind(done);
|
||||
}
|
||||
bind(done);
|
||||
}
|
||||
|
||||
// Test ImethodDataPtr. If it is null, continue at the specified label
|
||||
|
||||
@@ -839,7 +839,7 @@ void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file,
|
||||
char buffer[64];
|
||||
#ifdef COMPILER1
|
||||
if (CommentedAssembly) {
|
||||
snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset());
|
||||
os::snprintf_checked(buffer, sizeof(buffer), "verify_oop at %d", offset());
|
||||
block_comment(buffer);
|
||||
}
|
||||
#endif
|
||||
@@ -1758,7 +1758,6 @@ void MacroAssembler::read_polling_page(Register dest, relocInfo::relocType rtype
|
||||
// - Success: fallthrough
|
||||
// - Error: break to slow, Z cleared.
|
||||
void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow) {
|
||||
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
|
||||
assert_different_registers(obj, t1, t2, t3);
|
||||
|
||||
#ifdef ASSERT
|
||||
@@ -1816,7 +1815,6 @@ void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, Re
|
||||
// - Success: fallthrough
|
||||
// - Error: break to slow, Z cleared.
|
||||
void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, unsigned savemask, Label& slow) {
|
||||
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
|
||||
assert_different_registers(obj, t1, t2, t3);
|
||||
|
||||
#ifdef ASSERT
|
||||
|
||||
@@ -1139,41 +1139,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
||||
// Remember the handle for the unlocking code
|
||||
__ mov(sync_handle, R1);
|
||||
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
log_trace(fastlock)("SharedRuntime lock fast");
|
||||
__ lightweight_lock(sync_obj /* object */, disp_hdr /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
|
||||
0x7 /* savemask */, slow_lock);
|
||||
log_trace(fastlock)("SharedRuntime lock fast");
|
||||
__ lightweight_lock(sync_obj /* object */, disp_hdr /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
|
||||
0x7 /* savemask */, slow_lock);
|
||||
// Fall through to lock_done
|
||||
} else if (LockingMode == LM_LEGACY) {
|
||||
const Register mark = tmp;
|
||||
// On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
|
||||
// That would be acceptable as either CAS or slow case path is taken in that case
|
||||
|
||||
__ ldr(mark, Address(sync_obj, oopDesc::mark_offset_in_bytes()));
|
||||
__ sub(disp_hdr, FP, lock_slot_fp_offset);
|
||||
__ tst(mark, markWord::unlocked_value);
|
||||
__ b(fast_lock, ne);
|
||||
|
||||
// Check for recursive lock
|
||||
// See comments in InterpreterMacroAssembler::lock_object for
|
||||
// explanations on the fast recursive locking check.
|
||||
// Check independently the low bits and the distance to SP
|
||||
// -1- test low 2 bits
|
||||
__ movs(Rtemp, AsmOperand(mark, lsl, 30));
|
||||
// -2- test (hdr - SP) if the low two bits are 0
|
||||
__ sub(Rtemp, mark, SP, eq);
|
||||
__ movs(Rtemp, AsmOperand(Rtemp, lsr, exact_log2(os::vm_page_size())), eq);
|
||||
// If still 'eq' then recursive locking OK
|
||||
// set to zero if recursive lock, set to non zero otherwise (see discussion in JDK-8267042)
|
||||
__ str(Rtemp, Address(disp_hdr, BasicLock::displaced_header_offset_in_bytes()));
|
||||
__ b(lock_done, eq);
|
||||
__ b(slow_lock);
|
||||
|
||||
__ bind(fast_lock);
|
||||
__ str(mark, Address(disp_hdr, BasicLock::displaced_header_offset_in_bytes()));
|
||||
|
||||
__ cas_for_lock_acquire(mark, disp_hdr, sync_obj, Rtemp, slow_lock);
|
||||
}
|
||||
__ bind(lock_done);
|
||||
}
|
||||
|
||||
@@ -1226,21 +1195,11 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
|
||||
|
||||
Label slow_unlock, unlock_done;
|
||||
if (method->is_synchronized()) {
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
log_trace(fastlock)("SharedRuntime unlock fast");
|
||||
__ lightweight_unlock(sync_obj, R2 /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
|
||||
7 /* savemask */, slow_unlock);
|
||||
// Fall through
|
||||
} else if (LockingMode == LM_LEGACY) {
|
||||
// See C1_MacroAssembler::unlock_object() for more comments
|
||||
__ ldr(sync_obj, Address(sync_handle));
|
||||
log_trace(fastlock)("SharedRuntime unlock fast");
|
||||
__ lightweight_unlock(sync_obj, R2 /* t1 */, tmp /* t2 */, Rtemp /* t3 */,
|
||||
7 /* savemask */, slow_unlock);
|
||||
// Fall through
|
||||
|
||||
// See C1_MacroAssembler::unlock_object() for more comments
|
||||
__ ldr(R2, Address(disp_hdr, BasicLock::displaced_header_offset_in_bytes()));
|
||||
__ cbz(R2, unlock_done);
|
||||
|
||||
__ cas_for_lock_release(disp_hdr, R2, sync_obj, Rtemp, slow_unlock);
|
||||
}
|
||||
__ bind(unlock_done);
|
||||
}
|
||||
|
||||
|
||||
@@ -3001,12 +3001,15 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
void generate_arraycopy_stubs() {
|
||||
|
||||
// generate the common exit first so later stubs can rely on it if
|
||||
// they want an UnsafeMemoryAccess exit non-local to the stub
|
||||
StubRoutines::_unsafecopy_common_exit = generate_unsafecopy_common_error_exit();
|
||||
// register the stub as the default exit with class UnsafeMemoryAccess
|
||||
UnsafeMemoryAccess::set_common_exit_stub_pc(StubRoutines::_unsafecopy_common_exit);
|
||||
|
||||
// Note: the disjoint stubs must be generated first, some of
|
||||
// the conjoint stubs use them.
|
||||
|
||||
address ucm_common_error_exit = generate_unsafecopy_common_error_exit();
|
||||
UnsafeMemoryAccess::set_common_exit_stub_pc(ucm_common_error_exit);
|
||||
|
||||
// these need always status in case they are called from generic_arraycopy
|
||||
StubRoutines::_jbyte_disjoint_arraycopy = generate_primitive_copy(StubId::stubgen_jbyte_disjoint_arraycopy_id);
|
||||
StubRoutines::_jshort_disjoint_arraycopy = generate_primitive_copy(StubId::stubgen_jshort_disjoint_arraycopy_id);
|
||||
|
||||
@@ -36,3 +36,6 @@ STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT)
|
||||
|
||||
#undef DEFINE_ARCH_ENTRY_INIT
|
||||
#undef DEFINE_ARCH_ENTRY
|
||||
|
||||
address StubRoutines::crc_table_addr() { ShouldNotCallThis(); return nullptr; }
|
||||
address StubRoutines::crc32c_table_addr() { ShouldNotCallThis(); return nullptr; }
|
||||
|
||||
@@ -174,6 +174,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
|
||||
break;
|
||||
case Interpreter::java_lang_math_fmaD:
|
||||
case Interpreter::java_lang_math_fmaF:
|
||||
case Interpreter::java_lang_math_sinh:
|
||||
case Interpreter::java_lang_math_tanh:
|
||||
case Interpreter::java_lang_math_cbrt:
|
||||
// TODO: Implement intrinsic
|
||||
|
||||
@@ -362,7 +362,7 @@ void VM_Version::initialize_cpu_information(void) {
|
||||
_no_of_cores = os::processor_count();
|
||||
_no_of_threads = _no_of_cores;
|
||||
_no_of_sockets = _no_of_cores;
|
||||
snprintf(_cpu_name, CPU_TYPE_DESC_BUF_SIZE - 1, "ARM%d", _arm_arch);
|
||||
snprintf(_cpu_desc, CPU_DETAILED_DESC_BUF_SIZE, "%s", _cpu_info_string);
|
||||
os::snprintf_checked(_cpu_name, CPU_TYPE_DESC_BUF_SIZE - 1, "ARM%d", _arm_arch);
|
||||
os::snprintf_checked(_cpu_desc, CPU_DETAILED_DESC_BUF_SIZE, "%s", _cpu_info_string);
|
||||
_initialized = true;
|
||||
}
|
||||
|
||||
@@ -228,11 +228,7 @@ int LIR_Assembler::emit_unwind_handler() {
|
||||
if (method()->is_synchronized()) {
|
||||
monitor_address(0, FrameMap::R4_opr);
|
||||
stub = new MonitorExitStub(FrameMap::R4_opr, true, 0);
|
||||
if (LockingMode == LM_MONITOR) {
|
||||
__ b(*stub->entry());
|
||||
} else {
|
||||
__ unlock_object(R5, R6, R4, *stub->entry());
|
||||
}
|
||||
__ unlock_object(R5, R6, R4, *stub->entry());
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
@@ -2618,44 +2614,20 @@ void LIR_Assembler::emit_lock(LIR_OpLock* op) {
|
||||
// Obj may not be an oop.
|
||||
if (op->code() == lir_lock) {
|
||||
MonitorEnterStub* stub = (MonitorEnterStub*)op->stub();
|
||||
if (LockingMode != LM_MONITOR) {
|
||||
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
|
||||
// Add debug info for NullPointerException only if one is possible.
|
||||
if (op->info() != nullptr) {
|
||||
if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
|
||||
explicit_null_check(obj, op->info());
|
||||
} else {
|
||||
add_debug_info_for_null_check_here(op->info());
|
||||
}
|
||||
}
|
||||
__ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
|
||||
} else {
|
||||
// always do slow locking
|
||||
// note: The slow locking code could be inlined here, however if we use
|
||||
// slow locking, speed doesn't matter anyway and this solution is
|
||||
// simpler and requires less duplicated code - additionally, the
|
||||
// slow locking code is the same in either case which simplifies
|
||||
// debugging.
|
||||
if (op->info() != nullptr) {
|
||||
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
|
||||
// Add debug info for NullPointerException only if one is possible.
|
||||
if (op->info() != nullptr) {
|
||||
if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
|
||||
explicit_null_check(obj, op->info());
|
||||
} else {
|
||||
add_debug_info_for_null_check_here(op->info());
|
||||
__ null_check(obj);
|
||||
}
|
||||
__ b(*op->stub()->entry());
|
||||
}
|
||||
__ lock_object(hdr, obj, lock, op->scratch_opr()->as_register(), *op->stub()->entry());
|
||||
} else {
|
||||
assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock");
|
||||
if (LockingMode != LM_MONITOR) {
|
||||
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
|
||||
__ unlock_object(hdr, obj, lock, *op->stub()->entry());
|
||||
} else {
|
||||
// always do slow unlocking
|
||||
// note: The slow unlocking code could be inlined here, however if we use
|
||||
// slow unlocking, speed doesn't matter anyway and this solution is
|
||||
// simpler and requires less duplicated code - additionally, the
|
||||
// slow unlocking code is the same in either case which simplifies
|
||||
// debugging.
|
||||
__ b(*op->stub()->entry());
|
||||
}
|
||||
assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
|
||||
__ unlock_object(hdr, obj, lock, *op->stub()->entry());
|
||||
}
|
||||
__ bind(*op->stub()->continuation());
|
||||
}
|
||||
|
||||
@@ -82,59 +82,13 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
|
||||
// Save object being locked into the BasicObjectLock...
|
||||
std(Roop, in_bytes(BasicObjectLock::obj_offset()), Rbox);
|
||||
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
lightweight_lock(Rbox, Roop, Rmark, Rscratch, slow_int);
|
||||
} else if (LockingMode == LM_LEGACY) {
|
||||
|
||||
if (DiagnoseSyncOnValueBasedClasses != 0) {
|
||||
load_klass(Rscratch, Roop);
|
||||
lbz(Rscratch, in_bytes(Klass::misc_flags_offset()), Rscratch);
|
||||
testbitdi(CR0, R0, Rscratch, exact_log2(KlassFlags::_misc_is_value_based_class));
|
||||
bne(CR0, slow_int);
|
||||
}
|
||||
|
||||
// ... and mark it unlocked.
|
||||
ori(Rmark, Rmark, markWord::unlocked_value);
|
||||
|
||||
// Save unlocked object header into the displaced header location on the stack.
|
||||
std(Rmark, BasicLock::displaced_header_offset_in_bytes(), Rbox);
|
||||
|
||||
// Compare object markWord with Rmark and if equal exchange Rscratch with object markWord.
|
||||
assert(oopDesc::mark_offset_in_bytes() == 0, "cas must take a zero displacement");
|
||||
cmpxchgd(/*flag=*/CR0,
|
||||
/*current_value=*/Rscratch,
|
||||
/*compare_value=*/Rmark,
|
||||
/*exchange_value=*/Rbox,
|
||||
/*where=*/Roop/*+0==mark_offset_in_bytes*/,
|
||||
MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
|
||||
MacroAssembler::cmpxchgx_hint_acquire_lock(),
|
||||
noreg,
|
||||
&cas_failed,
|
||||
/*check without membar and ldarx first*/true);
|
||||
// If compare/exchange succeeded we found an unlocked object and we now have locked it
|
||||
// hence we are done.
|
||||
} else {
|
||||
assert(false, "Unhandled LockingMode:%d", LockingMode);
|
||||
}
|
||||
lightweight_lock(Rbox, Roop, Rmark, Rscratch, slow_int);
|
||||
b(done);
|
||||
|
||||
bind(slow_int);
|
||||
b(slow_case); // far
|
||||
|
||||
if (LockingMode == LM_LEGACY) {
|
||||
bind(cas_failed);
|
||||
// We did not find an unlocked object so see if this is a recursive case.
|
||||
sub(Rscratch, Rscratch, R1_SP);
|
||||
load_const_optimized(R0, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
|
||||
and_(R0/*==0?*/, Rscratch, R0);
|
||||
std(R0/*==0, perhaps*/, BasicLock::displaced_header_offset_in_bytes(), Rbox);
|
||||
bne(CR0, slow_int);
|
||||
}
|
||||
|
||||
bind(done);
|
||||
if (LockingMode == LM_LEGACY) {
|
||||
inc_held_monitor_count(Rmark /*tmp*/);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -146,43 +100,17 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb
|
||||
Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
|
||||
assert(mark_addr.disp() == 0, "cas must take a zero displacement");
|
||||
|
||||
if (LockingMode != LM_LIGHTWEIGHT) {
|
||||
// Test first if it is a fast recursive unlock.
|
||||
ld(Rmark, BasicLock::displaced_header_offset_in_bytes(), Rbox);
|
||||
cmpdi(CR0, Rmark, 0);
|
||||
beq(CR0, done);
|
||||
}
|
||||
|
||||
// Load object.
|
||||
ld(Roop, in_bytes(BasicObjectLock::obj_offset()), Rbox);
|
||||
verify_oop(Roop, FILE_AND_LINE);
|
||||
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
lightweight_unlock(Roop, Rmark, slow_int);
|
||||
} else if (LockingMode == LM_LEGACY) {
|
||||
// Check if it is still a light weight lock, this is is true if we see
|
||||
// the stack address of the basicLock in the markWord of the object.
|
||||
cmpxchgd(/*flag=*/CR0,
|
||||
/*current_value=*/R0,
|
||||
/*compare_value=*/Rbox,
|
||||
/*exchange_value=*/Rmark,
|
||||
/*where=*/Roop,
|
||||
MacroAssembler::MemBarRel,
|
||||
MacroAssembler::cmpxchgx_hint_release_lock(),
|
||||
noreg,
|
||||
&slow_int);
|
||||
} else {
|
||||
assert(false, "Unhandled LockingMode:%d", LockingMode);
|
||||
}
|
||||
lightweight_unlock(Roop, Rmark, slow_int);
|
||||
b(done);
|
||||
bind(slow_int);
|
||||
b(slow_case); // far
|
||||
|
||||
// Done
|
||||
bind(done);
|
||||
if (LockingMode == LM_LEGACY) {
|
||||
dec_held_monitor_count(Rmark /*tmp*/);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -334,6 +334,9 @@ inline void FreezeBase::patch_pd(frame& hf, const frame& caller) {
|
||||
#endif
|
||||
}
|
||||
|
||||
inline void FreezeBase::patch_pd_unused(intptr_t* sp) {
|
||||
}
|
||||
|
||||
//////// Thaw
|
||||
|
||||
// Fast path
|
||||
|
||||
@@ -311,7 +311,7 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_impl(MacroAssembler *masm
|
||||
}
|
||||
|
||||
// Invoke runtime.
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), pre_val, R16_thread);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), pre_val);
|
||||
|
||||
// Restore to-be-preserved registers.
|
||||
if (!preserve_gp_registers && preloaded_mode && pre_val->is_volatile()) {
|
||||
@@ -966,7 +966,7 @@ void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAss
|
||||
__ push_frame_reg_args(nbytes_save, R11_tmp1);
|
||||
|
||||
// Invoke runtime.
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre), R0_pre_val, R16_thread);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_pre), R0_pre_val);
|
||||
|
||||
// Restore to-be-preserved registers.
|
||||
__ pop_frame();
|
||||
|
||||
@@ -946,121 +946,20 @@ void InterpreterMacroAssembler::leave_jfr_critical_section() {
|
||||
// object - Address of the object to be locked.
|
||||
//
|
||||
void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
|
||||
if (LockingMode == LM_MONITOR) {
|
||||
call_VM_preemptable(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), monitor);
|
||||
} else {
|
||||
// template code (for LM_LEGACY):
|
||||
//
|
||||
// markWord displaced_header = obj->mark().set_unlocked();
|
||||
// monitor->lock()->set_displaced_header(displaced_header);
|
||||
// if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {
|
||||
// // We stored the monitor address into the object's mark word.
|
||||
// } else if (THREAD->is_lock_owned((address)displaced_header))
|
||||
// // Simple recursive case.
|
||||
// monitor->lock()->set_displaced_header(nullptr);
|
||||
// } else {
|
||||
// // Slow path.
|
||||
// InterpreterRuntime::monitorenter(THREAD, monitor);
|
||||
// }
|
||||
const Register header = R7_ARG5;
|
||||
const Register tmp = R8_ARG6;
|
||||
|
||||
const Register header = R7_ARG5;
|
||||
const Register object_mark_addr = R8_ARG6;
|
||||
const Register current_header = R9_ARG7;
|
||||
const Register tmp = R10_ARG8;
|
||||
Label done, slow_case;
|
||||
|
||||
Label count_locking, done, slow_case, cas_failed;
|
||||
assert_different_registers(header, tmp);
|
||||
|
||||
assert_different_registers(header, object_mark_addr, current_header, tmp);
|
||||
lightweight_lock(monitor, object, header, tmp, slow_case);
|
||||
b(done);
|
||||
|
||||
// markWord displaced_header = obj->mark().set_unlocked();
|
||||
bind(slow_case);
|
||||
call_VM_preemptable(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), monitor);
|
||||
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
lightweight_lock(monitor, object, header, tmp, slow_case);
|
||||
b(done);
|
||||
} else if (LockingMode == LM_LEGACY) {
|
||||
|
||||
if (DiagnoseSyncOnValueBasedClasses != 0) {
|
||||
load_klass(tmp, object);
|
||||
lbz(tmp, in_bytes(Klass::misc_flags_offset()), tmp);
|
||||
testbitdi(CR0, R0, tmp, exact_log2(KlassFlags::_misc_is_value_based_class));
|
||||
bne(CR0, slow_case);
|
||||
}
|
||||
|
||||
// Load markWord from object into header.
|
||||
ld(header, oopDesc::mark_offset_in_bytes(), object);
|
||||
|
||||
// Set displaced_header to be (markWord of object | UNLOCK_VALUE).
|
||||
ori(header, header, markWord::unlocked_value);
|
||||
|
||||
// monitor->lock()->set_displaced_header(displaced_header);
|
||||
const int lock_offset = in_bytes(BasicObjectLock::lock_offset());
|
||||
const int mark_offset = lock_offset +
|
||||
BasicLock::displaced_header_offset_in_bytes();
|
||||
|
||||
// Initialize the box (Must happen before we update the object mark!).
|
||||
std(header, mark_offset, monitor);
|
||||
|
||||
// if (Atomic::cmpxchg(/*addr*/obj->mark_addr(), /*cmp*/displaced_header, /*ex=*/monitor) == displaced_header) {
|
||||
|
||||
// Store stack address of the BasicObjectLock (this is monitor) into object.
|
||||
addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
|
||||
|
||||
// Must fence, otherwise, preceding store(s) may float below cmpxchg.
|
||||
// CmpxchgX sets CR0 to cmpX(current, displaced).
|
||||
cmpxchgd(/*flag=*/CR0,
|
||||
/*current_value=*/current_header,
|
||||
/*compare_value=*/header, /*exchange_value=*/monitor,
|
||||
/*where=*/object_mark_addr,
|
||||
MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
|
||||
MacroAssembler::cmpxchgx_hint_acquire_lock(),
|
||||
noreg,
|
||||
&cas_failed,
|
||||
/*check without membar and ldarx first*/true);
|
||||
|
||||
// If the compare-and-exchange succeeded, then we found an unlocked
|
||||
// object and we have now locked it.
|
||||
b(count_locking);
|
||||
bind(cas_failed);
|
||||
|
||||
// } else if (THREAD->is_lock_owned((address)displaced_header))
|
||||
// // Simple recursive case.
|
||||
// monitor->lock()->set_displaced_header(nullptr);
|
||||
|
||||
// We did not see an unlocked object so try the fast recursive case.
|
||||
|
||||
// Check if owner is self by comparing the value in the markWord of object
|
||||
// (current_header) with the stack pointer.
|
||||
sub(current_header, current_header, R1_SP);
|
||||
|
||||
assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
|
||||
load_const_optimized(tmp, ~(os::vm_page_size()-1) | markWord::lock_mask_in_place);
|
||||
|
||||
and_(R0/*==0?*/, current_header, tmp);
|
||||
// If condition is true we are done and hence we can store 0 in the displaced
|
||||
// header indicating it is a recursive lock.
|
||||
bne(CR0, slow_case);
|
||||
std(R0/*==0!*/, mark_offset, monitor);
|
||||
b(count_locking);
|
||||
}
|
||||
|
||||
// } else {
|
||||
// // Slow path.
|
||||
// InterpreterRuntime::monitorenter(THREAD, monitor);
|
||||
|
||||
// None of the above fast optimizations worked so we have to get into the
|
||||
// slow case of monitor enter.
|
||||
bind(slow_case);
|
||||
call_VM_preemptable(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), monitor);
|
||||
// }
|
||||
|
||||
if (LockingMode == LM_LEGACY) {
|
||||
b(done);
|
||||
align(32, 12);
|
||||
bind(count_locking);
|
||||
inc_held_monitor_count(current_header /*tmp*/);
|
||||
}
|
||||
bind(done);
|
||||
}
|
||||
bind(done);
|
||||
}
|
||||
|
||||
// Unlocks an object. Used in monitorexit bytecode and remove_activation.
|
||||
@@ -1071,95 +970,34 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
|
||||
//
|
||||
// Throw IllegalMonitorException if object is not locked by current thread.
|
||||
void InterpreterMacroAssembler::unlock_object(Register monitor) {
|
||||
if (LockingMode == LM_MONITOR) {
|
||||
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), monitor);
|
||||
} else {
|
||||
const Register object = R7_ARG5;
|
||||
const Register header = R8_ARG6;
|
||||
const Register current_header = R10_ARG8;
|
||||
|
||||
// template code (for LM_LEGACY):
|
||||
//
|
||||
// if ((displaced_header = monitor->displaced_header()) == nullptr) {
|
||||
// // Recursive unlock. Mark the monitor unlocked by setting the object field to null.
|
||||
// monitor->set_obj(nullptr);
|
||||
// } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
|
||||
// // We swapped the unlocked mark in displaced_header into the object's mark word.
|
||||
// monitor->set_obj(nullptr);
|
||||
// } else {
|
||||
// // Slow path.
|
||||
// InterpreterRuntime::monitorexit(monitor);
|
||||
// }
|
||||
Label free_slot;
|
||||
Label slow_case;
|
||||
|
||||
const Register object = R7_ARG5;
|
||||
const Register header = R8_ARG6;
|
||||
const Register object_mark_addr = R9_ARG7;
|
||||
const Register current_header = R10_ARG8;
|
||||
assert_different_registers(object, header, current_header);
|
||||
|
||||
Label free_slot;
|
||||
Label slow_case;
|
||||
// The object address from the monitor is in object.
|
||||
ld(object, in_bytes(BasicObjectLock::obj_offset()), monitor);
|
||||
|
||||
assert_different_registers(object, header, object_mark_addr, current_header);
|
||||
lightweight_unlock(object, header, slow_case);
|
||||
|
||||
if (LockingMode != LM_LIGHTWEIGHT) {
|
||||
// Test first if we are in the fast recursive case.
|
||||
ld(header, in_bytes(BasicObjectLock::lock_offset()) +
|
||||
BasicLock::displaced_header_offset_in_bytes(), monitor);
|
||||
b(free_slot);
|
||||
|
||||
// If the displaced header is zero, we have a recursive unlock.
|
||||
cmpdi(CR0, header, 0);
|
||||
beq(CR0, free_slot); // recursive unlock
|
||||
}
|
||||
bind(slow_case);
|
||||
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), monitor);
|
||||
|
||||
// } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
|
||||
// // We swapped the unlocked mark in displaced_header into the object's mark word.
|
||||
// monitor->set_obj(nullptr);
|
||||
Label done;
|
||||
b(done); // Monitor register may be overwritten! Runtime has already freed the slot.
|
||||
|
||||
// If we still have a lightweight lock, unlock the object and be done.
|
||||
|
||||
// The object address from the monitor is in object.
|
||||
ld(object, in_bytes(BasicObjectLock::obj_offset()), monitor);
|
||||
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
lightweight_unlock(object, header, slow_case);
|
||||
} else {
|
||||
addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
|
||||
|
||||
// We have the displaced header in displaced_header. If the lock is still
|
||||
// lightweight, it will contain the monitor address and we'll store the
|
||||
// displaced header back into the object's mark word.
|
||||
// CmpxchgX sets CR0 to cmpX(current, monitor).
|
||||
cmpxchgd(/*flag=*/CR0,
|
||||
/*current_value=*/current_header,
|
||||
/*compare_value=*/monitor, /*exchange_value=*/header,
|
||||
/*where=*/object_mark_addr,
|
||||
MacroAssembler::MemBarRel,
|
||||
MacroAssembler::cmpxchgx_hint_release_lock(),
|
||||
noreg,
|
||||
&slow_case);
|
||||
}
|
||||
b(free_slot);
|
||||
|
||||
// } else {
|
||||
// // Slow path.
|
||||
// InterpreterRuntime::monitorexit(monitor);
|
||||
|
||||
// The lock has been converted into a heavy lock and hence
|
||||
// we need to get into the slow case.
|
||||
bind(slow_case);
|
||||
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), monitor);
|
||||
// }
|
||||
|
||||
Label done;
|
||||
b(done); // Monitor register may be overwritten! Runtime has already freed the slot.
|
||||
|
||||
// Exchange worked, do monitor->set_obj(nullptr);
|
||||
align(32, 12);
|
||||
bind(free_slot);
|
||||
li(R0, 0);
|
||||
std(R0, in_bytes(BasicObjectLock::obj_offset()), monitor);
|
||||
if (LockingMode == LM_LEGACY) {
|
||||
dec_held_monitor_count(current_header /*tmp*/);
|
||||
}
|
||||
bind(done);
|
||||
}
|
||||
// Do monitor->set_obj(nullptr);
|
||||
align(32, 12);
|
||||
bind(free_slot);
|
||||
li(R0, 0);
|
||||
std(R0, in_bytes(BasicObjectLock::obj_offset()), monitor);
|
||||
bind(done);
|
||||
}
|
||||
|
||||
// Load compiled (i2c) or interpreter entry when calling from interpreted and
|
||||
|
||||
@@ -2671,238 +2671,6 @@ address MacroAssembler::emit_trampoline_stub(int destination_toc_offset,
|
||||
}
|
||||
|
||||
// "The box" is the space on the stack where we copy the object mark.
|
||||
void MacroAssembler::compiler_fast_lock_object(ConditionRegister flag, Register oop, Register box,
|
||||
Register temp, Register displaced_header, Register current_header) {
|
||||
assert(LockingMode != LM_LIGHTWEIGHT, "uses fast_lock_lightweight");
|
||||
assert_different_registers(oop, box, temp, displaced_header, current_header);
|
||||
Label object_has_monitor;
|
||||
Label cas_failed;
|
||||
Label success, failure;
|
||||
|
||||
// Load markWord from object into displaced_header.
|
||||
ld(displaced_header, oopDesc::mark_offset_in_bytes(), oop);
|
||||
|
||||
if (DiagnoseSyncOnValueBasedClasses != 0) {
|
||||
load_klass(temp, oop);
|
||||
lbz(temp, in_bytes(Klass::misc_flags_offset()), temp);
|
||||
testbitdi(flag, R0, temp, exact_log2(KlassFlags::_misc_is_value_based_class));
|
||||
bne(flag, failure);
|
||||
}
|
||||
|
||||
// Handle existing monitor.
|
||||
// The object has an existing monitor iff (mark & monitor_value) != 0.
|
||||
andi_(temp, displaced_header, markWord::monitor_value);
|
||||
bne(CR0, object_has_monitor);
|
||||
|
||||
if (LockingMode == LM_MONITOR) {
|
||||
// Set NE to indicate 'failure' -> take slow-path.
|
||||
crandc(flag, Assembler::equal, flag, Assembler::equal);
|
||||
b(failure);
|
||||
} else {
|
||||
assert(LockingMode == LM_LEGACY, "must be");
|
||||
// Set displaced_header to be (markWord of object | UNLOCK_VALUE).
|
||||
ori(displaced_header, displaced_header, markWord::unlocked_value);
|
||||
|
||||
// Load Compare Value application register.
|
||||
|
||||
// Initialize the box. (Must happen before we update the object mark!)
|
||||
std(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
|
||||
|
||||
// Must fence, otherwise, preceding store(s) may float below cmpxchg.
|
||||
// Compare object markWord with mark and if equal exchange scratch1 with object markWord.
|
||||
cmpxchgd(/*flag=*/flag,
|
||||
/*current_value=*/current_header,
|
||||
/*compare_value=*/displaced_header,
|
||||
/*exchange_value=*/box,
|
||||
/*where=*/oop,
|
||||
MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
|
||||
MacroAssembler::cmpxchgx_hint_acquire_lock(),
|
||||
noreg,
|
||||
&cas_failed,
|
||||
/*check without membar and ldarx first*/true);
|
||||
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
|
||||
// If the compare-and-exchange succeeded, then we found an unlocked
|
||||
// object and we have now locked it.
|
||||
b(success);
|
||||
|
||||
bind(cas_failed);
|
||||
// We did not see an unlocked object so try the fast recursive case.
|
||||
|
||||
// Check if the owner is self by comparing the value in the markWord of object
|
||||
// (current_header) with the stack pointer.
|
||||
sub(current_header, current_header, R1_SP);
|
||||
load_const_optimized(temp, ~(os::vm_page_size()-1) | markWord::lock_mask_in_place);
|
||||
|
||||
and_(R0/*==0?*/, current_header, temp);
|
||||
// If condition is true we are cont and hence we can store 0 as the
|
||||
// displaced header in the box, which indicates that it is a recursive lock.
|
||||
std(R0/*==0, perhaps*/, BasicLock::displaced_header_offset_in_bytes(), box);
|
||||
|
||||
if (flag != CR0) {
|
||||
mcrf(flag, CR0);
|
||||
}
|
||||
beq(CR0, success);
|
||||
b(failure);
|
||||
}
|
||||
|
||||
// Handle existing monitor.
|
||||
bind(object_has_monitor);
|
||||
|
||||
// Try to CAS owner (no owner => current thread's _monitor_owner_id).
|
||||
addi(temp, displaced_header, in_bytes(ObjectMonitor::owner_offset()) - markWord::monitor_value);
|
||||
Register thread_id = displaced_header;
|
||||
ld(thread_id, in_bytes(JavaThread::monitor_owner_id_offset()), R16_thread);
|
||||
cmpxchgd(/*flag=*/flag,
|
||||
/*current_value=*/current_header,
|
||||
/*compare_value=*/(intptr_t)0,
|
||||
/*exchange_value=*/thread_id,
|
||||
/*where=*/temp,
|
||||
MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
|
||||
MacroAssembler::cmpxchgx_hint_acquire_lock());
|
||||
|
||||
// Store a non-null value into the box.
|
||||
std(box, BasicLock::displaced_header_offset_in_bytes(), box);
|
||||
beq(flag, success);
|
||||
|
||||
// Check for recursive locking.
|
||||
cmpd(flag, current_header, thread_id);
|
||||
bne(flag, failure);
|
||||
|
||||
// Current thread already owns the lock. Just increment recursions.
|
||||
Register recursions = displaced_header;
|
||||
ld(recursions, in_bytes(ObjectMonitor::recursions_offset() - ObjectMonitor::owner_offset()), temp);
|
||||
addi(recursions, recursions, 1);
|
||||
std(recursions, in_bytes(ObjectMonitor::recursions_offset() - ObjectMonitor::owner_offset()), temp);
|
||||
|
||||
// flag == EQ indicates success, increment held monitor count if LM_LEGACY is enabled
|
||||
// flag == NE indicates failure
|
||||
bind(success);
|
||||
if (LockingMode == LM_LEGACY) {
|
||||
inc_held_monitor_count(temp);
|
||||
}
|
||||
#ifdef ASSERT
|
||||
// Check that unlocked label is reached with flag == EQ.
|
||||
Label flag_correct;
|
||||
beq(flag, flag_correct);
|
||||
stop("compiler_fast_lock_object: Flag != EQ");
|
||||
#endif
|
||||
bind(failure);
|
||||
#ifdef ASSERT
|
||||
// Check that slow_path label is reached with flag == NE.
|
||||
bne(flag, flag_correct);
|
||||
stop("compiler_fast_lock_object: Flag != NE");
|
||||
bind(flag_correct);
|
||||
#endif
|
||||
}
|
||||
|
||||
void MacroAssembler::compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box,
|
||||
Register temp, Register displaced_header, Register current_header) {
|
||||
assert(LockingMode != LM_LIGHTWEIGHT, "uses fast_unlock_lightweight");
|
||||
assert_different_registers(oop, box, temp, displaced_header, current_header);
|
||||
Label success, failure, object_has_monitor, not_recursive;
|
||||
|
||||
if (LockingMode == LM_LEGACY) {
|
||||
// Find the lock address and load the displaced header from the stack.
|
||||
ld(displaced_header, BasicLock::displaced_header_offset_in_bytes(), box);
|
||||
|
||||
// If the displaced header is 0, we have a recursive unlock.
|
||||
cmpdi(flag, displaced_header, 0);
|
||||
beq(flag, success);
|
||||
}
|
||||
|
||||
// Handle existing monitor.
|
||||
// The object has an existing monitor iff (mark & monitor_value) != 0.
|
||||
ld(current_header, oopDesc::mark_offset_in_bytes(), oop);
|
||||
andi_(R0, current_header, markWord::monitor_value);
|
||||
bne(CR0, object_has_monitor);
|
||||
|
||||
if (LockingMode == LM_MONITOR) {
|
||||
// Set NE to indicate 'failure' -> take slow-path.
|
||||
crandc(flag, Assembler::equal, flag, Assembler::equal);
|
||||
b(failure);
|
||||
} else {
|
||||
assert(LockingMode == LM_LEGACY, "must be");
|
||||
// Check if it is still a light weight lock, this is is true if we see
|
||||
// the stack address of the basicLock in the markWord of the object.
|
||||
// Cmpxchg sets flag to cmpd(current_header, box).
|
||||
cmpxchgd(/*flag=*/flag,
|
||||
/*current_value=*/current_header,
|
||||
/*compare_value=*/box,
|
||||
/*exchange_value=*/displaced_header,
|
||||
/*where=*/oop,
|
||||
MacroAssembler::MemBarRel,
|
||||
MacroAssembler::cmpxchgx_hint_release_lock(),
|
||||
noreg,
|
||||
&failure);
|
||||
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
|
||||
b(success);
|
||||
}
|
||||
|
||||
// Handle existing monitor.
|
||||
bind(object_has_monitor);
|
||||
STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
|
||||
addi(current_header, current_header, -(int)markWord::monitor_value); // monitor
|
||||
|
||||
ld(displaced_header, in_bytes(ObjectMonitor::recursions_offset()), current_header);
|
||||
addic_(displaced_header, displaced_header, -1);
|
||||
blt(CR0, not_recursive); // Not recursive if negative after decrement.
|
||||
|
||||
// Recursive unlock
|
||||
std(displaced_header, in_bytes(ObjectMonitor::recursions_offset()), current_header);
|
||||
if (flag == CR0) { // Otherwise, flag is already EQ, here.
|
||||
crorc(CR0, Assembler::equal, CR0, Assembler::equal); // Set CR0 EQ
|
||||
}
|
||||
b(success);
|
||||
|
||||
bind(not_recursive);
|
||||
|
||||
// Set owner to null.
|
||||
// Release to satisfy the JMM
|
||||
release();
|
||||
li(temp, 0);
|
||||
std(temp, in_bytes(ObjectMonitor::owner_offset()), current_header);
|
||||
// We need a full fence after clearing owner to avoid stranding.
|
||||
// StoreLoad achieves this.
|
||||
membar(StoreLoad);
|
||||
|
||||
// Check if the entry_list is empty.
|
||||
ld(temp, in_bytes(ObjectMonitor::entry_list_offset()), current_header);
|
||||
cmpdi(flag, temp, 0);
|
||||
beq(flag, success); // If so we are done.
|
||||
|
||||
// Check if there is a successor.
|
||||
ld(temp, in_bytes(ObjectMonitor::succ_offset()), current_header);
|
||||
cmpdi(flag, temp, 0);
|
||||
// Invert equal bit
|
||||
crnand(flag, Assembler::equal, flag, Assembler::equal);
|
||||
beq(flag, success); // If there is a successor we are done.
|
||||
|
||||
// Save the monitor pointer in the current thread, so we can try
|
||||
// to reacquire the lock in SharedRuntime::monitor_exit_helper().
|
||||
std(current_header, in_bytes(JavaThread::unlocked_inflated_monitor_offset()), R16_thread);
|
||||
b(failure); // flag == NE
|
||||
|
||||
// flag == EQ indicates success, decrement held monitor count if LM_LEGACY is enabled
|
||||
// flag == NE indicates failure
|
||||
bind(success);
|
||||
if (LockingMode == LM_LEGACY) {
|
||||
dec_held_monitor_count(temp);
|
||||
}
|
||||
#ifdef ASSERT
|
||||
// Check that unlocked label is reached with flag == EQ.
|
||||
Label flag_correct;
|
||||
beq(flag, flag_correct);
|
||||
stop("compiler_fast_unlock_object: Flag != EQ");
|
||||
#endif
|
||||
bind(failure);
|
||||
#ifdef ASSERT
|
||||
// Check that slow_path label is reached with flag == NE.
|
||||
bne(flag, flag_correct);
|
||||
stop("compiler_fast_unlock_object: Flag != NE");
|
||||
bind(flag_correct);
|
||||
#endif
|
||||
}
|
||||
|
||||
void MacroAssembler::compiler_fast_lock_lightweight_object(ConditionRegister flag, Register obj, Register box,
|
||||
Register tmp1, Register tmp2, Register tmp3) {
|
||||
assert_different_registers(obj, box, tmp1, tmp2, tmp3);
|
||||
@@ -4769,38 +4537,6 @@ void MacroAssembler::pop_cont_fastpath() {
|
||||
bind(done);
|
||||
}
|
||||
|
||||
// Note: Must preserve CR0 EQ (invariant).
|
||||
void MacroAssembler::inc_held_monitor_count(Register tmp) {
|
||||
assert(LockingMode == LM_LEGACY, "");
|
||||
ld(tmp, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread);
|
||||
#ifdef ASSERT
|
||||
Label ok;
|
||||
cmpdi(CR0, tmp, 0);
|
||||
bge_predict_taken(CR0, ok);
|
||||
stop("held monitor count is negativ at increment");
|
||||
bind(ok);
|
||||
crorc(CR0, Assembler::equal, CR0, Assembler::equal); // Restore CR0 EQ
|
||||
#endif
|
||||
addi(tmp, tmp, 1);
|
||||
std(tmp, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread);
|
||||
}
|
||||
|
||||
// Note: Must preserve CR0 EQ (invariant).
|
||||
void MacroAssembler::dec_held_monitor_count(Register tmp) {
|
||||
assert(LockingMode == LM_LEGACY, "");
|
||||
ld(tmp, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread);
|
||||
#ifdef ASSERT
|
||||
Label ok;
|
||||
cmpdi(CR0, tmp, 0);
|
||||
bgt_predict_taken(CR0, ok);
|
||||
stop("held monitor count is <= 0 at decrement");
|
||||
bind(ok);
|
||||
crorc(CR0, Assembler::equal, CR0, Assembler::equal); // Restore CR0 EQ
|
||||
#endif
|
||||
addi(tmp, tmp, -1);
|
||||
std(tmp, in_bytes(JavaThread::held_monitor_count_offset()), R16_thread);
|
||||
}
|
||||
|
||||
// Function to flip between unlocked and locked state (fast locking).
|
||||
// Branches to failed if the state is not as expected with CR0 NE.
|
||||
// Falls through upon success with CR0 EQ.
|
||||
@@ -4842,7 +4578,6 @@ void MacroAssembler::atomically_flip_locked_state(bool is_unlock, Register obj,
|
||||
// - obj: the object to be locked
|
||||
// - t1, t2: temporary register
|
||||
void MacroAssembler::lightweight_lock(Register box, Register obj, Register t1, Register t2, Label& slow) {
|
||||
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
|
||||
assert_different_registers(box, obj, t1, t2, R0);
|
||||
|
||||
Label push;
|
||||
@@ -4899,7 +4634,6 @@ void MacroAssembler::lightweight_lock(Register box, Register obj, Register t1, R
|
||||
// - obj: the object to be unlocked
|
||||
// - t1: temporary register
|
||||
void MacroAssembler::lightweight_unlock(Register obj, Register t1, Label& slow) {
|
||||
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
|
||||
assert_different_registers(obj, t1);
|
||||
|
||||
#ifdef ASSERT
|
||||
|
||||
@@ -697,8 +697,6 @@ class MacroAssembler: public Assembler {
|
||||
|
||||
void push_cont_fastpath();
|
||||
void pop_cont_fastpath();
|
||||
void inc_held_monitor_count(Register tmp);
|
||||
void dec_held_monitor_count(Register tmp);
|
||||
void atomically_flip_locked_state(bool is_unlock, Register obj, Register tmp, Label& failed, int semantics);
|
||||
void lightweight_lock(Register box, Register obj, Register t1, Register t2, Label& slow);
|
||||
void lightweight_unlock(Register obj, Register t1, Label& slow);
|
||||
@@ -715,12 +713,6 @@ class MacroAssembler: public Assembler {
|
||||
enum { trampoline_stub_size = 6 * 4 };
|
||||
address emit_trampoline_stub(int destination_toc_offset, int insts_call_instruction_offset, Register Rtoc = noreg);
|
||||
|
||||
void compiler_fast_lock_object(ConditionRegister flag, Register oop, Register box,
|
||||
Register tmp1, Register tmp2, Register tmp3);
|
||||
|
||||
void compiler_fast_unlock_object(ConditionRegister flag, Register oop, Register box,
|
||||
Register tmp1, Register tmp2, Register tmp3);
|
||||
|
||||
void compiler_fast_lock_lightweight_object(ConditionRegister flag, Register oop, Register box,
|
||||
Register tmp1, Register tmp2, Register tmp3);
|
||||
|
||||
|
||||
@@ -11573,40 +11573,8 @@ instruct partialSubtypeCheckConstSuper(rarg3RegP sub, rarg2RegP super_reg, immP
|
||||
|
||||
// inlined locking and unlocking
|
||||
|
||||
instruct cmpFastLock(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2) %{
|
||||
predicate(LockingMode != LM_LIGHTWEIGHT);
|
||||
match(Set crx (FastLock oop box));
|
||||
effect(TEMP tmp1, TEMP tmp2);
|
||||
|
||||
format %{ "FASTLOCK $oop, $box, $tmp1, $tmp2" %}
|
||||
ins_encode %{
|
||||
__ compiler_fast_lock_object($crx$$CondRegister, $oop$$Register, $box$$Register,
|
||||
$tmp1$$Register, $tmp2$$Register, /*tmp3*/ R0);
|
||||
// If locking was successful, crx should indicate 'EQ'.
|
||||
// The compiler generates a branch to the runtime call to
|
||||
// _complete_monitor_locking_Java for the case where crx is 'NE'.
|
||||
%}
|
||||
ins_pipe(pipe_class_compare);
|
||||
%}
|
||||
|
||||
instruct cmpFastUnlock(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3) %{
|
||||
predicate(LockingMode != LM_LIGHTWEIGHT);
|
||||
match(Set crx (FastUnlock oop box));
|
||||
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);
|
||||
|
||||
format %{ "FASTUNLOCK $oop, $box, $tmp1, $tmp2" %}
|
||||
ins_encode %{
|
||||
__ compiler_fast_unlock_object($crx$$CondRegister, $oop$$Register, $box$$Register,
|
||||
$tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
|
||||
// If unlocking was successful, crx should indicate 'EQ'.
|
||||
// The compiler generates a branch to the runtime call to
|
||||
// _complete_monitor_unlocking_Java for the case where crx is 'NE'.
|
||||
%}
|
||||
ins_pipe(pipe_class_compare);
|
||||
%}
|
||||
|
||||
instruct cmpFastLockLightweight(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2) %{
|
||||
predicate(LockingMode == LM_LIGHTWEIGHT && !UseObjectMonitorTable);
|
||||
predicate(!UseObjectMonitorTable);
|
||||
match(Set crx (FastLock oop box));
|
||||
effect(TEMP tmp1, TEMP tmp2);
|
||||
|
||||
@@ -11622,7 +11590,7 @@ instruct cmpFastLockLightweight(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRe
|
||||
%}
|
||||
|
||||
instruct cmpFastLockMonitorTable(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3, flagsRegCR1 cr1) %{
|
||||
predicate(LockingMode == LM_LIGHTWEIGHT && UseObjectMonitorTable);
|
||||
predicate(UseObjectMonitorTable);
|
||||
match(Set crx (FastLock oop box));
|
||||
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr1);
|
||||
|
||||
@@ -11638,7 +11606,6 @@ instruct cmpFastLockMonitorTable(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iR
|
||||
%}
|
||||
|
||||
instruct cmpFastUnlockLightweight(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3) %{
|
||||
predicate(LockingMode == LM_LIGHTWEIGHT);
|
||||
match(Set crx (FastUnlock oop box));
|
||||
effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);
|
||||
|
||||
|
||||
@@ -2446,14 +2446,9 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
||||
__ addi(r_box, R1_SP, lock_offset);
|
||||
|
||||
// Try fastpath for locking.
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
// fast_lock kills r_temp_1, r_temp_2, r_temp_3.
|
||||
Register r_temp_3_or_noreg = UseObjectMonitorTable ? r_temp_3 : noreg;
|
||||
__ compiler_fast_lock_lightweight_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3_or_noreg);
|
||||
} else {
|
||||
// fast_lock kills r_temp_1, r_temp_2, r_temp_3.
|
||||
__ compiler_fast_lock_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
}
// fast_lock kills r_temp_1, r_temp_2, r_temp_3.
Register r_temp_3_or_noreg = UseObjectMonitorTable ? r_temp_3 : noreg;
__ compiler_fast_lock_lightweight_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3_or_noreg);
__ beq(CR0, locked);

// None of the above fast optimizations worked so we have to get into the
@@ -2620,7 +2615,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ stw(R0, thread_(thread_state));

// Check preemption for Object.wait()
if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
if (method->is_object_wait0()) {
Label not_preempted;
__ ld(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread);
__ cmpdi(CR0, R0, 0);
@@ -2672,11 +2667,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ addi(r_box, R1_SP, lock_offset);

// Try fastpath for unlocking.
if (LockingMode == LM_LIGHTWEIGHT) {
__ compiler_fast_unlock_lightweight_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
} else {
__ compiler_fast_unlock_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
}
__ compiler_fast_unlock_lightweight_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
__ beq(CR0, done);

// Save and restore any potential method result value around the unlocking operation.
@@ -2717,7 +2708,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// --------------------------------------------------------------------------

// Last java frame won't be set if we're resuming after preemption
bool maybe_preempted = LockingMode != LM_LEGACY && method->is_object_wait0();
bool maybe_preempted = method->is_object_wait0();
__ reset_last_Java_frame(!maybe_preempted /* check_last_java_sp */);

// Unbox oop result, e.g. JNIHandles::resolve value.

@@ -3271,12 +3271,15 @@ class StubGenerator: public StubCodeGenerator {
}

void generate_arraycopy_stubs() {
// generate the common exit first so later stubs can rely on it if
// they want an UnsafeMemoryAccess exit non-local to the stub
StubRoutines::_unsafecopy_common_exit = generate_unsafecopy_common_error_exit();
// register the stub as the default exit with class UnsafeMemoryAccess
UnsafeMemoryAccess::set_common_exit_stub_pc(StubRoutines::_unsafecopy_common_exit);

// Note: the disjoint stubs must be generated first, some of
// the conjoint stubs use them.

address ucm_common_error_exit = generate_unsafecopy_common_error_exit();
UnsafeMemoryAccess::set_common_exit_stub_pc(ucm_common_error_exit);

// non-aligned disjoint versions
StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(StubId::stubgen_jbyte_disjoint_arraycopy_id);
StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(StubId::stubgen_jshort_disjoint_arraycopy_id);
@@ -4982,13 +4985,11 @@ void generate_lookup_secondary_supers_table_stub() {

// CRC32 Intrinsics.
if (UseCRC32Intrinsics) {
StubRoutines::_crc_table_adr = StubRoutines::ppc::generate_crc_constants(REVERSE_CRC32_POLY);
StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes(StubId::stubgen_updateBytesCRC32_id);
}

// CRC32C Intrinsics.
if (UseCRC32CIntrinsics) {
StubRoutines::_crc32c_table_addr = StubRoutines::ppc::generate_crc_constants(REVERSE_CRC32C_POLY);
StubRoutines::_updateBytesCRC32C = generate_CRC32_updateBytes(StubId::stubgen_updateBytesCRC32C_id);
}

@@ -54,6 +54,7 @@ enum platform_dependent_constants {

class ppc {
friend class StubGenerator;
friend class StubRoutines;

private:
public:

@@ -74,6 +74,22 @@ static julong compute_inverse_poly(julong long_poly) {
return div;
}

static address _crc_table_addr = nullptr;
static address _crc32c_table_addr = nullptr;

address StubRoutines::crc_table_addr() {
if (_crc_table_addr == nullptr) {
_crc_table_addr = StubRoutines::ppc::generate_crc_constants(REVERSE_CRC32_POLY);
}
return _crc_table_addr;
}
address StubRoutines::crc32c_table_addr() {
if (_crc32c_table_addr == nullptr) {
_crc32c_table_addr = StubRoutines::ppc::generate_crc_constants(REVERSE_CRC32C_POLY);
}
return _crc32c_table_addr;
}

// Constants to fold n words as needed by macroAssembler.
address StubRoutines::ppc::generate_crc_constants(juint reverse_poly) {
// Layout of constant table:

@@ -1089,6 +1089,7 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
case Interpreter::java_lang_math_sin : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin); break;
case Interpreter::java_lang_math_cos : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos); break;
case Interpreter::java_lang_math_tan : runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan); break;
case Interpreter::java_lang_math_sinh : /* run interpreted */ break;
case Interpreter::java_lang_math_tanh : /* run interpreted */ break;
case Interpreter::java_lang_math_cbrt : /* run interpreted */ break;
case Interpreter::java_lang_math_abs : /* run interpreted */ break;
@@ -1361,7 +1362,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
// convenient and the slow signature handler can use this same frame
// anchor.

bool support_vthread_preemption = Continuations::enabled() && LockingMode != LM_LEGACY;
bool support_vthread_preemption = Continuations::enabled();

// We have a TOP_IJAVA_FRAME here, which belongs to us.
Label last_java_pc;

@@ -625,7 +625,7 @@ void VM_Version::initialize_cpu_information(void) {
_no_of_cores = os::processor_count();
_no_of_threads = _no_of_cores;
_no_of_sockets = _no_of_cores;
snprintf(_cpu_name, CPU_TYPE_DESC_BUF_SIZE, "PowerPC POWER%lu", PowerArchitecturePPC64);
snprintf(_cpu_desc, CPU_DETAILED_DESC_BUF_SIZE, "PPC %s", cpu_info_string());
os::snprintf_checked(_cpu_name, CPU_TYPE_DESC_BUF_SIZE, "PowerPC POWER%lu", PowerArchitecturePPC64);
os::snprintf_checked(_cpu_desc, CPU_DETAILED_DESC_BUF_SIZE, "PPC %s", cpu_info_string());
_initialized = true;
}

@@ -1988,6 +1988,7 @@ enum VectorMask {

// Vector Narrowing Integer Right Shift Instructions
INSN(vnsra_wi, 0b1010111, 0b011, 0b101101);
INSN(vnsrl_wi, 0b1010111, 0b011, 0b101100);

#undef INSN