Mirror of https://github.com/JetBrains/JetBrainsRuntime.git (synced 2026-01-24 17:30:47 +01:00)

Compare commits: bookmark3...lbourges/W (1350 commits)
Commits: a8647a4eb8 … 32541b383e
.github/README.md (vendored): 2 changed lines
@@ -24,7 +24,7 @@ can be found on the [releases page](https://github.com/JetBrains/JetBrainsRuntim

| IDE Version | Latest JBR | Date Released |
|-------------|---------------------------------------------------------------------------------------------------------|---------------|
| 2025.1 | [21.0.6-b872.80](https://github.com/JetBrains/JetBrainsRuntime/releases/tag/jbr-release-21.0.6b872.80) | 03-Feb-2025 |
| 2025.1 | [21.0.5-b792.48](https://github.com/JetBrains/JetBrainsRuntime/releases/tag/jbr-release-21.0.5b792.48) | 20-Jan-2025 |
| 2024.3 | [21.0.5-b631.28](https://github.com/JetBrains/JetBrainsRuntime/releases/tag/jbr-release-21.0.5b631.28) | 26-Nov-2024 |
| 2024.2 | [21.0.4-b509.30](https://github.com/JetBrains/JetBrainsRuntime/releases/tag/jbr-release-21.0.5b509.30) | 26-Nov-2024 |
| 2024.1 | [21.0.2-b346.3](https://github.com/JetBrains/JetBrainsRuntime/releases/tag/jbr-release-21.0.2b346.3) | 30-Jan-2024 |
Makefile: 11 changed lines
@@ -1,5 +1,5 @@
#
# Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -24,9 +24,8 @@
#

###
### This file is just a very small wrapper which will include make/PreInit.gmk,
### where the real work is done. This wrapper also performs some sanity checks
### on make that must be done before we can include another file.
### This file is just a very small wrapper needed to run the real make/Init.gmk.
### It also performs some sanity checks on make.
###

# The shell code below will be executed on /usr/bin/make on Solaris, but not in GNU Make.
@@ -61,5 +60,5 @@ else
endif
topdir := $(strip $(patsubst %/, %, $(dir $(makefile_path))))

# ... and then we can include the real makefile to bootstrap the build
include $(topdir)/make/PreInit.gmk
# ... and then we can include the real makefile
include $(topdir)/make/Init.gmk
@@ -127,7 +127,7 @@ if [ "$VERBOSE" = true ] ; then
echo "Will generate IDEA project files in \"$IDEA_OUTPUT\" for project \"$TOPLEVEL_DIR\""
fi

cd $TOP ; make idea-gen-config ALLOW=TOPLEVEL_DIR,IDEA_OUTPUT_PARENT,IDEA_OUTPUT,MODULES TOPLEVEL_DIR="$TOPLEVEL_DIR" \
cd $TOP ; make idea-gen-config MAKEOVERRIDES= TOPLEVEL_DIR="$TOPLEVEL_DIR" \
IDEA_OUTPUT_PARENT="$IDEA_OUTPUT_PARENT" IDEA_OUTPUT="$IDEA_OUTPUT" MODULES="$*" $CONF_ARG || exit 1
cd $SCRIPT_DIR
@@ -217,10 +217,10 @@ file as the first include line. Declarations needed by other files
should be put in the .hpp file, and not in the .inline.hpp file. This
rule exists to resolve problems with circular dependencies between
.inline.hpp files.</p></li>
<li><p>Some build configurations use precompiled headers to speed up the
build times. The precompiled headers are included in the precompiled.hpp
file. Note that precompiled.hpp is just a build time optimization, so
don't rely on it to resolve include problems.</p></li>
<li><p>All .cpp files include precompiled.hpp as the first include
line.</p></li>
<li><p>precompiled.hpp is just a build time optimization, so don't rely
on it to resolve include problems.</p></li>
<li><p>Keep the include lines alphabetically sorted.</p></li>
<li><p>Put conditional inclusions (<code>#if ...</code>) at the end of
the include list.</p></li>
@@ -150,10 +150,10 @@ the first include line. Declarations needed by other files should be put
in the .hpp file, and not in the .inline.hpp file. This rule exists to
resolve problems with circular dependencies between .inline.hpp files.

* Some build configurations use precompiled headers to speed up the
build times. The precompiled headers are included in the precompiled.hpp
file. Note that precompiled.hpp is just a build time optimization, so
don't rely on it to resolve include problems.
* All .cpp files include precompiled.hpp as the first include line.

* precompiled.hpp is just a build time optimization, so don't rely on
it to resolve include problems.

* Keep the include lines alphabetically sorted.
@@ -55,10 +55,8 @@ done

log "Signing jmod files"
JMODS_DIR="$APPLICATION_PATH/Contents/Home/jmods"
JMOD_EXE="$BOOT_JDK/bin/jmod"
JMOD_EXE="$APPLICATION_PATH/Contents/Home/bin/jmod"
if [ -d "$JMODS_DIR" ]; then
log "processing jmods"

for jmod_file in "$JMODS_DIR"/*.jmod; do
log "Processing $jmod_file"

@@ -66,16 +64,16 @@ if [ -d "$JMODS_DIR" ]; then
rm -rf "$TMP_DIR"
mkdir "$TMP_DIR"

log "Unzipping $jmod_file"
log "Unzipping $jmod_file"
$JMOD_EXE extract --dir "$TMP_DIR" "$jmod_file" >/dev/null
log "Removing $jmod_file"
rm -f "$jmod_file"

log "Signing dylibs in $TMP_DIR"
find "$TMP_DIR" \
-type f \( -name "*.dylib" -o -name "*.so" -o -perm +111 -o -name jarsigner -o -name jnativescan -o -name jdeps -o -name jpackageapplauncher -o -name jspawnhelper -o -name jar -o -name javap -o -name jdeprscan -o -name jfr -o -name rmiregistry -o -name java -o -name jhsdb -o -name jstatd -o -name jstatd -o -name jpackage -o -name keytool -o -name jmod -o -name jlink -o -name jimage -o -name jstack -o -name jcmd -o -name jps -o -name jmap -o -name jstat -o -name jinfo -o -name jshell -o -name jwebserver -o -name javac -o -name serialver -o -name jrunscript -o -name jdb -o -name jconsole -o -name javadoc \) \
-exec sh -c '"$1" --timestamp -v -s "$2" --options=runtime --force --entitlements "$3" "$4" || exit 1' sh "$SIGN_UTILITY" "$JB_DEVELOPER_CERT" "$SCRIPT_DIR/entitlements.xml" {} \;

log "Removing $jmod_file"
rm -f "$jmod_file"
cmd="$JMOD_EXE create --class-path $TMP_DIR/classes"

# Check each directory and add to the command if it exists
@@ -86,8 +84,6 @@ if [ -d "$JMODS_DIR" ]; then
[ -d "$TMP_DIR/legal" ] && cmd="$cmd --legal-notices $TMP_DIR/legal"
[ -d "$TMP_DIR/man" ] && cmd="$cmd --man-pages $TMP_DIR/man"

log "Creating jmod file"
log "$cmd"
# Add the output file
cmd="$cmd $jmod_file"

@@ -97,41 +93,6 @@ if [ -d "$JMODS_DIR" ]; then
log "Removing $TMP_DIR"
rm -rf "$TMP_DIR"
done

log "Repack java.base.jmod with new hashes of modules"
hash_modules=$($JMOD_EXE describe $JMODS_DIR/java.base.jmod | grep hashes | awk '{print $2}' | tr '\n' '|' | sed s/\|$//) || exit $?

TMP_DIR="$JMODS_DIR/tmp"
rm -rf "$TMP_DIR"
mkdir "$TMP_DIR"

jmod_file="$JMODS_DIR/java.base.jmod"
log "Unzipping $jmod_file"
$JMOD_EXE extract --dir "$TMP_DIR" "$jmod_file" >/dev/null

log "Removing java.base.jmod"
rm -f "$jmod_file"

cmd="$JMOD_EXE create --class-path $TMP_DIR/classes --hash-modules \"$hash_modules\" --module-path $JMODS_DIR"

# Check each directory and add to the command if it exists
[ -d "$TMP_DIR/bin" ] && cmd="$cmd --cmds $TMP_DIR/bin"
[ -d "$TMP_DIR/conf" ] && cmd="$cmd --config $TMP_DIR/conf"
[ -d "$TMP_DIR/lib" ] && cmd="$cmd --libs $TMP_DIR/lib"
[ -d "$TMP_DIR/include" ] && cmd="$cmd --header-files $TMP_DIR/include"
[ -d "$TMP_DIR/legal" ] && cmd="$cmd --legal-notices $TMP_DIR/legal"
[ -d "$TMP_DIR/man" ] && cmd="$cmd --man-pages $TMP_DIR/man"

log "Creating jmod file"
log "$cmd"
# Add the output file
cmd="$cmd $jmod_file"

# Execute the command
eval $cmd

log "Removing $TMP_DIR"
rm -rf "$TMP_DIR"
else
echo "Directory '$JMODS_DIR' does not exist. Skipping signing of jmod files."
fi
@@ -1,5 +1,5 @@
#
# Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -108,7 +108,6 @@ help:
$(info $(_) MICRO="OPT1=x;OPT2=y" # Control the MICRO test harness, use 'make test-only MICRO=help' to list)
$(info $(_) TEST_OPTS="OPT1=x;..." # Generic control of all test harnesses)
$(info $(_) TEST_VM_OPTS="ARG ..." # Same as setting TEST_OPTS to VM_OPTIONS="ARG ...")
$(info $(_) ALLOW="FOO,BAR" # Do not warn that FOO and BAR are non-control variables)
$(info )
$(if $(all_confs), $(info Available configurations in $(build_dir):) $(foreach var,$(all_confs),$(info * $(var))), \
$(info No configurations were found in $(build_dir).) $(info Run 'bash configure' to create a configuration.))
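
The help text above documents how the test and warning options are passed on the make command line. As a quick orientation, here is a minimal, hedged sketch of such invocations; the concrete option values are illustrative only and are not taken from this change:

# Illustrative invocations only (values are made up; variable names come from the help text above).
make test-only MICRO=help                  # list the MICRO harness options, as the help text suggests
make test TEST_OPTS="VM_OPTIONS=-Xmx1g"    # generic control of all test harnesses
make test TEST_VM_OPTS="-Xmx1g"            # shorthand for TEST_OPTS with VM_OPTIONS
make images ALLOW="FOO,BAR" FOO=1 BAR=2    # suppress the non-control-variable warning for FOO and BAR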
make/Init.gmk: 314 changed lines
@@ -1,5 +1,5 @@
#
# Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -24,11 +24,9 @@
#

################################################################################
# Init.gmk sits between PreInit.gmk and Main.gmk when bootstrapping the build.
# It is called from PreInit.gmk, and its main responsibility is to launch
# Main.gmk with the proper make and the proper make arguments.
# PreMain.gmk has provided us with a proper SPEC. This allows us to use the
# value of $(MAKE) for all further make calls.
# This is the bootstrapping part of the build. This file is included from the
# top level Makefile, and is responsible for launching the Main.gmk file with
# the proper make and the proper make arguments.
################################################################################

# This must be the first rule
@@ -39,68 +37,249 @@ default:
# serially, regardless of -j.
.NOTPARALLEL:

include $(SPEC)
ifeq ($(HAS_SPEC), )
##############################################################################
# This is the default mode. We have not been recursively called with a SPEC.
##############################################################################

include $(TOPDIR)/make/common/MakeBase.gmk
# Include our helper functions.
include $(topdir)/make/InitSupport.gmk

# Our helper functions.
include $(TOPDIR)/make/InitSupport.gmk
include $(TOPDIR)/make/common/LogUtils.gmk
# Here are "global" targets, i.e. targets that can be executed without having
# a configuration. This will define ALL_GLOBAL_TARGETS.
include $(topdir)/make/Global.gmk

# Parse COMPARE_BUILD (for makefile development)
$(eval $(call ParseCompareBuild))
# Targets provided by Init.gmk.
ALL_INIT_TARGETS := print-modules print-targets print-configuration \
print-tests reconfigure pre-compare-build post-compare-build

# Setup reproducible build environment
$(eval $(call SetupReproducibleBuild))
# CALLED_TARGETS is the list of targets that the user provided,
# or "default" if unspecified.
CALLED_TARGETS := $(if $(MAKECMDGOALS), $(MAKECMDGOALS), default)

# If no LOG= was given on command line, but we have a non-standard default
# value, use that instead and re-parse log level.
ifeq ($(LOG), )
ifneq ($(DEFAULT_LOG), )
override LOG := $(DEFAULT_LOG)
$(eval $(call ParseLogLevel))
# Extract non-global targets that require a spec file.
CALLED_SPEC_TARGETS := $(filter-out $(ALL_GLOBAL_TARGETS), $(CALLED_TARGETS))

# If we have only global targets, or if we are called with -qp (assuming an
# external part, e.g. bash completion, is trying to understand our targets),
# we will skip SPEC location and the sanity checks.
ifeq ($(CALLED_SPEC_TARGETS), )
ONLY_GLOBAL_TARGETS := true
endif
ifeq ($(findstring p, $(MAKEFLAGS))$(findstring q, $(MAKEFLAGS)), pq)
ONLY_GLOBAL_TARGETS := true
endif
endif

ifeq ($(LOG_NOFILE), true)
# Disable build log if LOG=[level,]nofile was given
override BUILD_LOG_PIPE :=
override BUILD_LOG_PIPE_SIMPLE :=
endif
ifeq ($(ONLY_GLOBAL_TARGETS), true)
############################################################################
# We have only global targets, or are called with -pq.
############################################################################

ifeq ($(filter dist-clean, $(SEQUENTIAL_TARGETS)), dist-clean)
# We can't have a log file if we're about to remove it.
override BUILD_LOG_PIPE :=
override BUILD_LOG_PIPE_SIMPLE :=
endif
ifeq ($(wildcard $(SPEC)), )
# If we have no SPEC provided, we will just make a "best effort" target list.
# First try to grab any available pre-existing main-targets.gmk.
main_targets_file := $(firstword $(wildcard $(build_dir)/*/make-support/main-targets.gmk))
ifneq ($(main_targets_file), )
# Extract the SPEC that corresponds to this main-targets.gmk file.
SPEC := $(patsubst %/make-support/main-targets.gmk, %/spec.gmk, $(main_targets_file))
else
# None found, pick an arbitrary SPEC for which to generate a file
SPEC := $(firstword $(all_spec_files))
endif
endif

ifeq ($(OUTPUT_SYNC_SUPPORTED), true)
OUTPUT_SYNC_FLAG := -O$(OUTPUT_SYNC)
endif
ifneq ($(wildcard $(SPEC)), )
$(eval $(call DefineMainTargets, LAZY, $(SPEC)))
else
# If we have no configurations we can not provide any main targets.
ALL_MAIN_TARGETS :=
endif

##############################################################################
# Init targets. These are handled fully, here and now.
##############################################################################
ALL_TARGETS := $(sort $(ALL_GLOBAL_TARGETS) $(ALL_MAIN_TARGETS) $(ALL_INIT_TARGETS))

print-modules:
# Just list all our targets.
$(ALL_TARGETS):

.PHONY: $(ALL_TARGETS)
else
############################################################################
# This is the normal case, we have been called from the command line by the
# user and we need to call ourself back with a proper SPEC.
# We have at least one non-global target, so we need to find a spec file.
############################################################################

# Basic checks on environment and command line.
$(eval $(call CheckControlVariables))
$(eval $(call CheckDeprecatedEnvironment))
$(eval $(call CheckInvalidMakeFlags))

# Check that CONF_CHECK is valid.
$(eval $(call ParseConfCheckOption))

# Check that the LOG given is valid, and set LOG_LEVEL, LOG_NOFILE, MAKE_LOG_VARS and MAKE_LOG_FLAGS.
$(eval $(call ParseLogLevel))

# After this SPECS contain 1..N spec files (otherwise ParseConfAndSpec fails).
$(eval $(call ParseConfAndSpec))

# Extract main targets from Main.gmk using the spec(s) provided. In theory,
# with multiple specs, we should find the intersection of targets provided
# by all specs, but we approximate this by an arbitrary spec from the list.
# This will setup ALL_MAIN_TARGETS.
$(eval $(call DefineMainTargets, FORCE, $(firstword $(SPECS))))

# Separate called targets depending on type.
INIT_TARGETS := $(filter $(ALL_INIT_TARGETS), $(CALLED_SPEC_TARGETS))
MAIN_TARGETS := $(filter $(ALL_MAIN_TARGETS), $(CALLED_SPEC_TARGETS))
SEQUENTIAL_TARGETS := $(filter dist-clean clean%, $(MAIN_TARGETS))
PARALLEL_TARGETS := $(filter-out $(SEQUENTIAL_TARGETS), $(MAIN_TARGETS))

# The spec files depend on the autoconf source code. This check makes sure
# the configuration is up to date after changes to configure.
$(SPECS): $(wildcard $(topdir)/make/autoconf/*) \
$(if $(CUSTOM_CONFIG_DIR), $(wildcard $(CUSTOM_CONFIG_DIR)/*)) \
$(addprefix $(topdir)/make/conf/, version-numbers.conf branding.conf) \
$(if $(CUSTOM_CONF_DIR), $(wildcard $(addprefix $(CUSTOM_CONF_DIR)/, \
version-numbers.conf branding.conf)))
ifeq ($(CONF_CHECK), fail)
@echo Error: The configuration is not up to date for \
"'$(lastword $(subst /, , $(dir $@)))'."
$(call PrintConfCheckFailed)
@exit 2
else ifeq ($(CONF_CHECK), auto)
@echo Note: The configuration is not up to date for \
"'$(lastword $(subst /, , $(dir $@)))'."
@( cd $(topdir) && \
$(MAKE) $(MFLAGS) $(MAKE_LOG_FLAGS) -r -R -f $(topdir)/make/Init.gmk \
SPEC=$@ HAS_SPEC=true ACTUAL_TOPDIR=$(topdir) \
reconfigure )
else ifeq ($(CONF_CHECK), ignore)
# Do nothing
endif

# Do not let make delete spec files even if aborted while doing a reconfigure
.PRECIOUS: $(SPECS)

# Unless reconfigure is explicitly called, let all main targets depend on
# the spec files to be up to date.
ifeq ($(findstring reconfigure, $(INIT_TARGETS)), )
$(MAIN_TARGETS): $(SPECS)
endif

make-info:
ifneq ($(findstring $(LOG_LEVEL), info debug trace), )
$(info Running make as '$(strip $(MAKE) $(MFLAGS) \
$(COMMAND_LINE_VARIABLES) $(MAKECMDGOALS))')
endif

MAKE_INIT_WITH_SPEC_ARGUMENTS := ACTUAL_TOPDIR=$(topdir) \
USER_MAKE_VARS="$(USER_MAKE_VARS)" MAKE_LOG_FLAGS=$(MAKE_LOG_FLAGS) \
$(MAKE_LOG_VARS) \
INIT_TARGETS="$(INIT_TARGETS)" \
SEQUENTIAL_TARGETS="$(SEQUENTIAL_TARGETS)" \
PARALLEL_TARGETS="$(PARALLEL_TARGETS)"

# Now the init and main targets will be called, once for each SPEC. The
# recipe will be run once for every target specified, but we only want to
# execute the recipe a single time, hence the TARGET_DONE with a dummy
# command if true.
# The COMPARE_BUILD part implements special support for makefile development.
$(ALL_INIT_TARGETS) $(ALL_MAIN_TARGETS): make-info
@$(if $(TARGET_DONE), \
true \
, \
( cd $(topdir) && \
$(foreach spec, $(SPECS), \
$(MAKE) $(MFLAGS) $(MAKE_LOG_FLAGS) -r -R -j 1 -f $(topdir)/make/Init.gmk \
SPEC=$(spec) HAS_SPEC=true $(MAKE_INIT_WITH_SPEC_ARGUMENTS) \
main && \
$(if $(and $(COMPARE_BUILD), $(PARALLEL_TARGETS)), \
$(MAKE) $(MFLAGS) $(MAKE_LOG_FLAGS) -r -R -f $(topdir)/make/Init.gmk \
SPEC=$(spec) HAS_SPEC=true ACTUAL_TOPDIR=$(topdir) \
COMPARE_BUILD="$(COMPARE_BUILD)" pre-compare-build && \
$(MAKE) $(MFLAGS) $(MAKE_LOG_FLAGS) -r -R -j 1 -f $(topdir)/make/Init.gmk \
SPEC=$(spec) HAS_SPEC=true $(MAKE_INIT_WITH_SPEC_ARGUMENTS) \
COMPARE_BUILD="$(COMPARE_BUILD):NODRYRUN=true" main && \
$(MAKE) $(MFLAGS) $(MAKE_LOG_FLAGS) -r -R -f $(topdir)/make/Init.gmk \
SPEC=$(spec) HAS_SPEC=true ACTUAL_TOPDIR=$(topdir) \
COMPARE_BUILD="$(COMPARE_BUILD):NODRYRUN=true" post-compare-build && \
) \
) true ) \
$(eval TARGET_DONE=true) \
)

.PHONY: $(ALL_MAIN_TARGETS) $(ALL_INIT_TARGETS)

endif # $(ONLY_GLOBAL_TARGETS)!=true
else # HAS_SPEC=true

##############################################################################
# Now we have a spec. This part provides the "main" target that acts as a
# trampoline to call the Main.gmk with the value of $(MAKE) found in the spec
# file.
##############################################################################

include $(SPEC)

# Our helper functions.
include $(TOPDIR)/make/InitSupport.gmk

# Parse COMPARE_BUILD (for makefile development)
$(eval $(call ParseCompareBuild))

# Setup reproducible build environment
$(eval $(call SetupReproducibleBuild))

# If no LOG= was given on command line, but we have a non-standard default
# value, use that instead and re-parse log level.
ifeq ($(LOG), )
ifneq ($(DEFAULT_LOG), )
override LOG := $(DEFAULT_LOG)
$(eval $(call ParseLogLevel))
endif
endif

ifeq ($(LOG_NOFILE), true)
# Disable build log if LOG=[level,]nofile was given
override BUILD_LOG_PIPE :=
override BUILD_LOG_PIPE_SIMPLE :=
endif

ifeq ($(filter dist-clean, $(SEQUENTIAL_TARGETS)), dist-clean)
# We can't have a log file if we're about to remove it.
override BUILD_LOG_PIPE :=
override BUILD_LOG_PIPE_SIMPLE :=
endif

ifeq ($(OUTPUT_SYNC_SUPPORTED), true)
OUTPUT_SYNC_FLAG := -O$(OUTPUT_SYNC)
endif

##############################################################################
# Init targets
##############################################################################

print-modules:
( cd $(TOPDIR) && \
$(MAKE) $(MAKE_ARGS) -j 1 -f make/Main.gmk $(USER_MAKE_VARS) \
NO_RECIPES=true print-modules )

print-targets:
print-targets:
( cd $(TOPDIR) && \
$(MAKE) $(MAKE_ARGS) -j 1 -f make/Main.gmk $(USER_MAKE_VARS) \
NO_RECIPES=true print-targets )

print-tests:
print-tests:
( cd $(TOPDIR) && \
$(MAKE) $(MAKE_ARGS) -j 1 -f make/Main.gmk $(USER_MAKE_VARS) \
NO_RECIPES=true print-tests )

print-configuration:
$(ECHO) $(CONFIGURE_COMMAND_LINE)
print-configuration:
$(ECHO) $(CONFIGURE_COMMAND_LINE)

reconfigure:
reconfigure:
ifneq ($(REAL_CONFIGURE_COMMAND_EXEC_FULL), )
$(ECHO) "Re-running configure using original command line '$(REAL_CONFIGURE_COMMAND_EXEC_SHORT) $(REAL_CONFIGURE_COMMAND_LINE)'"
$(eval RECONFIGURE_COMMAND := $(REAL_CONFIGURE_COMMAND_EXEC_FULL) $(REAL_CONFIGURE_COMMAND_LINE))
@@ -116,27 +295,25 @@ reconfigure:
CUSTOM_CONFIG_DIR="$(CUSTOM_CONFIG_DIR)" \
$(RECONFIGURE_COMMAND) )

.PHONY: print-modules print-targets print-tests print-configuration reconfigure
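
The init targets declared above can be driven directly from the top-level make. As orientation, a few hedged example invocations follow; the configuration name is a placeholder and is not part of this change:

# Illustrative only; the target names are those declared above, the CONF value is a placeholder.
make print-targets                                  # list the available main targets
make print-modules                                  # list the modules known to Main.gmk
make print-configuration                            # echo the original configure command line
make CONF=linux-x86_64-server-release reconfigure   # re-run configure for one existing configuration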
##############################################################################
# The main target, for delegating into Main.gmk
##############################################################################

##############################################################################
# The main target. This will delegate all other targets into Main.gmk.
##############################################################################
MAIN_TARGETS := $(SEQUENTIAL_TARGETS) $(PARALLEL_TARGETS) $(COMPARE_BUILD_MAKE)
# If building the default target, add what they are to the description.
DESCRIPTION_TARGETS := $(strip $(MAIN_TARGETS))
ifeq ($(DESCRIPTION_TARGETS), default)
DESCRIPTION_TARGETS += ($(DEFAULT_MAKE_TARGET))
endif
TARGET_DESCRIPTION := target$(if $(word 2, $(MAIN_TARGETS)),s) \
'$(strip $(DESCRIPTION_TARGETS))' in configuration '$(CONF_NAME)'

MAIN_TARGETS := $(SEQUENTIAL_TARGETS) $(PARALLEL_TARGETS) $(COMPARE_BUILD_MAKE)
# If building the default target, add what they are to the description.
DESCRIPTION_TARGETS := $(strip $(MAIN_TARGETS))
ifeq ($(DESCRIPTION_TARGETS), default)
DESCRIPTION_TARGETS += ($(DEFAULT_MAKE_TARGET))
endif
TARGET_DESCRIPTION := target$(if $(word 2, $(MAIN_TARGETS)),s) \
'$(strip $(DESCRIPTION_TARGETS))' in configuration '$(CONF_NAME)'
# MAKEOVERRIDES is automatically set and propagated by Make to sub-Make calls.
# We need to clear it of the init-specific variables. The user-specified
# variables are explicitly propagated using $(USER_MAKE_VARS).
main: MAKEOVERRIDES :=

# MAKEOVERRIDES is automatically set and propagated by Make to sub-Make calls.
# We need to clear it of the init-specific variables. The user-specified
# variables are explicitly propagated using $(USER_MAKE_VARS).
main: MAKEOVERRIDES :=

main: $(INIT_TARGETS)
main: $(INIT_TARGETS)
ifneq ($(SEQUENTIAL_TARGETS)$(PARALLEL_TARGETS), )
$(call RotateLogFiles)
$(PRINTF) "Building $(TARGET_DESCRIPTION)\n" $(BUILD_LOG_PIPE_SIMPLE)
@@ -156,7 +333,7 @@ main: $(INIT_TARGETS)
# treat it as NOT using jobs at all.
( cd $(TOPDIR) && \
$(NICE) $(MAKE) $(MAKE_ARGS) $(OUTPUT_SYNC_FLAG) \
$(if $(JOBS), -j $(JOBS)) \
$(if $(JOBS), -j $(JOBS)) \
-f make/Main.gmk $(USER_MAKE_VARS) \
$(PARALLEL_TARGETS) $(COMPARE_BUILD_MAKE) $(BUILD_LOG_PIPE) || \
( exitcode=$$? && \
@@ -176,7 +353,7 @@ main: $(INIT_TARGETS)
$(call ReportProfileTimes)
endif

on-failure:
on-failure:
$(call CleanupJavacServer)
$(call StopGlobalTimer)
$(call ReportBuildTimes)
@@ -188,14 +365,15 @@ on-failure:
$(call CleanupCompareBuild)
endif

# Support targets for COMPARE_BUILD, used for makefile development
pre-compare-build:
# Support targets for COMPARE_BUILD, used for makefile development
pre-compare-build:
$(call WaitForJavacServerFinish)
$(call PrepareCompareBuild)

post-compare-build:
post-compare-build:
$(call WaitForJavacServerFinish)
$(call CleanupCompareBuild)
$(call CompareBuildDoComparison)

.PHONY: main on-failure pre-compare-build post-compare-build
.PHONY: print-targets print-modules reconfigure main on-failure
endif
@@ -25,108 +25,389 @@

################################################################################
# This file contains helper functions for Init.gmk.
# It is divided in two parts, depending on if a SPEC is present or not
# (HAS_SPEC is true or not).
################################################################################

# Define basic logging setup
BUILD_LOG := $(OUTPUTDIR)/build.log
BUILD_PROFILE_LOG := $(OUTPUTDIR)/build-profile.log
ifndef _INITSUPPORT_GMK
_INITSUPPORT_GMK := 1

BUILD_LOG_PIPE := > >($(TEE) -a $(BUILD_LOG)) 2> >($(TEE) -a $(BUILD_LOG) >&2) && wait
# Use this for simple echo/printf commands that are never expected to print
# to stderr.
BUILD_LOG_PIPE_SIMPLE := | $(TEE) -a $(BUILD_LOG)
ifeq ($(HAS_SPEC), )

ifneq ($(CUSTOM_ROOT), )
topdir = $(CUSTOM_ROOT)
else
topdir = $(TOPDIR)
endif
# COMMA is defined in spec.gmk, but that is not included yet
COMMA := ,

# Setup the build environment to match the requested specification on
# level of reproducible builds
define SetupReproducibleBuild
ifeq ($$(SOURCE_DATE), updated)
# For static values of SOURCE_DATE (not "updated"), these are set in spec.gmk
export SOURCE_DATE_EPOCH := $$(shell $$(DATE) +"%s")
export SOURCE_DATE_ISO_8601 := $$(call EpochToISO8601, $$(SOURCE_DATE_EPOCH))
# Include the corresponding closed file, if present.
ifneq ($(CUSTOM_MAKE_DIR), )
-include $(CUSTOM_MAKE_DIR)/InitSupport.gmk
endif
endef

# Parse COMPARE_BUILD into COMPARE_BUILD_*
# Syntax: COMPARE_BUILD=CONF=<configure options>:PATCH=<patch file>:
# MAKE=<make targets>:COMP_OPTS=<compare script options>:
# COMP_DIR=<compare script base dir>|<default>:
# FAIL=<bool>
# If neither CONF or PATCH is given, assume <default> means CONF if it
# begins with "--", otherwise assume it means PATCH.
# MAKE and COMP_OPTS can only be used with CONF and/or PATCH specified.
# If any value contains "+", it will be replaced by space.
# FAIL can be set to false to have the return value of compare be ignored.
define ParseCompareBuild
ifneq ($$(COMPARE_BUILD), )
COMPARE_BUILD_OUTPUTDIR := $(topdir)/build/compare-build/$(CONF_NAME)
COMPARE_BUILD_FAIL := true
##############################################################################
# Helper functions for the initial part of Init.gmk, before the spec file is
# loaded. Most of these functions provide parsing and setting up make options
# from the command-line.
##############################################################################

ifneq ($$(findstring :, $$(COMPARE_BUILD)), )
$$(foreach part, $$(subst :, , $$(COMPARE_BUILD)), \
$$(if $$(filter PATCH=%, $$(part)), \
$$(eval COMPARE_BUILD_PATCH = $$(strip $$(patsubst PATCH=%, %, $$(part)))) \
) \
$$(if $$(filter CONF=%, $$(part)), \
$$(eval COMPARE_BUILD_CONF = $$(strip $$(subst +, , $$(patsubst CONF=%, %, $$(part))))) \
) \
$$(if $$(filter MAKE=%, $$(part)), \
$$(eval COMPARE_BUILD_MAKE = $$(strip $$(subst +, , $$(patsubst MAKE=%, %, $$(part))))) \
) \
$$(if $$(filter COMP_OPTS=%, $$(part)), \
$$(eval COMPARE_BUILD_COMP_OPTS = $$(strip $$(subst +, , $$(patsubst COMP_OPTS=%, %, $$(part))))) \
) \
$$(if $$(filter COMP_DIR=%, $$(part)), \
$$(eval COMPARE_BUILD_COMP_DIR = $$(strip $$(subst +, , $$(patsubst COMP_DIR=%, %, $$(part))))) \
) \
$$(if $$(filter FAIL=%, $$(part)), \
$$(eval COMPARE_BUILD_FAIL = $$(strip $$(subst +, , $$(patsubst FAIL=%, %, $$(part))))) \
) \
$$(if $$(filter NODRYRUN=%, $$(part)), \
$$(eval COMPARE_BUILD_NODRYRUN = $$(strip $$(subst +, , $$(patsubst NODRYRUN=%, %, $$(part))))) \
) \
)
else
# Separate handling for single field case, to allow for spaces in values.
ifneq ($$(filter PATCH=%, $$(COMPARE_BUILD)), )
COMPARE_BUILD_PATCH = $$(strip $$(patsubst PATCH=%, %, $$(COMPARE_BUILD)))
else ifneq ($$(filter CONF=%, $$(COMPARE_BUILD)), )
COMPARE_BUILD_CONF = $$(strip $$(subst +, , $$(patsubst CONF=%, %, $$(COMPARE_BUILD))))
else ifneq ($$(filter --%, $$(COMPARE_BUILD)), )
# Assume CONF if value begins with --
COMPARE_BUILD_CONF = $$(strip $$(subst +, , $$(COMPARE_BUILD)))
# Make control variables, handled by Init.gmk
|
||||
INIT_CONTROL_VARIABLES += LOG CONF CONF_NAME SPEC JOBS TEST_JOBS CONF_CHECK \
|
||||
COMPARE_BUILD JTREG GTEST MICRO TEST_OPTS TEST_VM_OPTS TEST_DEPS
|
||||
|
||||
# All known make control variables
|
||||
MAKE_CONTROL_VARIABLES := $(INIT_CONTROL_VARIABLES) TEST JDK_FILTER SPEC_FILTER
|
||||
|
||||
# Define a simple reverse function.
|
||||
# Should maybe move to MakeBase.gmk, but we can't include that file now.
|
||||
reverse = \
|
||||
$(if $(strip $(1)), $(call reverse, $(wordlist 2, $(words $(1)), $(1)))) \
|
||||
$(firstword $(1))
|
||||
|
||||
# The variable MAKEOVERRIDES contains variable assignments from the command
|
||||
# line, but in reverse order to what the user entered.
|
||||
# The '§' <=> '\ 'dance is needed to keep values with space in them connected.
|
||||
COMMAND_LINE_VARIABLES := $(subst §,\ , $(call reverse, $(subst \ ,§,$(MAKEOVERRIDES))))
|
||||
|
||||
# A list like FOO="val1" BAR="val2" containing all user-supplied make
|
||||
# variables that we should propagate.
|
||||
# The '§' <=> '\ 'dance is needed to keep values with space in them connected.
|
||||
USER_MAKE_VARS := $(subst §,\ , $(filter-out $(addsuffix =%, $(INIT_CONTROL_VARIABLES)), \
|
||||
$(subst \ ,§,$(MAKEOVERRIDES))))
|
||||
|
||||
# Setup information about available configurations, if any.
|
||||
ifneq ($(CUSTOM_ROOT), )
|
||||
build_dir = $(CUSTOM_ROOT)/build
|
||||
else
|
||||
build_dir = $(topdir)/build
|
||||
endif
|
||||
all_spec_files = $(wildcard $(build_dir)/*/spec.gmk)
|
||||
# Extract the configuration names from the path
|
||||
all_confs = $(patsubst %/spec.gmk, %, $(patsubst $(build_dir)/%, %, $(all_spec_files)))
|
||||
|
||||
# Check for unknown command-line variables
|
||||
define CheckControlVariables
|
||||
command_line_variables := $$(strip $$(foreach var, \
|
||||
$$(subst \ ,_,$$(MAKEOVERRIDES)), \
|
||||
$$(firstword $$(subst =, , $$(var)))))
|
||||
unknown_command_line_variables := $$(strip \
|
||||
$$(filter-out $$(MAKE_CONTROL_VARIABLES), $$(command_line_variables)))
|
||||
ifneq ($$(unknown_command_line_variables), )
|
||||
$$(info Note: Command line contains non-control variables:)
|
||||
$$(foreach var, $$(unknown_command_line_variables), $$(info * $$(var)=$$($$(var))))
|
||||
$$(info Make sure it is not mistyped, and that you intend to override this variable.)
|
||||
$$(info 'make help' will list known control variables.)
|
||||
$$(info )
|
||||
endif
|
||||
endef
|
||||
|
||||
# Check for deprecated ALT_ variables
|
||||
define CheckDeprecatedEnvironment
|
||||
defined_alt_variables := $$(filter ALT_%, $$(.VARIABLES))
|
||||
ifneq ($$(defined_alt_variables), )
|
||||
$$(info Warning: You have the following ALT_ variables set:)
|
||||
$$(foreach var, $$(defined_alt_variables), $$(info * $$(var)=$$($$(var))))
|
||||
$$(info ALT_ variables are deprecated, and may result in a failed build.)
|
||||
$$(info Please clean your environment.)
|
||||
$$(info )
|
||||
endif
|
||||
endef
|
||||
|
||||
# Check for invalid make flags like -j
|
||||
define CheckInvalidMakeFlags
|
||||
# This is a trick to get this rule to execute before any other rules
|
||||
# MAKEFLAGS only indicate -j if read in a recipe (!)
|
||||
$$(topdir)/make/Init.gmk: .FORCE
|
||||
$$(if $$(findstring --jobserver, $$(MAKEFLAGS)), \
|
||||
$$(info Error: 'make -jN' is not supported, use 'make JOBS=N') \
|
||||
$$(error Cannot continue) \
|
||||
)
|
||||
.FORCE:
|
||||
.PHONY: .FORCE
|
||||
endef
|
||||
|
||||
# Check that the CONF_CHECK option is valid and set up handling
|
||||
define ParseConfCheckOption
|
||||
ifeq ($$(CONF_CHECK), )
|
||||
# Default behavior is fail
|
||||
CONF_CHECK := fail
|
||||
else ifneq ($$(filter-out auto fail ignore, $$(CONF_CHECK)), )
|
||||
$$(info Error: CONF_CHECK must be one of: auto, fail or ignore.)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
endef
|
||||
|
||||
define ParseConfAndSpec
|
||||
ifneq ($$(origin SPEC), undefined)
|
||||
# We have been given a SPEC, check that it works out properly
|
||||
ifneq ($$(origin CONF), undefined)
|
||||
# We also have a CONF argument. We can't have both.
|
||||
$$(info Error: Cannot use CONF=$$(CONF) and SPEC=$$(SPEC) at the same time. Choose one.)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
ifneq ($$(origin CONF_NAME), undefined)
|
||||
# We also have a CONF_NAME argument. We can't have both.
|
||||
$$(info Error: Cannot use CONF_NAME=$$(CONF_NAME) and SPEC=$$(SPEC) at the same time. Choose one.)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
ifeq ($$(wildcard $$(SPEC)), )
|
||||
$$(info Error: Cannot locate spec.gmk, given by SPEC=$$(SPEC).)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
ifeq ($$(filter /%, $$(SPEC)), )
|
||||
# If given with relative path, make it absolute
|
||||
SPECS := $$(CURDIR)/$$(strip $$(SPEC))
|
||||
else
|
||||
# Otherwise assume patch file
|
||||
COMPARE_BUILD_PATCH = $$(strip $$(COMPARE_BUILD))
|
||||
SPECS := $$(SPEC)
|
||||
endif
|
||||
|
||||
# For now, unset this SPEC variable.
|
||||
override SPEC :=
|
||||
else
|
||||
# Use spec.gmk files in the build output directory
|
||||
ifeq ($$(all_spec_files), )
|
||||
ifneq ($(CUSTOM_ROOT), )
|
||||
$$(info Error: No configurations found for $$(CUSTOM_ROOT).)
|
||||
else
|
||||
$$(info Error: No configurations found for $$(topdir).)
|
||||
endif
|
||||
$$(info Please run 'bash configure' to create a configuration.)
|
||||
$$(info )
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
|
||||
ifneq ($$(origin CONF_NAME), undefined)
|
||||
ifneq ($$(origin CONF), undefined)
|
||||
# We also have a CONF argument. We can't have both.
|
||||
$$(info Error: Cannot use CONF=$$(CONF) and CONF_NAME=$$(CONF_NAME) at the same time. Choose one.)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
matching_conf := $$(strip $$(filter $$(CONF_NAME), $$(all_confs)))
|
||||
ifeq ($$(matching_conf), )
|
||||
$$(info Error: No configurations found matching CONF_NAME=$$(CONF_NAME).)
|
||||
$$(info Available configurations in $$(build_dir):)
|
||||
$$(foreach var, $$(all_confs), $$(info * $$(var)))
|
||||
$$(error Cannot continue)
|
||||
else ifneq ($$(words $$(matching_conf)), 1)
|
||||
$$(info Error: Matching more than one configuration CONF_NAME=$$(CONF_NAME).)
|
||||
$$(info Available configurations in $$(build_dir):)
|
||||
$$(foreach var, $$(all_confs), $$(info * $$(var)))
|
||||
$$(error Cannot continue)
|
||||
else
|
||||
$$(info Building configuration '$$(matching_conf)' (matching CONF_NAME=$$(CONF_NAME)))
|
||||
endif
|
||||
# Create a SPEC definition. This will contain the path to exactly one spec file.
|
||||
SPECS := $$(build_dir)/$$(matching_conf)/spec.gmk
|
||||
else ifneq ($$(origin CONF), undefined)
|
||||
# User have given a CONF= argument.
|
||||
ifeq ($$(CONF), )
|
||||
# If given CONF=, match all configurations
|
||||
matching_confs := $$(strip $$(all_confs))
|
||||
else
|
||||
# Otherwise select those that contain the given CONF string
|
||||
ifeq ($$(patsubst !%,,$$(CONF)), )
|
||||
# A CONF starting with ! means we should negate the search term
|
||||
matching_confs := $$(strip $$(foreach var, $$(all_confs), \
|
||||
$$(if $$(findstring $$(subst !,,$$(CONF)), $$(var)), ,$$(var))))
|
||||
else
|
||||
matching_confs := $$(strip $$(foreach var, $$(all_confs), \
|
||||
$$(if $$(findstring $$(CONF), $$(var)), $$(var))))
|
||||
endif
|
||||
ifneq ($$(filter $$(CONF), $$(matching_confs)), )
|
||||
ifneq ($$(word 2, $$(matching_confs)), )
|
||||
# Don't repeat this output on make restarts caused by including
|
||||
# generated files.
|
||||
ifeq ($$(MAKE_RESTARTS), )
|
||||
$$(info Using exact match for CONF=$$(CONF) (other matches are possible))
|
||||
endif
|
||||
endif
|
||||
# If we found an exact match, use that
|
||||
matching_confs := $$(CONF)
|
||||
endif
|
||||
endif
|
||||
ifeq ($$(matching_confs), )
|
||||
$$(info Error: No configurations found matching CONF=$$(CONF).)
|
||||
$$(info Available configurations in $$(build_dir):)
|
||||
$$(foreach var, $$(all_confs), $$(info * $$(var)))
|
||||
$$(error Cannot continue)
|
||||
else
|
||||
# Don't repeat this output on make restarts caused by including
|
||||
# generated files.
|
||||
ifeq ($$(MAKE_RESTARTS), )
|
||||
ifeq ($$(words $$(matching_confs)), 1)
|
||||
ifneq ($$(findstring $$(LOG_LEVEL), info debug trace), )
|
||||
$$(info Building configuration '$$(matching_confs)' (matching CONF=$$(CONF)))
|
||||
endif
|
||||
else
|
||||
$$(info Building these configurations (matching CONF=$$(CONF)):)
|
||||
$$(foreach var, $$(matching_confs), $$(info * $$(var)))
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
|
||||
# Create a SPEC definition. This will contain the path to one or more spec.gmk files.
|
||||
SPECS := $$(addsuffix /spec.gmk, $$(addprefix $$(build_dir)/, $$(matching_confs)))
|
||||
else
|
||||
# No CONF or SPEC given, check the available configurations
|
||||
ifneq ($$(words $$(all_spec_files)), 1)
|
||||
$$(info Error: No CONF given, but more than one configuration found.)
|
||||
$$(info Available configurations in $$(build_dir):)
|
||||
$$(foreach var, $$(all_confs), $$(info * $$(var)))
|
||||
$$(info Please retry building with CONF=<config pattern> (or SPEC=<spec file>).)
|
||||
$$(info )
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
|
||||
# We found exactly one configuration, use it
|
||||
SPECS := $$(strip $$(all_spec_files))
|
||||
endif
|
||||
endif
|
||||
ifneq ($$(COMPARE_BUILD_PATCH), )
|
||||
ifneq ($$(wildcard $$(topdir)/$$(COMPARE_BUILD_PATCH)), )
|
||||
# Assume relative path, if file exists
|
||||
COMPARE_BUILD_PATCH := $$(wildcard $$(topdir)/$$(COMPARE_BUILD_PATCH))
|
||||
else ifeq ($$(wildcard $$(COMPARE_BUILD_PATCH)), )
|
||||
$$(error Patch file $$(COMPARE_BUILD_PATCH) does not exist)
|
||||
endef
|
||||
|
||||
# Extract main targets from Main.gmk using the spec provided in $2.
|
||||
#
|
||||
# Param 1: FORCE = force generation of main-targets.gmk or LAZY = do not force.
|
||||
# Param 2: The SPEC file to use.
|
||||
define DefineMainTargets
|
||||
|
||||
# We will start by making sure the main-targets.gmk file is removed, if
|
||||
# make has not been restarted. By the -include, we will trigger the
|
||||
# rule for generating the file (which is never there since we removed it),
|
||||
# thus generating it fresh, and make will restart, incrementing the restart
|
||||
# count.
|
||||
main_targets_file := $$(dir $(strip $2))make-support/main-targets.gmk
|
||||
|
||||
ifeq ($$(MAKE_RESTARTS), )
|
||||
# Only do this if make has not been restarted, and if we do not force it.
|
||||
ifeq ($(strip $1), FORCE)
|
||||
$$(shell rm -f $$(main_targets_file))
|
||||
endif
|
||||
ifneq ($$(COMPARE_BUILD_NODRYRUN), true)
|
||||
PATCH_DRY_RUN := $$(shell cd $$(topdir) && $$(PATCH) --dry-run -p1 < $$(COMPARE_BUILD_PATCH) > /dev/null 2>&1 || $$(ECHO) FAILED)
|
||||
ifeq ($$(PATCH_DRY_RUN), FAILED)
|
||||
$$(error Patch file $$(COMPARE_BUILD_PATCH) does not apply cleanly)
|
||||
endif
|
||||
|
||||
$$(main_targets_file):
|
||||
@( cd $$(topdir) && \
|
||||
$$(MAKE) $$(MAKE_LOG_FLAGS) -r -R -f $$(topdir)/make/Main.gmk \
|
||||
-I $$(topdir)/make/common SPEC=$(strip $2) NO_RECIPES=true \
|
||||
$$(MAKE_LOG_VARS) \
|
||||
create-main-targets-include )
|
||||
|
||||
# Now include main-targets.gmk. This will define ALL_MAIN_TARGETS.
|
||||
-include $$(main_targets_file)
|
||||
endef
|
||||
|
||||
define PrintConfCheckFailed
|
||||
@echo ' '
|
||||
@echo "Please rerun configure! Easiest way to do this is by running"
|
||||
@echo "'make reconfigure'."
|
||||
@echo "This behavior may also be changed using CONF_CHECK=<ignore|auto>."
|
||||
@echo ' '
|
||||
endef
|
||||
|
||||
else # $(HAS_SPEC)=true
|
||||
##############################################################################
|
||||
# Helper functions for the 'main' target. These functions assume a single,
|
||||
# proper and existing SPEC is included.
|
||||
##############################################################################
|
||||
|
||||
include $(TOPDIR)/make/common/MakeBase.gmk
|
||||
|
||||
# Define basic logging setup
|
||||
BUILD_LOG := $(OUTPUTDIR)/build.log
|
||||
BUILD_PROFILE_LOG := $(OUTPUTDIR)/build-profile.log
|
||||
|
||||
BUILD_LOG_PIPE := > >($(TEE) -a $(BUILD_LOG)) 2> >($(TEE) -a $(BUILD_LOG) >&2) && wait
|
||||
# Use this for simple echo/printf commands that are never expected to print
|
||||
# to stderr.
|
||||
BUILD_LOG_PIPE_SIMPLE := | $(TEE) -a $(BUILD_LOG)
|
||||
|
||||
ifneq ($(CUSTOM_ROOT), )
|
||||
topdir = $(CUSTOM_ROOT)
|
||||
else
|
||||
topdir = $(TOPDIR)
|
||||
endif
|
||||
|
||||
# Setup the build environment to match the requested specification on
|
||||
# level of reproducible builds
|
||||
define SetupReproducibleBuild
|
||||
ifeq ($$(SOURCE_DATE), updated)
|
||||
# For static values of SOURCE_DATE (not "updated"), these are set in spec.gmk
|
||||
export SOURCE_DATE_EPOCH := $$(shell $$(DATE) +"%s")
|
||||
export SOURCE_DATE_ISO_8601 := $$(call EpochToISO8601, $$(SOURCE_DATE_EPOCH))
|
||||
endif
|
||||
endef
|
||||
|
||||
# Parse COMPARE_BUILD into COMPARE_BUILD_*
|
||||
# Syntax: COMPARE_BUILD=CONF=<configure options>:PATCH=<patch file>:
|
||||
# MAKE=<make targets>:COMP_OPTS=<compare script options>:
|
||||
# COMP_DIR=<compare script base dir>|<default>:
|
||||
# FAIL=<bool>
|
||||
# If neither CONF or PATCH is given, assume <default> means CONF if it
|
||||
# begins with "--", otherwise assume it means PATCH.
|
||||
# MAKE and COMP_OPTS can only be used with CONF and/or PATCH specified.
|
||||
# If any value contains "+", it will be replaced by space.
|
||||
# FAIL can be set to false to have the return value of compare be ignored.
|
||||
define ParseCompareBuild
|
||||
ifneq ($$(COMPARE_BUILD), )
|
||||
COMPARE_BUILD_OUTPUTDIR := $(topdir)/build/compare-build/$(CONF_NAME)
|
||||
COMPARE_BUILD_FAIL := true
|
||||
|
||||
ifneq ($$(findstring :, $$(COMPARE_BUILD)), )
|
||||
$$(foreach part, $$(subst :, , $$(COMPARE_BUILD)), \
|
||||
$$(if $$(filter PATCH=%, $$(part)), \
|
||||
$$(eval COMPARE_BUILD_PATCH = $$(strip $$(patsubst PATCH=%, %, $$(part)))) \
|
||||
) \
|
||||
$$(if $$(filter CONF=%, $$(part)), \
|
||||
$$(eval COMPARE_BUILD_CONF = $$(strip $$(subst +, , $$(patsubst CONF=%, %, $$(part))))) \
|
||||
) \
|
||||
$$(if $$(filter MAKE=%, $$(part)), \
|
||||
$$(eval COMPARE_BUILD_MAKE = $$(strip $$(subst +, , $$(patsubst MAKE=%, %, $$(part))))) \
|
||||
) \
|
||||
$$(if $$(filter COMP_OPTS=%, $$(part)), \
|
||||
$$(eval COMPARE_BUILD_COMP_OPTS = $$(strip $$(subst +, , $$(patsubst COMP_OPTS=%, %, $$(part))))) \
|
||||
) \
|
||||
$$(if $$(filter COMP_DIR=%, $$(part)), \
|
||||
$$(eval COMPARE_BUILD_COMP_DIR = $$(strip $$(subst +, , $$(patsubst COMP_DIR=%, %, $$(part))))) \
|
||||
) \
|
||||
$$(if $$(filter FAIL=%, $$(part)), \
|
||||
$$(eval COMPARE_BUILD_FAIL = $$(strip $$(subst +, , $$(patsubst FAIL=%, %, $$(part))))) \
|
||||
) \
|
||||
$$(if $$(filter NODRYRUN=%, $$(part)), \
|
||||
$$(eval COMPARE_BUILD_NODRYRUN = $$(strip $$(subst +, , $$(patsubst NODRYRUN=%, %, $$(part))))) \
|
||||
) \
|
||||
)
|
||||
else
|
||||
# Separate handling for single field case, to allow for spaces in values.
|
||||
ifneq ($$(filter PATCH=%, $$(COMPARE_BUILD)), )
|
||||
COMPARE_BUILD_PATCH = $$(strip $$(patsubst PATCH=%, %, $$(COMPARE_BUILD)))
|
||||
else ifneq ($$(filter CONF=%, $$(COMPARE_BUILD)), )
|
||||
COMPARE_BUILD_CONF = $$(strip $$(subst +, , $$(patsubst CONF=%, %, $$(COMPARE_BUILD))))
|
||||
else ifneq ($$(filter --%, $$(COMPARE_BUILD)), )
|
||||
# Assume CONF if value begins with --
|
||||
COMPARE_BUILD_CONF = $$(strip $$(subst +, , $$(COMPARE_BUILD)))
|
||||
else
|
||||
# Otherwise assume patch file
|
||||
COMPARE_BUILD_PATCH = $$(strip $$(COMPARE_BUILD))
|
||||
endif
|
||||
endif
|
||||
ifneq ($$(COMPARE_BUILD_PATCH), )
|
||||
ifneq ($$(wildcard $$(topdir)/$$(COMPARE_BUILD_PATCH)), )
|
||||
# Assume relative path, if file exists
|
||||
COMPARE_BUILD_PATCH := $$(wildcard $$(topdir)/$$(COMPARE_BUILD_PATCH))
|
||||
else ifeq ($$(wildcard $$(COMPARE_BUILD_PATCH)), )
|
||||
$$(error Patch file $$(COMPARE_BUILD_PATCH) does not exist)
|
||||
endif
|
||||
ifneq ($$(COMPARE_BUILD_NODRYRUN), true)
|
||||
PATCH_DRY_RUN := $$(shell cd $$(topdir) && $$(PATCH) --dry-run -p1 < $$(COMPARE_BUILD_PATCH) > /dev/null 2>&1 || $$(ECHO) FAILED)
|
||||
ifeq ($$(PATCH_DRY_RUN), FAILED)
|
||||
$$(error Patch file $$(COMPARE_BUILD_PATCH) does not apply cleanly)
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
ifneq ($$(COMPARE_BUILD_FAIL), true)
|
||||
COMPARE_BUILD_IGNORE_RESULT := || true
|
||||
endif
|
||||
endif
|
||||
ifneq ($$(COMPARE_BUILD_FAIL), true)
|
||||
COMPARE_BUILD_IGNORE_RESULT := || true
|
||||
endif
|
||||
endif
|
||||
endef
|
||||
endef
|
||||
|
||||
# Prepare for a comparison rebuild
|
||||
define PrepareCompareBuild
|
||||
# Prepare for a comparison rebuild
|
||||
define PrepareCompareBuild
|
||||
$(ECHO) "Preparing for comparison rebuild"
|
||||
# Apply patch, if any
|
||||
$(if $(COMPARE_BUILD_PATCH), cd $(topdir) && $(PATCH) -p1 < $(COMPARE_BUILD_PATCH))
|
||||
@@ -144,10 +425,10 @@ define PrepareCompareBuild
|
||||
# must be done after patching.
|
||||
( cd $(CONFIGURE_START_DIR) && PATH="$(ORIGINAL_PATH)" \
|
||||
$(BASH) $(topdir)/configure $(CONFIGURE_COMMAND_LINE) $(COMPARE_BUILD_CONF))
|
||||
endef
|
||||
endef
|
||||
|
||||
# Cleanup after a compare build
|
||||
define CleanupCompareBuild
|
||||
# Cleanup after a compare build
|
||||
define CleanupCompareBuild
|
||||
# If running with a COMPARE_BUILD patch, reverse-apply it, but continue
|
||||
# even if that fails (can happen with removed files).
|
||||
$(if $(COMPARE_BUILD_PATCH), cd $(topdir) && $(PATCH) -R -p1 < $(COMPARE_BUILD_PATCH) || true)
|
||||
@@ -156,10 +437,10 @@ define CleanupCompareBuild
|
||||
$(MV) $(OUTPUTDIR) $(COMPARE_BUILD_OUTPUTDIR)
|
||||
$(MV) $(topdir)/build/.compare-build-temp/$(CONF_NAME) $(OUTPUTDIR)
|
||||
$(RM) -r $(topdir)/build/.compare-build-temp
|
||||
endef
|
||||
endef
|
||||
|
||||
# Do the actual comparison of two builds
|
||||
define CompareBuildDoComparison
|
||||
# Do the actual comparison of two builds
|
||||
define CompareBuildDoComparison
|
||||
# Compare first and second build. Ignore any error code from compare.sh.
|
||||
$(ECHO) "Comparing between comparison rebuild (this/new) and baseline (other/old)"
|
||||
$(if $(COMPARE_BUILD_COMP_DIR), \
|
||||
@@ -169,9 +450,9 @@ define CompareBuildDoComparison
|
||||
+(cd $(COMPARE_BUILD_OUTPUTDIR) && ./compare.sh --diffs $(COMPARE_BUILD_COMP_OPTS) \
|
||||
-o $(OUTPUTDIR) $(COMPARE_BUILD_IGNORE_RESULT)) \
|
||||
)
|
||||
endef
|
||||
endef
|
||||
|
||||
define PrintFailureReports
|
||||
define PrintFailureReports
|
||||
$(if $(filter none, $(LOG_REPORT)), , \
|
||||
$(RM) $(MAKESUPPORT_OUTPUTDIR)/failure-summary.log ; \
|
||||
$(if $(wildcard $(MAKESUPPORT_OUTPUTDIR)/failure-logs/*.log), \
|
||||
@@ -193,9 +474,9 @@ define PrintFailureReports
|
||||
) >> $(MAKESUPPORT_OUTPUTDIR)/failure-summary.log \
|
||||
) \
|
||||
)
|
||||
endef
|
||||
endef
|
||||
|
||||
define PrintBuildLogFailures
|
||||
define PrintBuildLogFailures
|
||||
$(if $(filter none, $(LOG_REPORT)), , \
|
||||
if $(GREP) -q "recipe for target .* failed" $(BUILD_LOG) 2> /dev/null; then \
|
||||
$(PRINTF) "\n=== Make failed targets repeated here ===\n" ; \
|
||||
@@ -208,96 +489,96 @@ define PrintBuildLogFailures
|
||||
fi >> $(MAKESUPPORT_OUTPUTDIR)/failure-summary.log ; \
|
||||
$(CAT) $(MAKESUPPORT_OUTPUTDIR)/failure-summary.log \
|
||||
)
|
||||
endef
|
||||
endef
|
||||
|
||||
define RotateLogFiles
|
||||
define RotateLogFiles
|
||||
$(RM) $(BUILD_LOG).old 2> /dev/null && \
|
||||
$(MV) $(BUILD_LOG) $(BUILD_LOG).old 2> /dev/null || true
|
||||
$(if $(findstring true, $(LOG_PROFILE_TIMES_FILE)), \
|
||||
$(RM) $(BUILD_PROFILE_LOG).old 2> /dev/null && \
|
||||
$(MV) $(BUILD_PROFILE_LOG) $(BUILD_PROFILE_LOG).old 2> /dev/null || true \
|
||||
)
|
||||
endef
|
||||
endef
|
||||
|
||||
# Failure logs are only supported for "parallel" main targets, not the
|
||||
# (trivial) sequential make targets (such as clean and reconfigure),
|
||||
# since the failure-logs directory creation will conflict with clean.
|
||||
# We also make sure the javatmp directory exists, which is needed if a java
|
||||
# process (like javac) is using java.io.tmpdir.
|
||||
define PrepareFailureLogs
|
||||
# Failure logs are only supported for "parallel" main targets, not the
|
||||
# (trivial) sequential make targets (such as clean and reconfigure),
|
||||
# since the failure-logs directory creation will conflict with clean.
|
||||
# We also make sure the javatmp directory exists, which is needed if a java
|
||||
# process (like javac) is using java.io.tmpdir.
|
||||
define PrepareFailureLogs
|
||||
$(RM) -r $(MAKESUPPORT_OUTPUTDIR)/failure-logs 2> /dev/null && \
|
||||
$(MKDIR) -p $(MAKESUPPORT_OUTPUTDIR)/failure-logs
|
||||
$(MKDIR) -p $(JAVA_TMP_DIR)
|
||||
$(RM) $(MAKESUPPORT_OUTPUTDIR)/exit-with-error 2> /dev/null
|
||||
endef
|
||||
endef
|
||||
|
||||
# Remove any javac server logs and port files. This
|
||||
# prevents a new make run to reuse the previous servers.
|
||||
define PrepareJavacServer
|
||||
# Remove any javac server logs and port files. This
|
||||
# prevents a new make run to reuse the previous servers.
|
||||
define PrepareJavacServer
|
||||
$(if $(JAVAC_SERVER_DIR), \
|
||||
$(RM) -r $(JAVAC_SERVER_DIR) 2> /dev/null && \
|
||||
$(MKDIR) -p $(JAVAC_SERVER_DIR) \
|
||||
)
|
||||
endef
|
||||
endef
|
||||
|
||||
define CleanupJavacServer
|
||||
define CleanupJavacServer
|
||||
[ -f $(JAVAC_SERVER_DIR)/server.port ] && $(ECHO) Stopping javac server && \
|
||||
$(TOUCH) $(JAVAC_SERVER_DIR)/server.port.stop; true
|
||||
endef
|
||||
endef
|
||||
|
||||
ifeq ($(call isBuildOs, windows), true)
|
||||
# On windows we need to synchronize with the javac server to be able to
|
||||
# move or remove the build output directory. Since we have no proper
|
||||
# synchronization process, wait for a while and hope it helps. This is only
|
||||
# used by build comparisons.
|
||||
ifeq ($(call isBuildOs, windows), true)
|
||||
# On windows we need to synchronize with the javac server to be able to
|
||||
# move or remove the build output directory. Since we have no proper
|
||||
# synchronization process, wait for a while and hope it helps. This is only
|
||||
# used by build comparisons.
|
||||
define WaitForJavacServerFinish
|
||||
$(if $(JAVAC_SERVER_DIR), \
|
||||
sleep 5 \
|
||||
)
|
||||
endef
|
||||
else
|
||||
define WaitForJavacServerFinish
|
||||
endef
|
||||
endif
|
||||
endef
|
||||
else
|
||||
define WaitForJavacServerFinish
|
||||
endef
|
||||
endif
|
||||
|
||||
##############################################################################
|
||||
# Functions for timers
|
||||
##############################################################################
|
||||
##############################################################################
|
||||
# Functions for timers
|
||||
##############################################################################
|
||||
|
||||
# Store the build times in this directory.
|
||||
BUILDTIMESDIR = $(OUTPUTDIR)/make-support/build-times
|
||||
# Store the build times in this directory.
|
||||
BUILDTIMESDIR = $(OUTPUTDIR)/make-support/build-times
|
||||
|
||||
# Record starting time for build of a sub repository.
|
||||
define RecordStartTime
|
||||
# Record starting time for build of a sub repository.
|
||||
define RecordStartTime
|
||||
$(DATE) '+%Y %m %d %H %M %S' | $(AWK) '{ print $$1,$$2,$$3,$$4,$$5,$$6,($$4*3600+$$5*60+$$6) }' > $(BUILDTIMESDIR)/build_time_start_$(strip $1) && \
|
||||
$(DATE) '+%Y-%m-%d %H:%M:%S' > $(BUILDTIMESDIR)/build_time_start_$(strip $1)_human_readable
|
||||
endef
|
||||
endef
|
||||
|
||||
# Record ending time and calculate the difference and store it in a
|
||||
# easy to read format. Handles builds that cross midnight. Expects
|
||||
# that a build will never take 24 hours or more.
|
||||
define RecordEndTime
|
||||
# Record ending time and calculate the difference and store it in a
|
||||
# easy to read format. Handles builds that cross midnight. Expects
|
||||
# that a build will never take 24 hours or more.
|
||||
define RecordEndTime
|
||||
$(DATE) '+%Y %m %d %H %M %S' | $(AWK) '{ print $$1,$$2,$$3,$$4,$$5,$$6,($$4*3600+$$5*60+$$6) }' > $(BUILDTIMESDIR)/build_time_end_$(strip $1)
|
||||
$(DATE) '+%Y-%m-%d %H:%M:%S' > $(BUILDTIMESDIR)/build_time_end_$(strip $1)_human_readable
|
||||
$(ECHO) `$(CAT) $(BUILDTIMESDIR)/build_time_start_$(strip $1)` `$(CAT) $(BUILDTIMESDIR)/build_time_end_$(strip $1)` $1 | \
|
||||
$(AWK) '{ F=$$7; T=$$14; if (F > T) { T+=3600*24 }; D=T-F; H=int(D/3600); \
|
||||
M=int((D-H*3600)/60); S=D-H*3600-M*60; printf("%02d:%02d:%02d %s\n",H,M,S,$$15); }' \
|
||||
> $(BUILDTIMESDIR)/build_time_diff_$(strip $1)
|
||||
endef
|
||||
endef
|
||||
|
||||
define StartGlobalTimer
|
||||
define StartGlobalTimer
|
||||
$(RM) -r $(BUILDTIMESDIR) 2> /dev/null && \
|
||||
$(MKDIR) -p $(BUILDTIMESDIR) && \
|
||||
$(call RecordStartTime,TOTAL)
|
||||
endef
|
||||
endef
|
||||
|
||||
define StopGlobalTimer
|
||||
define StopGlobalTimer
|
||||
$(call RecordEndTime,TOTAL)
|
||||
endef
|
||||
endef
|
||||
|
||||
# Find all build_time_* files and print their contents in a list sorted
|
||||
# on the name of the sub repository.
|
||||
define ReportBuildTimes
|
||||
# Find all build_time_* files and print their contents in a list sorted
|
||||
# on the name of the sub repository.
|
||||
define ReportBuildTimes
|
||||
$(PRINTF) $(LOG_INFO) -- \
|
||||
"----- Build times -------\nStart %s\nEnd %s\n%s\n%s\n-------------------------\n" \
|
||||
"`$(CAT) $(BUILDTIMESDIR)/build_time_start_TOTAL_human_readable`" \
|
||||
@@ -306,15 +587,119 @@ define ReportBuildTimes
|
||||
$(XARGS) $(CAT) | $(SORT) -k 2`" \
|
||||
"`$(CAT) $(BUILDTIMESDIR)/build_time_diff_TOTAL`" \
|
||||
$(BUILD_LOG_PIPE_SIMPLE)
|
||||
endef
|
||||
|
||||
define ReportProfileTimes
|
||||
$(if $(findstring true, $(LOG_PROFILE_TIMES_LOG)), \
|
||||
[ ! -f $(BUILD_PROFILE_LOG) ] || \
|
||||
{ $(ECHO) Begin $(notdir $(BUILD_PROFILE_LOG)) && \
|
||||
$(CAT) $(BUILD_PROFILE_LOG) && \
|
||||
$(ECHO) End $(notdir $(BUILD_PROFILE_LOG)); \
|
||||
} \
|
||||
$(BUILD_LOG_PIPE_SIMPLE)
|
||||
)
|
||||
endef
|
||||
|
||||
endif # HAS_SPEC
|
||||
|
||||
# Look for a given option in the LOG variable, and if found, set a variable
|
||||
# and remove the option from the LOG variable
|
||||
# $1: The option to look for
|
||||
# $2: The variable to set to "true" if the option is found
|
||||
define ParseLogOption
|
||||
ifneq ($$(findstring $1, $$(LOG)), )
|
||||
override $2 := true
|
||||
# First try to remove ",<option>" if it exists, otherwise just remove "<option>"
|
||||
LOG_STRIPPED := $$(subst $1,, $$(subst $$(COMMA)$$(strip $1),, $$(LOG)))
|
||||
# We might have ended up with a leading comma. Remove it. Need override
|
||||
# since LOG is set from the command line.
|
||||
override LOG := $$(strip $$(patsubst $$(COMMA)%, %, $$(LOG_STRIPPED)))
|
||||
endif
|
||||
endef
|
||||
|
||||
define ReportProfileTimes
|
||||
$(if $(findstring true, $(LOG_PROFILE_TIMES_LOG)), \
|
||||
[ ! -f $(BUILD_PROFILE_LOG) ] || \
|
||||
{ $(ECHO) Begin $(notdir $(BUILD_PROFILE_LOG)) && \
|
||||
$(CAT) $(BUILD_PROFILE_LOG) && \
|
||||
$(ECHO) End $(notdir $(BUILD_PROFILE_LOG)); \
|
||||
} \
|
||||
$(BUILD_LOG_PIPE_SIMPLE)
|
||||
)
|
||||
# Look for a given option with an assignment in the LOG variable, and if found,
|
||||
# set a variable to that value and remove the option from the LOG variable
|
||||
# $1: The option to look for
|
||||
# $2: The variable to set to the value of the option, if found
|
||||
define ParseLogValue
|
||||
ifneq ($$(findstring $1=, $$(LOG)), )
|
||||
# Make words of out comma-separated list and find the one with opt=val
|
||||
value := $$(strip $$(subst $$(strip $1)=,, $$(filter $$(strip $1)=%, $$(subst $$(COMMA), , $$(LOG)))))
|
||||
override $2 := $$(value)
|
||||
# First try to remove ",<option>" if it exists, otherwise just remove "<option>"
|
||||
LOG_STRIPPED := $$(subst $$(strip $1)=$$(value),, \
|
||||
$$(subst $$(COMMA)$$(strip $1)=$$(value),, $$(LOG)))
|
||||
# We might have ended up with a leading comma. Remove it. Need override
|
||||
# since LOG is set from the command line.
|
||||
override LOG := $$(strip $$(patsubst $$(COMMA)%, %, $$(LOG_STRIPPED)))
|
||||
endif
|
||||
endef
|
||||
|
||||
|
||||
define ParseLogLevel
|
||||
# Catch old-style VERBOSE= command lines.
|
||||
ifneq ($$(origin VERBOSE), undefined)
|
||||
$$(info Error: VERBOSE is deprecated. Use LOG=<warn|info|debug|trace> instead.)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
|
||||
# Setup logging according to LOG
|
||||
|
||||
# If "nofile" is present, do not log to a file
|
||||
$$(eval $$(call ParseLogOption, nofile, LOG_NOFILE))
|
||||
|
||||
# If "cmdline" is present, print all executes "important" command lines.
|
||||
$$(eval $$(call ParseLogOption, cmdlines, LOG_CMDLINES))
|
||||
|
||||
# If "report" is present, use non-standard reporting options at build failure.
|
||||
$$(eval $$(call ParseLogValue, report, LOG_REPORT))
|
||||
ifneq ($$(LOG_REPORT), )
|
||||
ifeq ($$(filter $$(LOG_REPORT), none all default), )
|
||||
$$(info Error: LOG=report has invalid value: $$(LOG_REPORT).)
|
||||
$$(info Valid values: LOG=report=<none>|<all>|<default>)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
endif
|
||||
|
||||
# If "profile-to-log" is present, write shell times in build log
|
||||
$$(eval $$(call ParseLogOption, profile-to-log, LOG_PROFILE_TIMES_LOG))
|
||||
|
||||
# If "profile" is present, write shell times in separate log file
|
||||
# IMPORTANT: $(ParseLogOption profile-to-log) should go first. Otherwise
|
||||
# parsing of 'LOG=debug,profile-to-log,nofile' ends up in the following error:
|
||||
# Error: LOG contains unknown option or log level: debug-to-log.
|
||||
$$(eval $$(call ParseLogOption, profile, LOG_PROFILE_TIMES_FILE))
|
||||
|
||||
# Treat LOG=profile-to-log as if it were LOG=profile,profile-to-log
|
||||
LOG_PROFILE_TIMES_FILE := $$(firstword $$(LOG_PROFILE_TIMES_FILE) $$(LOG_PROFILE_TIMES_LOG))
|
||||
|
||||
override LOG_LEVEL := $$(LOG)
|
||||
|
||||
ifeq ($$(LOG_LEVEL), )
|
||||
# Set LOG to "warn" as default if not set
|
||||
override LOG_LEVEL := warn
|
||||
endif
|
||||
|
||||
ifeq ($$(LOG_LEVEL), warn)
|
||||
override MAKE_LOG_FLAGS := -s
|
||||
else ifeq ($$(LOG_LEVEL), info)
|
||||
override MAKE_LOG_FLAGS := -s
|
||||
else ifeq ($$(LOG_LEVEL), debug)
|
||||
override MAKE_LOG_FLAGS :=
|
||||
else ifeq ($$(LOG_LEVEL), trace)
|
||||
override MAKE_LOG_FLAGS :=
|
||||
else
|
||||
$$(info Error: LOG contains unknown option or log level: $$(LOG).)
|
||||
$$(info LOG can be <level>[,<opt>[...]] where <opt> is nofile | cmdlines | profile | profile-to-log)
|
||||
$$(info and <level> is warn | info | debug | trace)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
endef
|
||||
|
||||
MAKE_LOG_VARS = $(foreach v, \
|
||||
LOG_LEVEL LOG_NOFILE LOG_CMDLINES LOG_REPORT LOG_PROFILE_TIMES_LOG \
|
||||
LOG_PROFILE_TIMES_FILE, \
|
||||
$v=$($v) \
|
||||
)
|
||||
|
||||
endif # _INITSUPPORT_GMK
|
||||
|
||||
215
make/PreInit.gmk
215
make/PreInit.gmk
@@ -1,215 +0,0 @@
|
||||
#
|
||||
# Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
# under the terms of the GNU General Public License version 2 only, as
|
||||
# published by the Free Software Foundation. Oracle designates this
|
||||
# particular file as subject to the "Classpath" exception as provided
|
||||
# by Oracle in the LICENSE file that accompanied this code.
|
||||
#
|
||||
# This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
# version 2 for more details (a copy is included in the LICENSE file that
|
||||
# accompanied this code).
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License version
|
||||
# 2 along with this work; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#
|
||||
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
# or visit www.oracle.com if you need additional information or have any
|
||||
# questions.
|
||||
#
|
||||
|
||||
################################################################################
|
||||
# This file is the earliest part of the build bootstrap process (not counting
|
||||
# Makefile that includes it). Its main responsibility is to figure out what
|
||||
# configuration to use and pick up the corresponding SPEC file. It will then
|
||||
# call Init.gmk with this SPEC for further bootstrapping.
|
||||
################################################################################
|
||||
|
||||
# This must be the first rule
|
||||
default:
|
||||
.PHONY: default
|
||||
|
||||
# Inclusion of this pseudo-target will cause make to execute this file
|
||||
# serially, regardless of -j.
|
||||
.NOTPARALLEL:
|
||||
|
||||
# Include our helper functions.
|
||||
include $(topdir)/make/PreInitSupport.gmk
|
||||
include $(topdir)/make/common/LogUtils.gmk
|
||||
|
||||
# Here are "global" targets, i.e. targets that can be executed without having
|
||||
# a configuration. This will define ALL_GLOBAL_TARGETS.
|
||||
include $(topdir)/make/Global.gmk
|
||||
|
||||
# CALLED_TARGETS is the list of targets that the user provided,
|
||||
# or "default" if unspecified.
|
||||
CALLED_TARGETS := $(if $(MAKECMDGOALS), $(MAKECMDGOALS), default)
|
||||
|
||||
# Extract non-global targets that require a spec file.
|
||||
CALLED_SPEC_TARGETS := $(filter-out $(ALL_GLOBAL_TARGETS), $(CALLED_TARGETS))
|
||||
|
||||
# If we have only global targets, or if we are called with -qp (assuming an
|
||||
# external part, e.g. bash completion, is trying to understand our targets),
|
||||
# we will skip SPEC location and the sanity checks.
|
||||
ifeq ($(CALLED_SPEC_TARGETS), )
|
||||
SKIP_SPEC := true
|
||||
endif
|
||||
ifeq ($(findstring p, $(MAKEFLAGS))$(findstring q, $(MAKEFLAGS)), pq)
|
||||
SKIP_SPEC := true
|
||||
endif
|
||||
|
||||
ifneq ($(SKIP_SPEC), true)
|
||||
|
||||
############################################################################
|
||||
# This is the common case: we have been called from the command line by the
|
||||
# user with a target that should be delegated to Main.gmk, so we need to
|
||||
# figure out a proper SPEC and call Init.gmk with it.
|
||||
############################################################################
|
||||
|
||||
# Basic checks on environment and command line.
|
||||
$(eval $(call CheckControlVariables))
|
||||
$(eval $(call CheckDeprecatedEnvironment))
|
||||
$(eval $(call CheckInvalidMakeFlags))
|
||||
|
||||
# Check that CONF_CHECK is valid.
|
||||
$(eval $(call ParseConfCheckOption))
|
||||
|
||||
# Check that the LOG given is valid, and set LOG_LEVEL, LOG_NOFILE, MAKE_LOG_VARS and MAKE_LOG_FLAGS.
|
||||
$(eval $(call ParseLogLevel))
|
||||
|
||||
# After this SPECS contain 1..N spec files (otherwise ParseConfAndSpec fails).
|
||||
$(eval $(call ParseConfAndSpec))
|
||||
|
||||
# Extract main targets from Main.gmk using the spec(s) provided. In theory,
|
||||
# with multiple specs, we should find the intersection of targets provided
|
||||
# by all specs, but we approximate this by an arbitrary spec from the list.
|
||||
# This will setup ALL_MAIN_TARGETS.
|
||||
$(eval $(call DefineMainTargets, FORCE, $(firstword $(SPECS))))
|
||||
|
||||
# Targets provided by Init.gmk.
|
||||
ALL_INIT_TARGETS := print-modules print-targets print-configuration \
|
||||
print-tests reconfigure pre-compare-build post-compare-build
|
||||
|
||||
# Separate called targets depending on type.
|
||||
INIT_TARGETS := $(filter $(ALL_INIT_TARGETS), $(CALLED_SPEC_TARGETS))
|
||||
MAIN_TARGETS := $(filter $(ALL_MAIN_TARGETS), $(CALLED_SPEC_TARGETS))
|
||||
SEQUENTIAL_TARGETS := $(filter dist-clean clean%, $(MAIN_TARGETS))
|
||||
PARALLEL_TARGETS := $(filter-out $(SEQUENTIAL_TARGETS), $(MAIN_TARGETS))
|
||||
|
||||
# The spec files depend on the autoconf source code. This check makes sure
|
||||
# the configuration is up to date after changes to configure.
|
||||
$(SPECS): $(wildcard $(topdir)/make/autoconf/*) \
|
||||
$(if $(CUSTOM_CONFIG_DIR), $(wildcard $(CUSTOM_CONFIG_DIR)/*)) \
|
||||
$(addprefix $(topdir)/make/conf/, version-numbers.conf branding.conf) \
|
||||
$(if $(CUSTOM_CONF_DIR), $(wildcard $(addprefix $(CUSTOM_CONF_DIR)/, \
|
||||
version-numbers.conf branding.conf)))
|
||||
ifeq ($(CONF_CHECK), fail)
|
||||
@echo Error: The configuration is not up to date for \
|
||||
"'$(lastword $(subst /, , $(dir $@)))'."
|
||||
$(call PrintConfCheckFailed)
|
||||
@exit 2
|
||||
else ifeq ($(CONF_CHECK), auto)
|
||||
@echo Note: The configuration is not up to date for \
|
||||
"'$(lastword $(subst /, , $(dir $@)))'."
|
||||
@( cd $(topdir) && \
|
||||
$(MAKE) $(MFLAGS) $(MAKE_LOG_FLAGS) -r -R -f $(topdir)/make/Init.gmk \
|
||||
SPEC=$@ HAS_SPEC=true ACTUAL_TOPDIR=$(topdir) \
|
||||
reconfigure )
|
||||
else ifeq ($(CONF_CHECK), ignore)
|
||||
# Do nothing
|
||||
endif
|
||||
|
||||
# Do not let make delete spec files even if aborted while doing a reconfigure
|
||||
.PRECIOUS: $(SPECS)
|
||||
|
||||
# Unless reconfigure is explicitly called, let all main targets depend on
|
||||
# the spec files to be up to date.
|
||||
ifeq ($(findstring reconfigure, $(INIT_TARGETS)), )
|
||||
$(MAIN_TARGETS): $(SPECS)
|
||||
endif
|
||||
|
||||
make-info:
|
||||
ifneq ($(findstring $(LOG_LEVEL), info debug trace), )
|
||||
$(info Running make as '$(strip $(MAKE) $(MFLAGS) \
|
||||
$(COMMAND_LINE_VARIABLES) $(MAKECMDGOALS))')
|
||||
endif
|
||||
|
||||
MAKE_INIT_WITH_SPEC_ARGUMENTS := ACTUAL_TOPDIR=$(topdir) \
|
||||
USER_MAKE_VARS="$(USER_MAKE_VARS)" MAKE_LOG_FLAGS=$(MAKE_LOG_FLAGS) \
|
||||
$(MAKE_LOG_VARS) \
|
||||
INIT_TARGETS="$(INIT_TARGETS)" \
|
||||
SEQUENTIAL_TARGETS="$(SEQUENTIAL_TARGETS)" \
|
||||
PARALLEL_TARGETS="$(PARALLEL_TARGETS)"
|
||||
|
||||
# Now the init and main targets will be called, once for each SPEC. The
|
||||
# recipe will be run once for every target specified, but we only want to
|
||||
# execute the recipe a single time, hence the TARGET_DONE with a dummy
|
||||
# command if true.
|
||||
# The COMPARE_BUILD part implements special support for makefile development.
|
||||
$(ALL_INIT_TARGETS) $(ALL_MAIN_TARGETS): make-info
|
||||
@$(if $(TARGET_DONE), \
|
||||
true \
|
||||
, \
|
||||
( cd $(topdir) && \
|
||||
$(foreach spec, $(SPECS), \
|
||||
$(MAKE) $(MFLAGS) $(MAKE_LOG_FLAGS) -r -R -j 1 -f $(topdir)/make/Init.gmk \
|
||||
SPEC=$(spec) HAS_SPEC=true $(MAKE_INIT_WITH_SPEC_ARGUMENTS) \
|
||||
main && \
|
||||
$(if $(and $(COMPARE_BUILD), $(PARALLEL_TARGETS)), \
|
||||
$(MAKE) $(MFLAGS) $(MAKE_LOG_FLAGS) -r -R -f $(topdir)/make/Init.gmk \
|
||||
SPEC=$(spec) HAS_SPEC=true ACTUAL_TOPDIR=$(topdir) \
|
||||
COMPARE_BUILD="$(COMPARE_BUILD)" pre-compare-build && \
|
||||
$(MAKE) $(MFLAGS) $(MAKE_LOG_FLAGS) -r -R -j 1 -f $(topdir)/make/Init.gmk \
|
||||
SPEC=$(spec) HAS_SPEC=true $(MAKE_INIT_WITH_SPEC_ARGUMENTS) \
|
||||
COMPARE_BUILD="$(COMPARE_BUILD):NODRYRUN=true" main && \
|
||||
$(MAKE) $(MFLAGS) $(MAKE_LOG_FLAGS) -r -R -f $(topdir)/make/Init.gmk \
|
||||
SPEC=$(spec) HAS_SPEC=true ACTUAL_TOPDIR=$(topdir) \
|
||||
COMPARE_BUILD="$(COMPARE_BUILD):NODRYRUN=true" post-compare-build && \
|
||||
) \
|
||||
) true ) \
|
||||
$(eval TARGET_DONE=true) \
|
||||
)
|
||||
|
||||
.PHONY: $(ALL_MAIN_TARGETS) $(ALL_INIT_TARGETS)
|
||||
|
||||
else # SKIP_SPEC=true
|
||||
|
||||
############################################################################
|
||||
# We have only global targets, or are called with -pq (from command
|
||||
# completion). In this case we might not even have a configuration at all, but
|
||||
# still need to handle the situation gracefully even if there is no SPEC file.
|
||||
############################################################################
|
||||
|
||||
ifeq ($(wildcard $(SPEC)), )
|
||||
# If we have no SPEC provided, we will just make a "best effort" target list.
|
||||
# First try to grab any available pre-existing main-targets.gmk.
|
||||
main_targets_file := $(firstword $(wildcard $(build_dir)/*/make-support/main-targets.gmk))
|
||||
ifneq ($(main_targets_file), )
|
||||
# Extract the SPEC that corresponds to this main-targets.gmk file.
|
||||
SPEC := $(patsubst %/make-support/main-targets.gmk, %/spec.gmk, $(main_targets_file))
|
||||
else
|
||||
# None found, pick an arbitrary SPEC for which to generate a file
|
||||
SPEC := $(firstword $(all_spec_files))
|
||||
endif
|
||||
endif
|
||||
|
||||
ifneq ($(wildcard $(SPEC)), )
|
||||
$(eval $(call DefineMainTargets, LAZY, $(SPEC)))
|
||||
else
|
||||
# If we have no configurations we can not provide any main targets.
|
||||
ALL_MAIN_TARGETS :=
|
||||
endif
|
||||
|
||||
ALL_TARGETS := $(sort $(ALL_GLOBAL_TARGETS) $(ALL_MAIN_TARGETS) $(ALL_INIT_TARGETS))
|
||||
|
||||
# Just list all our targets.
|
||||
$(ALL_TARGETS):
|
||||
|
||||
.PHONY: $(ALL_TARGETS)
|
||||
|
||||
endif # $(SKIP_SPEC)!=true
|
||||
@@ -1,297 +0,0 @@
|
||||
#
|
||||
# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
# under the terms of the GNU General Public License version 2 only, as
|
||||
# published by the Free Software Foundation. Oracle designates this
|
||||
# particular file as subject to the "Classpath" exception as provided
|
||||
# by Oracle in the LICENSE file that accompanied this code.
|
||||
#
|
||||
# This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
# version 2 for more details (a copy is included in the LICENSE file that
|
||||
# accompanied this code).
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License version
|
||||
# 2 along with this work; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#
|
||||
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
# or visit www.oracle.com if you need additional information or have any
|
||||
# questions.
|
||||
#
|
||||
|
||||
##############################################################################
|
||||
# Helper functions for PreInit.gmk, the initial part of initialization before
|
||||
# the SPEC file is loaded. Most of these functions provide parsing and setting
|
||||
# up make options from the command-line.
|
||||
##############################################################################
|
||||
|
||||
# COMMA is defined in spec.gmk, but that is not included yet
|
||||
COMMA := ,
|
||||
|
||||
# Include the corresponding closed file, if present.
|
||||
ifneq ($(CUSTOM_MAKE_DIR), )
|
||||
-include $(CUSTOM_MAKE_DIR)/InitSupport.gmk
|
||||
endif
|
||||
|
||||
# Essential control variables that are handled by PreInit.gmk or Init.gmk
|
||||
INIT_CONTROL_VARIABLES := LOG CONF CONF_NAME SPEC JOBS CONF_CHECK ALLOW \
|
||||
COMPARE_BUILD
|
||||
|
||||
# All known make control variables; these are handled in other makefiles
|
||||
MAKE_CONTROL_VARIABLES += JDK_FILTER SPEC_FILTER \
|
||||
TEST TEST_JOBS JTREG GTEST MICRO TEST_OPTS TEST_VM_OPTS TEST_DEPS
|
||||
|
||||
ALL_CONTROL_VARIABLES := $(INIT_CONTROL_VARIABLES) $(MAKE_CONTROL_VARIABLES)
|
||||
|
||||
# Define a simple reverse function.
|
||||
# Should maybe move to MakeBase.gmk, but we can't include that file now.
|
||||
reverse = \
|
||||
$(if $(strip $(1)), $(call reverse, $(wordlist 2, $(words $(1)), $(1)))) \
|
||||
$(firstword $(1))
|
||||
|
||||
# The variable MAKEOVERRIDES contains variable assignments from the command
|
||||
# line, but in reverse order to what the user entered.
|
||||
# The '§' <=> '\ 'dance is needed to keep values with space in them connected.
|
||||
COMMAND_LINE_VARIABLES := $(subst §,\ , $(call reverse, $(subst \ ,§,$(MAKEOVERRIDES))))
|
||||
|
||||
# A list like FOO="val1" BAR="val2" containing all user-supplied make
|
||||
# variables that we should propagate.
|
||||
# The '§' <=> '\ 'dance is needed to keep values with space in them connected.
|
||||
# This explicit propagation is needed to avoid problems with characters that needs
|
||||
# escaping.
|
||||
USER_MAKE_VARS := $(subst §,\ , $(filter-out $(addsuffix =%, $(ALL_CONTROL_VARIABLES)), \
|
||||
$(subst \ ,§,$(MAKEOVERRIDES))))
|
||||
|
||||
# Setup information about available configurations, if any.
|
||||
ifneq ($(CUSTOM_ROOT), )
|
||||
build_dir = $(CUSTOM_ROOT)/build
|
||||
else
|
||||
build_dir = $(topdir)/build
|
||||
endif
|
||||
all_spec_files = $(wildcard $(build_dir)/*/spec.gmk)
|
||||
# Extract the configuration names from the path
|
||||
all_confs = $(patsubst %/spec.gmk, %, $(patsubst $(build_dir)/%, %, $(all_spec_files)))
|
||||
|
||||
# Check for unknown command-line variables
|
||||
define CheckControlVariables
|
||||
command_line_variables := $$(strip $$(foreach var, \
|
||||
$$(subst \ ,_,$$(MAKEOVERRIDES)), \
|
||||
$$(firstword $$(subst =, , $$(var)))))
|
||||
allowed_command_line_variables := $$(strip $$(subst $$(COMMA), , $$(ALLOW)))
|
||||
unknown_command_line_variables := $$(strip \
|
||||
$$(filter-out $$(ALL_CONTROL_VARIABLES) $$(allowed_command_line_variables), \
|
||||
$$(command_line_variables)))
|
||||
ifneq ($$(unknown_command_line_variables), )
|
||||
$$(info Note: Command line contains non-control variables:)
|
||||
$$(foreach var, $$(unknown_command_line_variables), $$(info * $$(var)=$$($$(var))))
|
||||
$$(info Make sure it is not mistyped, and that you intend to override this variable.)
|
||||
$$(info 'make help' will list known control variables.)
|
||||
$$(info )
|
||||
endif
|
||||
endef
|
||||
|
||||
# Check for deprecated ALT_ variables
|
||||
define CheckDeprecatedEnvironment
|
||||
defined_alt_variables := $$(filter ALT_%, $$(.VARIABLES))
|
||||
ifneq ($$(defined_alt_variables), )
|
||||
$$(info Warning: You have the following ALT_ variables set:)
|
||||
$$(foreach var, $$(defined_alt_variables), $$(info * $$(var)=$$($$(var))))
|
||||
$$(info ALT_ variables are deprecated, and may result in a failed build.)
|
||||
$$(info Please clean your environment.)
|
||||
$$(info )
|
||||
endif
|
||||
endef
|
||||
|
||||
# Check for invalid make flags like -j
|
||||
define CheckInvalidMakeFlags
|
||||
# This is a trick to get this rule to execute before any other rules
|
||||
# MAKEFLAGS only indicate -j if read in a recipe (!)
|
||||
$$(topdir)/make/PreInit.gmk: .FORCE
|
||||
$$(if $$(findstring --jobserver, $$(MAKEFLAGS)), \
|
||||
$$(info Error: 'make -jN' is not supported, use 'make JOBS=N') \
|
||||
$$(error Cannot continue) \
|
||||
)
|
||||
.FORCE:
|
||||
.PHONY: .FORCE
|
||||
endef
|
||||
|
||||
# Check that the CONF_CHECK option is valid and set up handling
|
||||
define ParseConfCheckOption
|
||||
ifeq ($$(CONF_CHECK), )
|
||||
# Default behavior is fail
|
||||
CONF_CHECK := fail
|
||||
else ifneq ($$(filter-out auto fail ignore, $$(CONF_CHECK)), )
|
||||
$$(info Error: CONF_CHECK must be one of: auto, fail or ignore.)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
endef
|
||||
|
||||
define ParseConfAndSpec
|
||||
ifneq ($$(origin SPEC), undefined)
|
||||
# We have been given a SPEC, check that it works out properly
|
||||
ifneq ($$(origin CONF), undefined)
|
||||
# We also have a CONF argument. We can't have both.
|
||||
$$(info Error: Cannot use CONF=$$(CONF) and SPEC=$$(SPEC) at the same time. Choose one.)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
ifneq ($$(origin CONF_NAME), undefined)
|
||||
# We also have a CONF_NAME argument. We can't have both.
|
||||
$$(info Error: Cannot use CONF_NAME=$$(CONF_NAME) and SPEC=$$(SPEC) at the same time. Choose one.)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
ifeq ($$(wildcard $$(SPEC)), )
|
||||
$$(info Error: Cannot locate spec.gmk, given by SPEC=$$(SPEC).)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
ifeq ($$(filter /%, $$(SPEC)), )
|
||||
# If given with relative path, make it absolute
|
||||
SPECS := $$(CURDIR)/$$(strip $$(SPEC))
|
||||
else
|
||||
SPECS := $$(SPEC)
|
||||
endif
|
||||
|
||||
# For now, unset this SPEC variable.
|
||||
override SPEC :=
|
||||
else
|
||||
# Use spec.gmk files in the build output directory
|
||||
ifeq ($$(all_spec_files), )
|
||||
ifneq ($(CUSTOM_ROOT), )
|
||||
$$(info Error: No configurations found for $$(CUSTOM_ROOT).)
|
||||
else
|
||||
$$(info Error: No configurations found for $$(topdir).)
|
||||
endif
|
||||
$$(info Please run 'bash configure' to create a configuration.)
|
||||
$$(info )
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
|
||||
ifneq ($$(origin CONF_NAME), undefined)
|
||||
ifneq ($$(origin CONF), undefined)
|
||||
# We also have a CONF argument. We can't have both.
|
||||
$$(info Error: Cannot use CONF=$$(CONF) and CONF_NAME=$$(CONF_NAME) at the same time. Choose one.)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
matching_conf := $$(strip $$(filter $$(CONF_NAME), $$(all_confs)))
|
||||
ifeq ($$(matching_conf), )
|
||||
$$(info Error: No configurations found matching CONF_NAME=$$(CONF_NAME).)
|
||||
$$(info Available configurations in $$(build_dir):)
|
||||
$$(foreach var, $$(all_confs), $$(info * $$(var)))
|
||||
$$(error Cannot continue)
|
||||
else ifneq ($$(words $$(matching_conf)), 1)
|
||||
$$(info Error: Matching more than one configuration CONF_NAME=$$(CONF_NAME).)
|
||||
$$(info Available configurations in $$(build_dir):)
|
||||
$$(foreach var, $$(all_confs), $$(info * $$(var)))
|
||||
$$(error Cannot continue)
|
||||
else
|
||||
$$(info Building configuration '$$(matching_conf)' (matching CONF_NAME=$$(CONF_NAME)))
|
||||
endif
|
||||
# Create a SPEC definition. This will contain the path to exactly one spec file.
|
||||
SPECS := $$(build_dir)/$$(matching_conf)/spec.gmk
|
||||
else ifneq ($$(origin CONF), undefined)
|
||||
# User have given a CONF= argument.
|
||||
ifeq ($$(CONF), )
|
||||
# If given CONF=, match all configurations
|
||||
matching_confs := $$(strip $$(all_confs))
|
||||
else
|
||||
# Otherwise select those that contain the given CONF string
|
||||
ifeq ($$(patsubst !%,,$$(CONF)), )
|
||||
# A CONF starting with ! means we should negate the search term
|
||||
matching_confs := $$(strip $$(foreach var, $$(all_confs), \
|
||||
$$(if $$(findstring $$(subst !,,$$(CONF)), $$(var)), ,$$(var))))
|
||||
else
|
||||
matching_confs := $$(strip $$(foreach var, $$(all_confs), \
|
||||
$$(if $$(findstring $$(CONF), $$(var)), $$(var))))
|
||||
endif
|
||||
ifneq ($$(filter $$(CONF), $$(matching_confs)), )
|
||||
ifneq ($$(word 2, $$(matching_confs)), )
|
||||
# Don't repeat this output on make restarts caused by including
|
||||
# generated files.
|
||||
ifeq ($$(MAKE_RESTARTS), )
|
||||
$$(info Using exact match for CONF=$$(CONF) (other matches are possible))
|
||||
endif
|
||||
endif
|
||||
# If we found an exact match, use that
|
||||
matching_confs := $$(CONF)
|
||||
endif
|
||||
endif
|
||||
ifeq ($$(matching_confs), )
|
||||
$$(info Error: No configurations found matching CONF=$$(CONF).)
|
||||
$$(info Available configurations in $$(build_dir):)
|
||||
$$(foreach var, $$(all_confs), $$(info * $$(var)))
|
||||
$$(error Cannot continue)
|
||||
else
|
||||
# Don't repeat this output on make restarts caused by including
|
||||
# generated files.
|
||||
ifeq ($$(MAKE_RESTARTS), )
|
||||
ifeq ($$(words $$(matching_confs)), 1)
|
||||
ifneq ($$(findstring $$(LOG_LEVEL), info debug trace), )
|
||||
$$(info Building configuration '$$(matching_confs)' (matching CONF=$$(CONF)))
|
||||
endif
|
||||
else
|
||||
$$(info Building these configurations (matching CONF=$$(CONF)):)
|
||||
$$(foreach var, $$(matching_confs), $$(info * $$(var)))
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
|
||||
# Create a SPEC definition. This will contain the path to one or more spec.gmk files.
|
||||
SPECS := $$(addsuffix /spec.gmk, $$(addprefix $$(build_dir)/, $$(matching_confs)))
|
||||
else
|
||||
# No CONF or SPEC given, check the available configurations
|
||||
ifneq ($$(words $$(all_spec_files)), 1)
|
||||
$$(info Error: No CONF given, but more than one configuration found.)
|
||||
$$(info Available configurations in $$(build_dir):)
|
||||
$$(foreach var, $$(all_confs), $$(info * $$(var)))
|
||||
$$(info Please retry building with CONF=<config pattern> (or SPEC=<spec file>).)
|
||||
$$(info )
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
|
||||
# We found exactly one configuration, use it
|
||||
SPECS := $$(strip $$(all_spec_files))
|
||||
endif
|
||||
endif
|
||||
endef
|
||||
|
||||
# Extract main targets from Main.gmk using the spec provided in $2.
|
||||
#
|
||||
# Param 1: FORCE = force generation of main-targets.gmk or LAZY = do not force.
|
||||
# Param 2: The SPEC file to use.
|
||||
define DefineMainTargets
|
||||
|
||||
# We will start by making sure the main-targets.gmk file is removed, if
|
||||
# make has not been restarted. By the -include, we will trigger the
|
||||
# rule for generating the file (which is never there since we removed it),
|
||||
# thus generating it fresh, and make will restart, incrementing the restart
|
||||
# count.
|
||||
main_targets_file := $$(dir $(strip $2))make-support/main-targets.gmk
|
||||
|
||||
ifeq ($$(MAKE_RESTARTS), )
|
||||
# Only do this if make has not been restarted, and if we do not force it.
|
||||
ifeq ($(strip $1), FORCE)
|
||||
$$(shell rm -f $$(main_targets_file))
|
||||
endif
|
||||
endif
|
||||
|
||||
$$(main_targets_file):
|
||||
@( cd $$(topdir) && \
|
||||
$$(MAKE) $$(MAKE_LOG_FLAGS) -r -R -f $$(topdir)/make/Main.gmk \
|
||||
-I $$(topdir)/make/common SPEC=$(strip $2) NO_RECIPES=true \
|
||||
$$(MAKE_LOG_VARS) \
|
||||
create-main-targets-include )
|
||||
|
||||
# Now include main-targets.gmk. This will define ALL_MAIN_TARGETS.
|
||||
-include $$(main_targets_file)
|
||||
endef
|
||||
|
||||
define PrintConfCheckFailed
|
||||
@echo ' '
|
||||
@echo "Please rerun configure! Easiest way to do this is by running"
|
||||
@echo "'make reconfigure'."
|
||||
@echo "This behavior may also be changed using CONF_CHECK=<ignore|auto>."
|
||||
@echo ' '
|
||||
endef
|
||||
@@ -78,9 +78,6 @@ $(eval $(call IncludeCustomExtension, RunTests.gmk))

# This is the JDK that we will test
JDK_UNDER_TEST := $(JDK_IMAGE_DIR)
# The JDK used to compile jtreg test code. By default it is the same as
# JDK_UNDER_TEST.
JDK_FOR_COMPILE := $(JDK_IMAGE_DIR)

TEST_RESULTS_DIR := $(OUTPUTDIR)/test-results
TEST_SUPPORT_DIR := $(OUTPUTDIR)/test-support

@@ -982,7 +979,6 @@ define SetupRunJtregTestBody
	    $$(JTREG_JAVA) $$($1_JTREG_LAUNCHER_OPTIONS) \
	        -Dprogram=jtreg -jar $$(JT_HOME)/lib/jtreg.jar \
	        $$($1_JTREG_BASIC_OPTIONS) \
	        -compilejdk:$$(JDK_FOR_COMPILE) \
	        -testjdk:$$(JDK_UNDER_TEST) \
	        -dir:$$(JTREG_TOPDIR) \
	        -reportDir:$$($1_TEST_RESULTS_DIR) \
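As a side note on the two variables above: because they are plain ':=' assignments, standard GNU make semantics let a command-line assignment win over them. A minimal stand-alone sketch of just that mechanism (file name, path and target are made up; this says nothing about how the full test harness wires the value through):

    # demo.mk -- run with:  make -f demo.mk JDK_UNDER_TEST=/opt/other-jdk
    JDK_UNDER_TEST := /default/jdk-image
    all: ; @echo Testing $(JDK_UNDER_TEST)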
@@ -24,7 +24,7 @@
#

################################################################################
# Initial bootstrapping, copied and stripped down from Makefile and PreInit.gmk.
# Initial bootstrapping, copied and stripped down from Makefile and Init.gmk
################################################################################

# In Cygwin, the MAKE variable gets prepended with the current directory if the
@@ -136,8 +136,7 @@ $(eval $(call SetupVariable,JIB_JAR,OPTIONAL))
# wrapper. This is required so we can include MakeBase which is needed for
# CreateNewSpec.
HAS_SPEC :=
include $(TOPDIR)/make/PreInitSupport.gmk
include $(TOPDIR)/make/common/LogUtils.gmk
include $(TOPDIR)/make/InitSupport.gmk

$(eval $(call CheckDeprecatedEnvironment))
$(eval $(call CheckInvalidMakeFlags))

@@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -624,10 +624,4 @@ AC_DEFUN_ONCE([BASIC_POST_CONFIG_OUTPUT],

  # Make the compare script executable
  $CHMOD +x $OUTPUTDIR/compare.sh

  # Copy the linker wrapper script for clang on AIX and make it executable
  if test "x$TOOLCHAIN_TYPE" = xclang && test "x$OPENJDK_TARGET_OS" = xaix; then
    $CP -f "$TOPDIR/make/scripts/aix/ld.sh" "$OUTPUTDIR/ld.sh"
    $CHMOD +x "$OUTPUTDIR/ld.sh"
  fi
])

@@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -235,10 +235,9 @@ AC_DEFUN([FLAGS_SETUP_WARNINGS],
      CFLAGS_WARNINGS_ARE_ERRORS="-Werror"

      # Additional warnings that are not activated by -Wall and -Wextra
      WARNINGS_ENABLE_ADDITIONAL="-Winvalid-pch -Wpointer-arith -Wreturn-type \
          -Wsign-compare -Wtrampolines -Wtype-limits -Wundef -Wuninitialized \
          -Wunused-const-variable=1 -Wunused-function -Wunused-result \
          -Wunused-value"
      WARNINGS_ENABLE_ADDITIONAL="-Wpointer-arith -Wreturn-type -Wsign-compare \
          -Wtrampolines -Wundef -Wunused-const-variable=1 -Wunused-function \
          -Wunused-result -Wunused-value -Wtype-limits -Wuninitialized"
      WARNINGS_ENABLE_ADDITIONAL_CXX="-Woverloaded-virtual -Wreorder"
      WARNINGS_ENABLE_ALL_CFLAGS="-Wall -Wextra -Wformat=2 $WARNINGS_ENABLE_ADDITIONAL"
      WARNINGS_ENABLE_ALL_CXXFLAGS="$WARNINGS_ENABLE_ALL_CFLAGS $WARNINGS_ENABLE_ADDITIONAL_CXX"
@@ -278,7 +277,7 @@ AC_DEFUN([FLAGS_SETUP_WARNINGS],
AC_DEFUN([FLAGS_SETUP_QUALITY_CHECKS],
[
  # bounds, memory and behavior checking options
  if test "x$TOOLCHAIN_TYPE" = xgcc || test "x$TOOLCHAIN_TYPE" = xclang; then
  if test "x$TOOLCHAIN_TYPE" = xgcc; then
    case $DEBUG_LEVEL in
      release )
        # no adjustment
@@ -517,6 +516,12 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_HELPER],
        -fvisibility=hidden -fno-strict-aliasing -fno-omit-frame-pointer"
  fi

  if test "x$TOOLCHAIN_TYPE" = xclang && test "x$OPENJDK_TARGET_OS" = xaix; then
    # clang compiler on aix needs -ffunction-sections
    TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -ffunction-sections -ftls-model -fno-math-errno -fstack-protector"
    TOOLCHAIN_CFLAGS_JDK="-ffunction-sections -fsigned-char -fstack-protector"
  fi

  if test "x$TOOLCHAIN_TYPE" = xgcc; then
    TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -fstack-protector"
    TOOLCHAIN_CFLAGS_JDK="-fvisibility=hidden -pipe -fstack-protector"
@@ -536,7 +541,7 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_HELPER],
    # Restrict the debug information created by Clang to avoid
    # too big object files and speed the build up a little bit
    # (see http://llvm.org/bugs/show_bug.cgi?id=7554)
    TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -flimit-debug-info -fstack-protector"
    TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -flimit-debug-info"

    # In principle the stack alignment below is cpu- and ABI-dependent and
    # should agree with values of StackAlignmentInBytes in various
@@ -554,13 +559,7 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_HELPER],
    TOOLCHAIN_CFLAGS_JDK="-pipe"
    TOOLCHAIN_CFLAGS_JDK_CONLY="-fno-strict-aliasing" # technically NOT for CXX
  fi

  if test "x$OPENJDK_TARGET_OS" = xaix; then
    TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -ffunction-sections -ftls-model -fno-math-errno"
    TOOLCHAIN_CFLAGS_JDK="-ffunction-sections -fsigned-char"
  fi

  TOOLCHAIN_CFLAGS_JDK="$TOOLCHAIN_CFLAGS_JDK -fvisibility=hidden -fstack-protector"
  TOOLCHAIN_CFLAGS_JDK="$TOOLCHAIN_CFLAGS_JDK -fvisibility=hidden"

elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
  # The -utf-8 option sets source and execution character sets to UTF-8 to enable correct
@@ -737,11 +736,6 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_CPU_DEP],
    # for all archs except arm and ppc, prevent gcc from omitting the frame pointer
    $1_CFLAGS_CPU_JDK="${$1_CFLAGS_CPU_JDK} -fno-omit-frame-pointer"
  fi
  if test "x$FLAGS_CPU" = xppc64le; then
    # Little endian machine uses ELFv2 ABI.
    # Use Power8, this is the first CPU to support PPC64 LE with ELFv2 ABI.
    $1_CFLAGS_CPU_JVM="${$1_CFLAGS_CPU_JVM} -DABI_ELFv2 -mcpu=power8 -mtune=power8"
  fi
fi
if test "x$OPENJDK_TARGET_OS" = xaix; then
  $1_CFLAGS_CPU="-mcpu=pwr8"

@@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -79,7 +79,7 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
  fi
  if test "x$OPENJDK_TARGET_OS" = xaix; then
    BASIC_LDFLAGS="-Wl,-b64 -Wl,-brtl -Wl,-bnorwexec -Wl,-bnolibpath -Wl,-bnoexpall \
        -Wl,-bernotok -Wl,-bdatapsize:64k -Wl,-btextpsize:64k -Wl,-bstackpsize:64k -fuse-ld=$OUTPUTDIR/ld.sh"
        -Wl,-bernotok -Wl,-bdatapsize:64k -Wl,-btextpsize:64k -Wl,-bstackpsize:64k"
    BASIC_LDFLAGS_JVM_ONLY="$BASIC_LDFLAGS_JVM_ONLY -Wl,-lC_r -Wl,-bbigtoc"
  fi

@@ -1,127 +0,0 @@
#
# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#

################################################################################
# This file contains helper functions for logging.
################################################################################

# Look for a given option in the LOG variable, and if found, set a variable
# and remove the option from the LOG variable
# $1: The option to look for
# $2: The variable to set to "true" if the option is found
define ParseLogOption
  ifneq ($$(findstring $1, $$(LOG)), )
    override $2 := true
    # First try to remove ",<option>" if it exists, otherwise just remove "<option>"
    LOG_STRIPPED := $$(subst $1,, $$(subst $$(COMMA)$$(strip $1),, $$(LOG)))
    # We might have ended up with a leading comma. Remove it. Need override
    # since LOG is set from the command line.
    override LOG := $$(strip $$(patsubst $$(COMMA)%, %, $$(LOG_STRIPPED)))
  endif
endef

# Look for a given option with an assignment in the LOG variable, and if found,
# set a variable to that value and remove the option from the LOG variable
# $1: The option to look for
# $2: The variable to set to the value of the option, if found
define ParseLogValue
  ifneq ($$(findstring $1=, $$(LOG)), )
    # Make words out of the comma-separated list and find the one with opt=val
    value := $$(strip $$(subst $$(strip $1)=,, $$(filter $$(strip $1)=%, $$(subst $$(COMMA), , $$(LOG)))))
    override $2 := $$(value)
    # First try to remove ",<option>" if it exists, otherwise just remove "<option>"
    LOG_STRIPPED := $$(subst $$(strip $1)=$$(value),, \
        $$(subst $$(COMMA)$$(strip $1)=$$(value),, $$(LOG)))
    # We might have ended up with a leading comma. Remove it. Need override
    # since LOG is set from the command line.
    override LOG := $$(strip $$(patsubst $$(COMMA)%, %, $$(LOG_STRIPPED)))
  endif
endef
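As an illustration, a minimal stand-alone sketch of how the parser above peels options off LOG (it assumes the ParseLogOption define above is loaded; COMMA is normally provided elsewhere in the build system, so it is set by hand here):

    COMMA := ,
    LOG := info,cmdlines,nofile
    $(eval $(call ParseLogOption, nofile, LOG_NOFILE))
    $(eval $(call ParseLogOption, cmdlines, LOG_CMDLINES))
    # After parsing, LOG has been reduced to the bare log level:
    $(info LOG=$(LOG) LOG_NOFILE=$(LOG_NOFILE) LOG_CMDLINES=$(LOG_CMDLINES))
    all: ; @: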

define ParseLogLevel
  # Catch old-style VERBOSE= command lines.
  ifneq ($$(origin VERBOSE), undefined)
    $$(info Error: VERBOSE is deprecated. Use LOG=<warn|info|debug|trace> instead.)
    $$(error Cannot continue)
  endif

  # Setup logging according to LOG

  # If "nofile" is present, do not log to a file
  $$(eval $$(call ParseLogOption, nofile, LOG_NOFILE))

  # If "cmdlines" is present, print all executed "important" command lines.
  $$(eval $$(call ParseLogOption, cmdlines, LOG_CMDLINES))

  # If "report" is present, use non-standard reporting options at build failure.
  $$(eval $$(call ParseLogValue, report, LOG_REPORT))
  ifneq ($$(LOG_REPORT), )
    ifeq ($$(filter $$(LOG_REPORT), none all default), )
      $$(info Error: LOG=report has invalid value: $$(LOG_REPORT).)
      $$(info Valid values: LOG=report=<none>|<all>|<default>)
      $$(error Cannot continue)
    endif
  endif

  # If "profile-to-log" is present, write shell times in build log
  $$(eval $$(call ParseLogOption, profile-to-log, LOG_PROFILE_TIMES_LOG))

  # If "profile" is present, write shell times in separate log file
  # IMPORTANT: $(ParseLogOption profile-to-log) should go first. Otherwise
  # parsing of 'LOG=debug,profile-to-log,nofile' ends up in the following error:
  # Error: LOG contains unknown option or log level: debug-to-log.
  $$(eval $$(call ParseLogOption, profile, LOG_PROFILE_TIMES_FILE))

  # Treat LOG=profile-to-log as if it were LOG=profile,profile-to-log
  LOG_PROFILE_TIMES_FILE := $$(firstword $$(LOG_PROFILE_TIMES_FILE) $$(LOG_PROFILE_TIMES_LOG))

  override LOG_LEVEL := $$(LOG)

  ifeq ($$(LOG_LEVEL), )
    # Set LOG to "warn" as default if not set
    override LOG_LEVEL := warn
  endif

  ifeq ($$(LOG_LEVEL), warn)
    override MAKE_LOG_FLAGS := -s
  else ifeq ($$(LOG_LEVEL), info)
    override MAKE_LOG_FLAGS := -s
  else ifeq ($$(LOG_LEVEL), debug)
    override MAKE_LOG_FLAGS :=
  else ifeq ($$(LOG_LEVEL), trace)
    override MAKE_LOG_FLAGS :=
  else
    $$(info Error: LOG contains unknown option or log level: $$(LOG).)
    $$(info LOG can be <level>[,<opt>[...]] where <opt> is nofile | cmdlines | profile | profile-to-log)
    $$(info and <level> is warn | info | debug | trace)
    $$(error Cannot continue)
  endif
endef

MAKE_LOG_VARS = $(foreach v, \
    LOG_LEVEL LOG_NOFILE LOG_CMDLINES LOG_REPORT LOG_PROFILE_TIMES_LOG \
    LOG_PROFILE_TIMES_FILE, \
    $v=$($v) \
)
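A minimal stand-alone sketch of what MAKE_LOG_VARS contributes to the sub-make command line (the variable values below are made up):

    LOG_LEVEL := debug
    LOG_CMDLINES := true
    MAKE_LOG_VARS = $(foreach v, LOG_LEVEL LOG_CMDLINES, $v=$($v))
    $(info $(MAKE_LOG_VARS))   # prints: LOG_LEVEL=debug LOG_CMDLINES=true
    all: ; @: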
@@ -1,5 +1,5 @@
#
# Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -92,7 +92,7 @@ SRC_SUBDIRS += share/classes

SPEC_SUBDIRS += share/specs

MAN_SUBDIRS += share/man windows/man
MAN_SUBDIRS += share/man

# Find all module-info.java files for the current build target platform and
# configuration.

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -1092,9 +1092,9 @@ var getJibProfilesDependencies = function (input, common) {
        windows_x64: "VS2022-17.6.5+1.0",
        linux_aarch64: "gcc13.2.0-OL7.6+1.0",
        linux_arm: "gcc8.2.0-Fedora27+1.0",
        linux_ppc64le: "gcc13.2.0-Fedora_41+1.0",
        linux_s390x: "gcc13.2.0-Fedora_41+1.0",
        linux_riscv64: "gcc13.2.0-Fedora_41+1.0"
        linux_ppc64le: "gcc8.2.0-Fedora27+1.0",
        linux_s390x: "gcc8.2.0-Fedora27+1.0",
        linux_riscv64: "gcc11.3.0-Fedora_rawhide_68692+1.1"
    };

    var devkit_platform = (input.target_cpu == "x86"

@@ -1,5 +1,5 @@
#
# Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -63,14 +63,18 @@ ifeq ($(BASE_OS), OL)
    LINUX_VERSION := OL6.4
  endif
else ifeq ($(BASE_OS), Fedora)
  DEFAULT_OS_VERSION := 41
  ifeq ($(BASE_OS_VERSION), )
    BASE_OS_VERSION := $(DEFAULT_OS_VERSION)
  endif
  ifeq ($(ARCH), riscv64)
    BASE_URL := http://fedora.riscv.rocks/repos-dist/f$(BASE_OS_VERSION)/latest/$(ARCH)/Packages/
    DEFAULT_OS_VERSION := rawhide/68692
    ifeq ($(BASE_OS_VERSION), )
      BASE_OS_VERSION := $(DEFAULT_OS_VERSION)
    endif
    BASE_URL := http://fedora.riscv.rocks/repos-dist/$(BASE_OS_VERSION)/$(ARCH)/Packages/
  else
    DEFAULT_OS_VERSION := 27
    LATEST_ARCHIVED_OS_VERSION := 35
    ifeq ($(BASE_OS_VERSION), )
      BASE_OS_VERSION := $(DEFAULT_OS_VERSION)
    endif
    ifeq ($(filter x86_64 armhfp, $(ARCH)), )
      FEDORA_TYPE := fedora-secondary
    else
@@ -199,7 +203,7 @@ RPM_LIST := \
    glibc glibc-headers glibc-devel \
    cups-libs cups-devel \
    libX11 libX11-devel \
    libxcb xorg-x11-proto-devel \
    xorg-x11-proto-devel \
    alsa-lib alsa-lib-devel \
    libXext libXext-devel \
    libXtst libXtst-devel \
@@ -437,9 +441,8 @@ $(gcc) \
# wants.
$(BUILDDIR)/$(binutils_ver)/Makefile : CONFIG += --enable-64-bit-bfd --libdir=$(PREFIX)/$(word 1,$(LIBDIRS))

ifeq ($(filter $(ARCH), s390x riscv64 ppc64le), )
  # gold compiles but cannot link properly on s390x @ gcc 13.2 and Fedora 41
  # gold is not available for riscv64 and ppc64le,
ifneq ($(ARCH), riscv64)
  # gold is not available for riscv64 for some reason,
  # and subsequent linking will fail if we try to enable it.
  LINKER_CONFIG := --enable-gold=default
endif

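For illustration, the riscv64 branch above resolves to a repository URL by plain substitution; a stand-alone sketch (values taken from the defaults above, which may of course change):

    ARCH := riscv64
    BASE_OS_VERSION := 41
    BASE_URL := http://fedora.riscv.rocks/repos-dist/f$(BASE_OS_VERSION)/latest/$(ARCH)/Packages/
    $(info $(BASE_URL))   # http://fedora.riscv.rocks/repos-dist/f41/latest/riscv64/Packages/
    all: ; @:

The other architectures instead go through the regular Fedora (or fedora-secondary) package trees selected further down in this Makefile.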
@@ -1,5 +1,5 @@
#
# Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -125,7 +125,9 @@ else ifeq ($(call And, $(call isTargetOs, linux) $(call isTargetCpu, aarch64)),
  endif
endif

JVM_PRECOMPILED_HEADER := $(TOPDIR)/src/hotspot/share/precompiled/precompiled.hpp
ifeq ($(call isTargetOs, linux macosx windows), true)
  JVM_PRECOMPILED_HEADER := $(TOPDIR)/src/hotspot/share/precompiled/precompiled.hpp
endif

ifeq ($(call isTargetCpu, x86), true)
  JVM_EXCLUDE_PATTERNS += x86_64

@@ -1,5 +1,5 @@
#
# Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -52,23 +52,15 @@ ifneq ($(FDLIBM_CFLAGS), )
endif

ifeq ($(call isTargetOs, linux), true)
  BUILD_LIBJVM_sharedRuntimeTrig.cpp_CXXFLAGS := $(FDLIBM_CFLAGS) $(LIBJVM_FDLIBM_COPY_OPT_FLAG)
  BUILD_LIBJVM_sharedRuntimeTrans.cpp_CXXFLAGS := $(FDLIBM_CFLAGS) $(LIBJVM_FDLIBM_COPY_OPT_FLAG)
  BUILD_LIBJVM_sharedRuntimeTrig.cpp_CXXFLAGS := -DNO_PCH $(FDLIBM_CFLAGS) $(LIBJVM_FDLIBM_COPY_OPT_FLAG)
  BUILD_LIBJVM_sharedRuntimeTrans.cpp_CXXFLAGS := -DNO_PCH $(FDLIBM_CFLAGS) $(LIBJVM_FDLIBM_COPY_OPT_FLAG)

  ifeq ($(TOOLCHAIN_TYPE), clang)
    JVM_PRECOMPILED_HEADER_EXCLUDE := \
        sharedRuntimeTrig.cpp \
        sharedRuntimeTrans.cpp \
        sharedRuntimeTrig.cpp \
        sharedRuntimeTrans.cpp \
        $(OPT_SPEED_SRC) \
        #
  endif

  ifeq ($(call isTargetCpu, ppc64le)+$(TOOLCHAIN_TYPE), true+gcc)
    JVM_PRECOMPILED_HEADER_EXCLUDE := \
        sharedRuntimeTrig.cpp \
        sharedRuntimeTrans.cpp \
        $(OPT_SPEED_SRC) \
        #
        #
  endif

  ifeq ($(call isTargetCpu, x86), true)
@@ -118,7 +110,11 @@ else ifeq ($(call isTargetOs, macosx), true)
  endif

else ifeq ($(call isTargetOs, aix), true)
  BUILD_LIBJVM_synchronizer.cpp_CXXFLAGS := -fno-inline
  ifeq ($(TOOLCHAIN_TYPE), clang)
    BUILD_LIBJVM_synchronizer.cpp_CXXFLAGS := -fno-inline
  else
    BUILD_LIBJVM_synchronizer.cpp_CXXFLAGS := -qnoinline
  endif
  BUILD_LIBJVM_sharedRuntimeTrans.cpp_CXXFLAGS := $(CXX_O_FLAG_NONE)
  # Disable aggressive optimizations for functions in sharedRuntimeTrig.cpp
  # and sharedRuntimeTrans.cpp on ppc64.
@@ -142,13 +138,6 @@ else ifeq ($(call isTargetOs, aix), true)
  # Disable ELF decoder on AIX (AIX uses XCOFF).
  JVM_EXCLUDE_PATTERNS += elf

  JVM_PRECOMPILED_HEADER_EXCLUDE := \
      sharedRuntimeTrig.cpp \
      sharedRuntimeTrans.cpp \
      synchronizer.cpp \
      $(OPT_SPEED_SRC) \
      #

else ifeq ($(call isTargetOs, windows), true)
  JVM_PRECOMPILED_HEADER_EXCLUDE := \
      bytecodeInterpreter.cpp \

@@ -1,5 +1,5 @@
#
# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -393,6 +393,8 @@ else
    LIBFONTMANAGER_JDK_LIBS += libfreetype
  endif

  LIBFONTMANAGER_OPTIMIZATION := HIGHEST

  ifneq ($(filter $(TOOLCHAIN_TYPE), gcc clang), )
    # gcc (and to an extent clang) is particularly bad at optimizing these files,
    # causing a massive spike in compile time. We don't care about these
@@ -403,6 +405,7 @@ endif

  ifeq ($(call isTargetOs, windows), true)
    LIBFONTMANAGER_EXCLUDE_FILES += X11FontScaler.c X11TextRenderer.c
    LIBFONTMANAGER_OPTIMIZATION := HIGHEST
  else ifeq ($(call isTargetOs, macosx), true)
    LIBFONTMANAGER_EXCLUDE_FILES += X11FontScaler.c X11TextRenderer.c \
        lcdglyph.c lcdglyphDW.cpp
@@ -423,7 +426,7 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBFONTMANAGER, \
      AccelGlyphCache.c, \
      CFLAGS := $(LIBFONTMANAGER_CFLAGS), \
      CXXFLAGS := $(LIBFONTMANAGER_CFLAGS), \
      OPTIMIZATION := HIGHEST, \
      OPTIMIZATION := $(LIBFONTMANAGER_OPTIMIZATION), \
      CFLAGS_windows = -DCC_NOEX, \
      EXTRA_HEADER_DIRS := $(LIBFONTMANAGER_EXTRA_HEADER_DIRS), \
      EXTRA_SRC := $(LIBFONTMANAGER_EXTRA_SRC), \

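The change above follows a common pattern in these makefiles: give a variable a default, let platform branches override it, then pass the final value to the setup macro. A minimal stand-alone sketch of that pattern (the LIBFOO name, the HIGH value and the windows condition are illustrative only, not taken from the build system):

    LIBFOO_OPTIMIZATION := HIGHEST
    ifeq ($(OPENJDK_TARGET_OS), windows)
      LIBFOO_OPTIMIZATION := HIGH
    endif
    $(info LIBFOO_OPTIMIZATION = $(LIBFOO_OPTIMIZATION))
    all: ; @: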
@@ -1,27 +0,0 @@
#!/bin/bash
#
# Copyright (c) 2025 SAP SE. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
unset LIBPATH
exec /usr/bin/ld "$@"
@@ -1217,24 +1217,15 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
|
||||
void LIR_Assembler::type_profile_helper(Register mdo,
|
||||
ciMethodData *md, ciProfileData *data,
|
||||
Register recv, Label* update_done) {
|
||||
|
||||
// Given a profile data offset, generate an Address which points to
|
||||
// the corresponding slot in mdo->data().
|
||||
// Clobbers rscratch2.
|
||||
auto slot_at = [=](ByteSize offset) -> Address {
|
||||
return __ form_address(rscratch2, mdo,
|
||||
md->byte_offset_of_slot(data, offset),
|
||||
LogBytesPerWord);
|
||||
};
|
||||
|
||||
for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
|
||||
Label next_test;
|
||||
// See if the receiver is receiver[n].
|
||||
__ ldr(rscratch1, slot_at(ReceiverTypeData::receiver_offset(i)));
|
||||
__ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
|
||||
__ ldr(rscratch1, Address(rscratch2));
|
||||
__ cmp(recv, rscratch1);
|
||||
__ br(Assembler::NE, next_test);
|
||||
__ addptr(slot_at(ReceiverTypeData::receiver_count_offset(i)),
|
||||
DataLayout::counter_increment);
|
||||
Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
|
||||
__ addptr(data_addr, DataLayout::counter_increment);
|
||||
__ b(*update_done);
|
||||
__ bind(next_test);
|
||||
}
|
||||
@@ -1242,12 +1233,15 @@ void LIR_Assembler::type_profile_helper(Register mdo,
|
||||
// Didn't find receiver; find next empty slot and fill it in
|
||||
for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
|
||||
Label next_test;
|
||||
Address recv_addr(slot_at(ReceiverTypeData::receiver_offset(i)));
|
||||
__ lea(rscratch2,
|
||||
Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
|
||||
Address recv_addr(rscratch2);
|
||||
__ ldr(rscratch1, recv_addr);
|
||||
__ cbnz(rscratch1, next_test);
|
||||
__ str(recv, recv_addr);
|
||||
__ mov(rscratch1, DataLayout::counter_increment);
|
||||
__ str(rscratch1, slot_at(ReceiverTypeData::receiver_count_offset(i)));
|
||||
__ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
|
||||
__ str(rscratch1, Address(rscratch2));
|
||||
__ b(*update_done);
|
||||
__ bind(next_test);
|
||||
}
|
||||
@@ -1419,7 +1413,8 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
|
||||
// Object is null; update MDO and exit
|
||||
Address data_addr
|
||||
= __ form_address(rscratch2, mdo,
|
||||
md->byte_offset_of_slot(data, DataLayout::flags_offset()), 0);
|
||||
md->byte_offset_of_slot(data, DataLayout::flags_offset()),
|
||||
0);
|
||||
__ ldrb(rscratch1, data_addr);
|
||||
__ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
|
||||
__ strb(rscratch1, data_addr);
|
||||
@@ -2570,12 +2565,10 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
|
||||
for (i = 0; i < VirtualCallData::row_limit(); i++) {
|
||||
ciKlass* receiver = vc_data->receiver(i);
|
||||
if (receiver == nullptr) {
|
||||
Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
|
||||
__ mov_metadata(rscratch1, known_klass->constant_encoding());
|
||||
Address recv_addr =
|
||||
__ form_address(rscratch2, mdo,
|
||||
md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)),
|
||||
LogBytesPerWord);
|
||||
__ str(rscratch1, recv_addr);
|
||||
__ lea(rscratch2, recv_addr);
|
||||
__ str(rscratch1, Address(rscratch2));
|
||||
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
|
||||
__ addptr(data_addr, DataLayout::counter_increment);
|
||||
return;
|
||||
|
||||
@@ -1173,10 +1173,7 @@ public:
|
||||
|
||||
// Arithmetics
|
||||
|
||||
// Clobber: rscratch1, rscratch2
|
||||
void addptr(const Address &dst, int32_t src);
|
||||
|
||||
// Clobber: rscratch1
|
||||
void cmpptr(Register src1, Address src2);
|
||||
|
||||
void cmpoop(Register obj1, Register obj2);
|
||||
|
||||
@@ -1,140 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2025, Red Hat, Inc. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_AARCH64_STUBDECLARATIONS_HPP
|
||||
#define CPU_AARCH64_STUBDECLARATIONS_HPP
|
||||
|
||||
#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(initial, 10000) \
|
||||
|
||||
|
||||
#define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(continuation, 2000) \
|
||||
|
||||
|
||||
#define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(compiler, 30000 ZGC_ONLY(+10000)) \
|
||||
do_stub(compiler, vector_iota_indices) \
|
||||
do_arch_entry(aarch64, compiler, vector_iota_indices, \
|
||||
vector_iota_indices, vector_iota_indices) \
|
||||
do_stub(compiler, large_array_equals) \
|
||||
do_arch_entry(aarch64, compiler, large_array_equals, \
|
||||
large_array_equals, large_array_equals) \
|
||||
do_stub(compiler, large_arrays_hashcode_boolean) \
|
||||
do_arch_entry(aarch64, compiler, large_arrays_hashcode_boolean, \
|
||||
large_arrays_hashcode_boolean, \
|
||||
large_arrays_hashcode_boolean) \
|
||||
do_stub(compiler, large_arrays_hashcode_byte) \
|
||||
do_arch_entry(aarch64, compiler, large_arrays_hashcode_byte, \
|
||||
large_arrays_hashcode_byte, \
|
||||
large_arrays_hashcode_byte) \
|
||||
do_stub(compiler, large_arrays_hashcode_char) \
|
||||
do_arch_entry(aarch64, compiler, large_arrays_hashcode_char, \
|
||||
large_arrays_hashcode_char, \
|
||||
large_arrays_hashcode_char) \
|
||||
do_stub(compiler, large_arrays_hashcode_short) \
|
||||
do_arch_entry(aarch64, compiler, large_arrays_hashcode_short, \
|
||||
large_arrays_hashcode_short, \
|
||||
large_arrays_hashcode_short) \
|
||||
do_stub(compiler, large_arrays_hashcode_int) \
|
||||
do_arch_entry(aarch64, compiler, large_arrays_hashcode_int, \
|
||||
large_arrays_hashcode_int, \
|
||||
large_arrays_hashcode_int) \
|
||||
do_stub(compiler, large_byte_array_inflate) \
|
||||
do_arch_entry(aarch64, compiler, large_byte_array_inflate, \
|
||||
large_byte_array_inflate, large_byte_array_inflate) \
|
||||
do_stub(compiler, count_positives) \
|
||||
do_arch_entry(aarch64, compiler, count_positives, count_positives, \
|
||||
count_positives) \
|
||||
do_stub(compiler, count_positives_long) \
|
||||
do_arch_entry(aarch64, compiler, count_positives_long, \
|
||||
count_positives_long, count_positives_long) \
|
||||
do_stub(compiler, compare_long_string_LL) \
|
||||
do_arch_entry(aarch64, compiler, compare_long_string_LL, \
|
||||
compare_long_string_LL, compare_long_string_LL) \
|
||||
do_stub(compiler, compare_long_string_UU) \
|
||||
do_arch_entry(aarch64, compiler, compare_long_string_UU, \
|
||||
compare_long_string_UU, compare_long_string_UU) \
|
||||
do_stub(compiler, compare_long_string_LU) \
|
||||
do_arch_entry(aarch64, compiler, compare_long_string_LU, \
|
||||
compare_long_string_LU, compare_long_string_LU) \
|
||||
do_stub(compiler, compare_long_string_UL) \
|
||||
do_arch_entry(aarch64, compiler, compare_long_string_UL, \
|
||||
compare_long_string_UL, compare_long_string_UL) \
|
||||
do_stub(compiler, string_indexof_linear_ll) \
|
||||
do_arch_entry(aarch64, compiler, string_indexof_linear_ll, \
|
||||
string_indexof_linear_ll, string_indexof_linear_ll) \
|
||||
do_stub(compiler, string_indexof_linear_uu) \
|
||||
do_arch_entry(aarch64, compiler, string_indexof_linear_uu, \
|
||||
string_indexof_linear_uu, string_indexof_linear_uu) \
|
||||
do_stub(compiler, string_indexof_linear_ul) \
|
||||
do_arch_entry(aarch64, compiler, string_indexof_linear_ul, \
|
||||
string_indexof_linear_ul, string_indexof_linear_ul) \
|
||||
/* this uses the entry for ghash_processBlocks */ \
|
||||
do_stub(compiler, ghash_processBlocks_wide) \
|
||||
|
||||
|
||||
#define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(final, 20000 ZGC_ONLY(+100000)) \
|
||||
do_stub(final, copy_byte_f) \
|
||||
do_arch_entry(aarch64, final, copy_byte_f, copy_byte_f, \
|
||||
copy_byte_f) \
|
||||
do_stub(final, copy_byte_b) \
|
||||
do_arch_entry(aarch64, final, copy_byte_b, copy_byte_b, \
|
||||
copy_byte_b) \
|
||||
do_stub(final, copy_oop_f) \
|
||||
do_arch_entry(aarch64, final, copy_oop_f, copy_oop_f, copy_oop_f) \
|
||||
do_stub(final, copy_oop_b) \
|
||||
do_arch_entry(aarch64, final, copy_oop_b, copy_oop_b, copy_oop_b) \
|
||||
do_stub(final, copy_oop_uninit_f) \
|
||||
do_arch_entry(aarch64, final, copy_oop_uninit_f, copy_oop_uninit_f, \
|
||||
copy_oop_uninit_f) \
|
||||
do_stub(final, copy_oop_uninit_b) \
|
||||
do_arch_entry(aarch64, final, copy_oop_uninit_b, copy_oop_uninit_b, \
|
||||
copy_oop_uninit_b) \
|
||||
do_stub(final, zero_blocks) \
|
||||
do_arch_entry(aarch64, final, zero_blocks, zero_blocks, \
|
||||
zero_blocks) \
|
||||
do_stub(final, spin_wait) \
|
||||
do_arch_entry_init(aarch64, final, spin_wait, spin_wait, \
|
||||
spin_wait, empty_spin_wait) \
|
||||
/* stub only -- entries are not stored in StubRoutines::aarch64 */ \
|
||||
/* n.b. these are not the same as the generic atomic stubs */ \
|
||||
do_stub(final, atomic_entry_points) \
|
||||
|
||||
|
||||
#endif // CPU_AARCH64_STUBDECLARATIONS_HPP
|
||||
File diff suppressed because it is too large
@@ -29,22 +29,40 @@
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
// function used as default for spin_wait stub
|
||||
// Implementation of the platform-specific part of StubRoutines - for
|
||||
// a description of how to extend it, see the stubRoutines.hpp file.
|
||||
|
||||
address StubRoutines::aarch64::_get_previous_sp_entry = nullptr;
|
||||
|
||||
address StubRoutines::aarch64::_f2i_fixup = nullptr;
|
||||
address StubRoutines::aarch64::_f2l_fixup = nullptr;
|
||||
address StubRoutines::aarch64::_d2i_fixup = nullptr;
|
||||
address StubRoutines::aarch64::_d2l_fixup = nullptr;
|
||||
address StubRoutines::aarch64::_vector_iota_indices = nullptr;
|
||||
address StubRoutines::aarch64::_float_sign_mask = nullptr;
|
||||
address StubRoutines::aarch64::_float_sign_flip = nullptr;
|
||||
address StubRoutines::aarch64::_double_sign_mask = nullptr;
|
||||
address StubRoutines::aarch64::_double_sign_flip = nullptr;
|
||||
address StubRoutines::aarch64::_zero_blocks = nullptr;
|
||||
address StubRoutines::aarch64::_count_positives = nullptr;
|
||||
address StubRoutines::aarch64::_count_positives_long = nullptr;
|
||||
address StubRoutines::aarch64::_large_array_equals = nullptr;
|
||||
address StubRoutines::aarch64::_large_arrays_hashcode_boolean = nullptr;
|
||||
address StubRoutines::aarch64::_large_arrays_hashcode_byte = nullptr;
|
||||
address StubRoutines::aarch64::_large_arrays_hashcode_char = nullptr;
|
||||
address StubRoutines::aarch64::_large_arrays_hashcode_int = nullptr;
|
||||
address StubRoutines::aarch64::_large_arrays_hashcode_short = nullptr;
|
||||
address StubRoutines::aarch64::_compare_long_string_LL = nullptr;
|
||||
address StubRoutines::aarch64::_compare_long_string_UU = nullptr;
|
||||
address StubRoutines::aarch64::_compare_long_string_LU = nullptr;
|
||||
address StubRoutines::aarch64::_compare_long_string_UL = nullptr;
|
||||
address StubRoutines::aarch64::_string_indexof_linear_ll = nullptr;
|
||||
address StubRoutines::aarch64::_string_indexof_linear_uu = nullptr;
|
||||
address StubRoutines::aarch64::_string_indexof_linear_ul = nullptr;
|
||||
address StubRoutines::aarch64::_large_byte_array_inflate = nullptr;
|
||||
|
||||
static void empty_spin_wait() { }
|
||||
|
||||
// define fields for arch-specific entries
|
||||
|
||||
#define DEFINE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \
|
||||
address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = nullptr;
|
||||
|
||||
#define DEFINE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
|
||||
address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function);
|
||||
|
||||
STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT)
|
||||
|
||||
#undef DEFINE_ARCH_ENTRY_INIT
|
||||
#undef DEFINE_ARCH_ENTRY
|
||||
address StubRoutines::aarch64::_spin_wait = CAST_FROM_FN_PTR(address, empty_spin_wait);
|
||||
|
||||
bool StubRoutines::aarch64::_completed = false;
|
||||
|
||||
|
||||
@@ -34,66 +34,134 @@ static bool returns_to_call_stub(address return_pc) {
|
||||
return return_pc == _call_stub_return_address;
|
||||
}
|
||||
|
||||
// emit enum used to size per-blob code buffers
|
||||
|
||||
#define DEFINE_BLOB_SIZE(blob_name, size) \
|
||||
_ ## blob_name ## _code_size = size,
|
||||
|
||||
enum platform_dependent_constants {
|
||||
STUBGEN_ARCH_BLOBS_DO(DEFINE_BLOB_SIZE)
|
||||
// simply increase sizes if too small (assembler will crash if too small)
|
||||
_initial_stubs_code_size = 10000,
|
||||
_continuation_stubs_code_size = 2000,
|
||||
_compiler_stubs_code_size = 30000 ZGC_ONLY(+10000),
|
||||
_final_stubs_code_size = 20000 ZGC_ONLY(+100000)
|
||||
};
|
||||
|
||||
#undef DEFINE_BLOB_SIZE
|
||||
|
||||
class aarch64 {
|
||||
friend class StubGenerator;
|
||||
#if INCLUDE_JVMCI
|
||||
friend class JVMCIVMStructs;
|
||||
#endif
|
||||
|
||||
// declare fields for arch-specific entries
|
||||
private:
|
||||
static address _get_previous_sp_entry;
|
||||
|
||||
#define DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \
|
||||
static address STUB_FIELD_NAME(field_name) ;
|
||||
static address _f2i_fixup;
|
||||
static address _f2l_fixup;
|
||||
static address _d2i_fixup;
|
||||
static address _d2l_fixup;
|
||||
|
||||
#define DECLARE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
|
||||
DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name)
|
||||
static address _vector_iota_indices;
|
||||
static address _float_sign_mask;
|
||||
static address _float_sign_flip;
|
||||
static address _double_sign_mask;
|
||||
static address _double_sign_flip;
|
||||
|
||||
private:
|
||||
STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT)
|
||||
static address _zero_blocks;
|
||||
|
||||
#undef DECLARE_ARCH_ENTRY_INIT
|
||||
#undef DECLARE_ARCH_ENTRY
|
||||
static address _large_array_equals;
|
||||
static address _large_arrays_hashcode_boolean;
|
||||
static address _large_arrays_hashcode_byte;
|
||||
static address _large_arrays_hashcode_char;
|
||||
static address _large_arrays_hashcode_int;
|
||||
static address _large_arrays_hashcode_short;
|
||||
static address _compare_long_string_LL;
|
||||
static address _compare_long_string_LU;
|
||||
static address _compare_long_string_UL;
|
||||
static address _compare_long_string_UU;
|
||||
static address _string_indexof_linear_ll;
|
||||
static address _string_indexof_linear_uu;
|
||||
static address _string_indexof_linear_ul;
|
||||
static address _large_byte_array_inflate;
|
||||
|
||||
static address _spin_wait;
|
||||
|
||||
static bool _completed;
|
||||
|
||||
public:
|
||||
|
||||
// declare getters for arch-specific entries
|
||||
static address _count_positives;
|
||||
static address _count_positives_long;
|
||||
|
||||
#define DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name) \
|
||||
static address getter_name() { return STUB_FIELD_NAME(field_name) ; }
|
||||
static address get_previous_sp_entry()
|
||||
{
|
||||
return _get_previous_sp_entry;
|
||||
}
|
||||
|
||||
#define DEFINE_ARCH_ENTRY_GETTER_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
|
||||
DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name)
|
||||
static address f2i_fixup()
|
||||
{
|
||||
return _f2i_fixup;
|
||||
}
|
||||
|
||||
STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT)
|
||||
static address f2l_fixup()
|
||||
{
|
||||
return _f2l_fixup;
|
||||
}
|
||||
|
||||
#undef DEFINE_ARCH_ENTRY_GETTER_INIT
|
||||
#undef DEFINE_ARCH_ENTRY_GETTER
|
||||
static address d2i_fixup()
|
||||
{
|
||||
return _d2i_fixup;
|
||||
}
|
||||
|
||||
static address d2l_fixup()
|
||||
{
|
||||
return _d2l_fixup;
|
||||
}
|
||||
|
||||
static address vector_iota_indices() {
|
||||
return _vector_iota_indices;
|
||||
}
|
||||
|
||||
static address float_sign_mask()
|
||||
{
|
||||
return _float_sign_mask;
|
||||
}
|
||||
|
||||
static address float_sign_flip()
|
||||
{
|
||||
return _float_sign_flip;
|
||||
}
|
||||
|
||||
static address double_sign_mask()
|
||||
{
|
||||
return _double_sign_mask;
|
||||
}
|
||||
|
||||
static address double_sign_flip()
|
||||
{
|
||||
return _double_sign_flip;
|
||||
}
|
||||
|
||||
static address zero_blocks() {
|
||||
return _zero_blocks;
|
||||
}
|
||||
|
||||
static address count_positives() {
|
||||
return _count_positives;
|
||||
}
|
||||
|
||||
static address count_positives_long() {
|
||||
return _count_positives_long;
|
||||
}
|
||||
|
||||
static address large_array_equals() {
|
||||
return _large_array_equals;
|
||||
}
|
||||
|
||||
static address large_arrays_hashcode(BasicType eltype) {
|
||||
switch (eltype) {
|
||||
case T_BOOLEAN:
|
||||
return large_arrays_hashcode_boolean();
|
||||
return _large_arrays_hashcode_boolean;
|
||||
case T_BYTE:
|
||||
return large_arrays_hashcode_byte();
|
||||
return _large_arrays_hashcode_byte;
|
||||
case T_CHAR:
|
||||
return large_arrays_hashcode_char();
|
||||
return _large_arrays_hashcode_char;
|
||||
case T_SHORT:
|
||||
return large_arrays_hashcode_short();
|
||||
return _large_arrays_hashcode_short;
|
||||
case T_INT:
|
||||
return large_arrays_hashcode_int();
|
||||
return _large_arrays_hashcode_int;
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
@@ -101,6 +169,42 @@ private:
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
static address compare_long_string_LL() {
|
||||
return _compare_long_string_LL;
|
||||
}
|
||||
|
||||
static address compare_long_string_LU() {
|
||||
return _compare_long_string_LU;
|
||||
}
|
||||
|
||||
static address compare_long_string_UL() {
|
||||
return _compare_long_string_UL;
|
||||
}
|
||||
|
||||
static address compare_long_string_UU() {
|
||||
return _compare_long_string_UU;
|
||||
}
|
||||
|
||||
static address string_indexof_linear_ul() {
|
||||
return _string_indexof_linear_ul;
|
||||
}
|
||||
|
||||
static address string_indexof_linear_ll() {
|
||||
return _string_indexof_linear_ll;
|
||||
}
|
||||
|
||||
static address string_indexof_linear_uu() {
|
||||
return _string_indexof_linear_uu;
|
||||
}
|
||||
|
||||
static address large_byte_array_inflate() {
|
||||
return _large_byte_array_inflate;
|
||||
}
|
||||
|
||||
static address spin_wait() {
|
||||
return _spin_wait;
|
||||
}
|
||||
|
||||
static bool complete() {
|
||||
return _completed;
|
||||
}
|
||||
|
||||
@@ -1,68 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2025, Red Hat, Inc. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_ARM_STUBDECLARATIONS_HPP
|
||||
#define CPU_ARM_STUBDECLARATIONS_HPP
|
||||
|
||||
#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(initial, 9000) \
|
||||
do_stub(initial, idiv_irem) \
|
||||
do_arch_entry(Arm, initial, idiv_irem, \
|
||||
idiv_irem_entry, idiv_irem_entry) \
|
||||
do_stub(initial, atomic_load_long) \
|
||||
do_arch_entry(Arm, initial, atomic_load_long, \
|
||||
atomic_load_long_entry, atomic_load_long_entry) \
|
||||
do_stub(initial, atomic_store_long) \
|
||||
do_arch_entry(Arm, initial, atomic_load_long, \
|
||||
atomic_store_long_entry, atomic_store_long_entry) \
|
||||
|
||||
#define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(continuation, 2000) \
|
||||
|
||||
|
||||
#define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(compiler, 22000) \
|
||||
do_stub(compiler, partial_subtype_check) \
|
||||
do_arch_entry(Arm, compiler, partial_subtype_check, \
|
||||
partial_subtype_check, partial_subtype_check) \
|
||||
|
||||
|
||||
#define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(final, 22000) \
|
||||
|
||||
|
||||
#endif // CPU_ARM_STUBDECLARATIONS_HPP
|
||||
@@ -172,8 +172,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
private:
|
||||
|
||||
address generate_call_stub(address& return_address) {
|
||||
StubGenStubId stub_id = StubGenStubId::call_stub_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "call_stub");
|
||||
address start = __ pc();
|
||||
|
||||
|
||||
@@ -252,8 +251,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
// (in) Rexception_obj: exception oop
|
||||
address generate_catch_exception() {
|
||||
StubGenStubId stub_id = StubGenStubId::catch_exception_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "catch_exception");
|
||||
address start = __ pc();
|
||||
|
||||
__ str(Rexception_obj, Address(Rthread, Thread::pending_exception_offset()));
|
||||
@@ -265,8 +263,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
// (in) Rexception_pc: return address
|
||||
address generate_forward_exception() {
|
||||
StubGenStubId stub_id = StubGenStubId::forward_exception_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "forward exception");
|
||||
address start = __ pc();
|
||||
|
||||
__ mov(c_rarg0, Rthread);
|
||||
@@ -315,8 +312,6 @@ class StubGenerator: public StubCodeGenerator {
|
||||
Register tmp = LR;
|
||||
assert(dividend == remainder, "must be");
|
||||
|
||||
StubGenStubId stub_id = StubGenStubId::idiv_irem_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
address start = __ pc();
|
||||
|
||||
// Check for special cases: divisor <= 0 or dividend < 0
|
||||
@@ -458,8 +453,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
address generate_atomic_add() {
|
||||
address start;
|
||||
|
||||
StubGenStubId stub_id = StubGenStubId::atomic_add_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "atomic_add");
|
||||
Label retry;
|
||||
start = __ pc();
|
||||
Register addval = R0;
|
||||
@@ -510,8 +504,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
address generate_atomic_xchg() {
|
||||
address start;
|
||||
|
||||
StubGenStubId stub_id = StubGenStubId::atomic_xchg_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
|
||||
start = __ pc();
|
||||
Register newval = R0;
|
||||
Register dest = R1;
|
||||
@@ -561,8 +554,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
address generate_atomic_cmpxchg() {
|
||||
address start;
|
||||
|
||||
StubGenStubId stub_id = StubGenStubId::atomic_cmpxchg_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
|
||||
start = __ pc();
|
||||
Register cmp = R0;
|
||||
Register newval = R1;
|
||||
@@ -600,8 +592,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
address generate_atomic_cmpxchg_long() {
|
||||
address start;
|
||||
|
||||
StubGenStubId stub_id = StubGenStubId::atomic_cmpxchg_long_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
|
||||
start = __ pc();
|
||||
Register cmp_lo = R0;
|
||||
Register cmp_hi = R1;
|
||||
@@ -638,8 +629,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
address generate_atomic_load_long() {
|
||||
address start;
|
||||
|
||||
StubGenStubId stub_id = StubGenStubId::atomic_load_long_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "atomic_load_long");
|
||||
start = __ pc();
|
||||
Register result_lo = R0;
|
||||
Register result_hi = R1;
|
||||
@@ -663,8 +653,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
address generate_atomic_store_long() {
|
||||
address start;
|
||||
|
||||
StubGenStubId stub_id = StubGenStubId::atomic_store_long_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "atomic_store_long");
|
||||
start = __ pc();
|
||||
Register newval_lo = R0;
|
||||
Register newval_hi = R1;
|
||||
@@ -706,8 +695,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// raddr: LR, blown by call
|
||||
address generate_partial_subtype_check() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::partial_subtype_check_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "partial_subtype_check");
|
||||
address start = __ pc();
|
||||
|
||||
// based on SPARC check_klass_subtype_[fast|slow]_path (without CompressedOops)
|
||||
@@ -796,8 +784,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// Non-destructive plausibility checks for oops
|
||||
|
||||
address generate_verify_oop() {
|
||||
StubGenStubId stub_id = StubGenStubId::verify_oop_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "verify_oop");
|
||||
address start = __ pc();
|
||||
|
||||
// Incoming arguments:
|
||||
@@ -1998,23 +1985,6 @@ class StubGenerator: public StubCodeGenerator {
|
||||
return start_pc;
|
||||
}
|
||||
|
||||
/* Internal development flag */
|
||||
/* enabled by defining TEST_C2_GENERIC_ARRAYCOPY */
|
||||
|
||||
// With this flag, the C2 stubs are tested by generating calls to
|
||||
// generic_arraycopy instead of Runtime1::arraycopy
|
||||
|
||||
// Runtime1::arraycopy return a status in R0 (0 if OK, else ~copied)
|
||||
// and the result is tested to see whether the arraycopy stub should
|
||||
// be called.
|
||||
|
||||
// When we test arraycopy this way, we must generate extra code in the
|
||||
// arraycopy methods callable from C2 generic_arraycopy to set the
|
||||
// status to 0 for those who always succeed (calling the slow path stub might
|
||||
// lead to errors since the copy has already been performed).
|
||||
|
||||
static const bool set_status;
|
||||
|
||||
//
|
||||
// Generate stub for primitive array copy. If "aligned" is true, the
|
||||
// "from" and "to" addresses are assumed to be heapword aligned.
|
||||
@@ -2027,109 +1997,9 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// to: R1
|
||||
// count: R2 treated as signed 32-bit int
|
||||
//
|
||||
address generate_primitive_copy(StubGenStubId stub_id, address nooverlap_target = nullptr) {
|
||||
bool aligned;
|
||||
bool status;
|
||||
int bytes_per_count;
|
||||
bool disjoint;
|
||||
|
||||
switch (stub_id) {
|
||||
case jbyte_disjoint_arraycopy_id:
|
||||
aligned = false;
|
||||
status = true;
|
||||
bytes_per_count = 1;
|
||||
disjoint = true;
|
||||
break;
|
||||
case jshort_disjoint_arraycopy_id:
|
||||
aligned = false;
|
||||
status = true;
|
||||
bytes_per_count = 2;
|
||||
disjoint = true;
|
||||
break;
|
||||
case jint_disjoint_arraycopy_id:
|
||||
aligned = false;
|
||||
status = true;
|
||||
bytes_per_count = 4;
|
||||
disjoint = true;
|
||||
break;
|
||||
case jlong_disjoint_arraycopy_id:
|
||||
aligned = false;
|
||||
status = true;
|
||||
bytes_per_count = 8;
|
||||
disjoint = true;
|
||||
break;
|
||||
case arrayof_jbyte_disjoint_arraycopy_id:
|
||||
aligned = true;
|
||||
status = set_status;
|
||||
bytes_per_count = 1;
|
||||
disjoint = true;
|
||||
break;
|
||||
case arrayof_jshort_disjoint_arraycopy_id:
|
||||
aligned = true;
|
||||
status = set_status;
|
||||
bytes_per_count = 2;
|
||||
disjoint = true;
|
||||
break;
|
||||
case arrayof_jint_disjoint_arraycopy_id:
|
||||
aligned = true;
|
||||
status = set_status;
|
||||
bytes_per_count = 4;
|
||||
disjoint = true;
|
||||
break;
|
||||
case arrayof_jlong_disjoint_arraycopy_id:
|
||||
aligned = false;
|
||||
status = set_status;
|
||||
bytes_per_count = 8;
|
||||
disjoint = true;
|
||||
break;
|
||||
case jbyte_arraycopy_id:
|
||||
aligned = false;
|
||||
status = true;
|
||||
bytes_per_count = 1;
|
||||
disjoint = false;
|
||||
break;
|
||||
case jshort_arraycopy_id:
|
||||
aligned = false;
|
||||
status = true;
|
||||
bytes_per_count = 2;
|
||||
disjoint = false;
|
||||
break;
|
||||
case jint_arraycopy_id:
|
||||
aligned = false;
|
||||
status = true;
|
||||
bytes_per_count = 4;
|
||||
disjoint = false;
|
||||
break;
|
||||
case jlong_arraycopy_id:
|
||||
aligned = false;
|
||||
status = true;
|
||||
bytes_per_count = 8;
|
||||
disjoint = false;
|
||||
break;
|
||||
case arrayof_jbyte_arraycopy_id:
|
||||
aligned = true;
|
||||
status = set_status;
|
||||
bytes_per_count = 1;
|
||||
disjoint = false;
|
||||
break;
|
||||
case arrayof_jshort_arraycopy_id:
|
||||
aligned = true;
|
||||
status = set_status;
|
||||
bytes_per_count = 2;
|
||||
disjoint = false;
|
||||
break;
|
||||
case arrayof_jint_arraycopy_id:
|
||||
aligned = true;
|
||||
status = set_status;
|
||||
bytes_per_count = 4;
|
||||
disjoint = false;
|
||||
break;
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
|
||||
address generate_primitive_copy(bool aligned, const char * name, bool status, int bytes_per_count, bool disjoint, address nooverlap_target = nullptr) {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address start = __ pc();
|
||||
|
||||
const Register from = R0; // source array address
|
||||
@@ -2301,38 +2171,9 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// to: R1
|
||||
// count: R2 treated as signed 32-bit int
|
||||
//
|
||||
address generate_oop_copy(StubGenStubId stub_id, address nooverlap_target = nullptr) {
|
||||
bool aligned;
|
||||
bool status;
|
||||
bool disjoint;
|
||||
|
||||
switch (stub_id) {
|
||||
case oop_disjoint_arraycopy_id:
|
||||
aligned = false;
|
||||
status = true;
|
||||
disjoint = true;
|
||||
break;
|
||||
case arrayof_oop_disjoint_arraycopy_id:
|
||||
aligned = true;
|
||||
status = set_status;
|
||||
disjoint = true;
|
||||
break;
|
||||
case oop_arraycopy_id:
|
||||
aligned = false;
|
||||
status = true;
|
||||
disjoint = false;
|
||||
break;
|
||||
case arrayof_oop_arraycopy_id:
|
||||
aligned = true;
|
||||
status = set_status;
|
||||
disjoint = false;
|
||||
break;
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
|
||||
address generate_oop_copy(bool aligned, const char * name, bool status, bool disjoint, address nooverlap_target = nullptr) {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address start = __ pc();
|
||||
|
||||
Register from = R0;
|
||||
@@ -2467,7 +2308,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// Examines the alignment of the operands and dispatches
|
||||
// to a long, int, short, or byte copy loop.
|
||||
//
|
||||
address generate_unsafe_copy() {
|
||||
address generate_unsafe_copy(const char* name) {
|
||||
|
||||
const Register R0_from = R0; // source array address
|
||||
const Register R1_to = R1; // destination array address
|
||||
@@ -2476,8 +2317,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
const Register R3_bits = R3; // test copy of low bits
|
||||
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::unsafe_arraycopy_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address start = __ pc();
|
||||
const Register tmp = Rtemp;
|
||||
|
||||
@@ -2602,10 +2442,9 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// ckval: R4 (super_klass)
|
||||
// ret: R0 zero for success; (-1^K) where K is partial transfer count (32-bit)
|
||||
//
|
||||
address generate_checkcast_copy() {
|
||||
address generate_checkcast_copy(const char * name) {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::checkcast_arraycopy_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address start = __ pc();
|
||||
|
||||
const Register from = R0; // source array address
|
||||
@@ -2756,7 +2595,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// R0 == 0 - success
|
||||
// R0 < 0 - need to call System.arraycopy
|
||||
//
|
||||
address generate_generic_copy() {
|
||||
address generate_generic_copy(const char *name) {
|
||||
Label L_failed, L_objArray;
|
||||
|
||||
// Input registers
|
||||
@@ -2772,8 +2611,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
const Register R8_temp = R8;
|
||||
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::generic_arraycopy_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address start = __ pc();
|
||||
|
||||
__ zap_high_non_significant_bits(R1);
|
||||
@@ -3004,55 +2842,72 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// Note: the disjoint stubs must be generated first, some of
|
||||
// the conjoint stubs use them.
|
||||
|
||||
bool status = false; // non failing C2 stubs need not return a status in R0
|
||||
|
||||
#ifdef TEST_C2_GENERIC_ARRAYCOPY /* Internal development flag */
|
||||
// With this flag, the C2 stubs are tested by generating calls to
|
||||
// generic_arraycopy instead of Runtime1::arraycopy
|
||||
|
||||
// Runtime1::arraycopy return a status in R0 (0 if OK, else ~copied)
|
||||
// and the result is tested to see whether the arraycopy stub should
|
||||
// be called.
|
||||
|
||||
// When we test arraycopy this way, we must generate extra code in the
|
||||
// arraycopy methods callable from C2 generic_arraycopy to set the
|
||||
// status to 0 for those who always succeed (calling the slow path stub might
|
||||
// lead to errors since the copy has already been performed).
|
||||
|
||||
status = true; // generate a status compatible with C1 calls
|
||||
#endif
|
||||
|
||||
address ucm_common_error_exit = generate_unsafecopy_common_error_exit();
|
||||
UnsafeMemoryAccess::set_common_exit_stub_pc(ucm_common_error_exit);
|
||||
|
||||
// these need always status in case they are called from generic_arraycopy
|
||||
StubRoutines::_jbyte_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::jbyte_disjoint_arraycopy_id);
|
||||
StubRoutines::_jshort_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::jshort_disjoint_arraycopy_id);
|
||||
StubRoutines::_jint_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::jint_disjoint_arraycopy_id);
|
||||
StubRoutines::_jlong_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::jlong_disjoint_arraycopy_id);
|
||||
StubRoutines::_oop_disjoint_arraycopy = generate_oop_copy (StubGenStubId::oop_disjoint_arraycopy_id);
|
||||
StubRoutines::_jbyte_disjoint_arraycopy = generate_primitive_copy(false, "jbyte_disjoint_arraycopy", true, 1, true);
|
||||
StubRoutines::_jshort_disjoint_arraycopy = generate_primitive_copy(false, "jshort_disjoint_arraycopy", true, 2, true);
|
||||
StubRoutines::_jint_disjoint_arraycopy = generate_primitive_copy(false, "jint_disjoint_arraycopy", true, 4, true);
|
||||
StubRoutines::_jlong_disjoint_arraycopy = generate_primitive_copy(false, "jlong_disjoint_arraycopy", true, 8, true);
|
||||
StubRoutines::_oop_disjoint_arraycopy = generate_oop_copy (false, "oop_disjoint_arraycopy", true, true);
|
||||
|
||||
StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::arrayof_jbyte_disjoint_arraycopy_id);
|
||||
StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::arrayof_jshort_disjoint_arraycopy_id);
|
||||
StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::arrayof_jint_disjoint_arraycopy_id);
|
||||
StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::arrayof_jlong_disjoint_arraycopy_id);
|
||||
StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_oop_copy (StubGenStubId::arrayof_oop_disjoint_arraycopy_id);
|
||||
StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_primitive_copy(true, "arrayof_jbyte_disjoint_arraycopy", status, 1, true);
|
||||
StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_primitive_copy(true, "arrayof_jshort_disjoint_arraycopy",status, 2, true);
|
||||
StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_primitive_copy(true, "arrayof_jint_disjoint_arraycopy", status, 4, true);
|
||||
StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_primitive_copy(true, "arrayof_jlong_disjoint_arraycopy", status, 8, true);
|
||||
StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_oop_copy (true, "arrayof_oop_disjoint_arraycopy", status, true);
|
||||
|
||||
// these need always status in case they are called from generic_arraycopy
|
||||
StubRoutines::_jbyte_arraycopy = generate_primitive_copy(StubGenStubId::jbyte_arraycopy_id, StubRoutines::_jbyte_disjoint_arraycopy);
|
||||
StubRoutines::_jshort_arraycopy = generate_primitive_copy(StubGenStubId::jshort_arraycopy_id, StubRoutines::_jshort_disjoint_arraycopy);
|
||||
StubRoutines::_jint_arraycopy = generate_primitive_copy(StubGenStubId::jint_arraycopy_id, StubRoutines::_jint_disjoint_arraycopy);
|
||||
StubRoutines::_jlong_arraycopy = generate_primitive_copy(StubGenStubId::jlong_arraycopy_id, StubRoutines::_jlong_disjoint_arraycopy);
|
||||
StubRoutines::_oop_arraycopy = generate_oop_copy (StubGenStubId::oop_arraycopy_id, StubRoutines::_oop_disjoint_arraycopy);
|
||||
StubRoutines::_jbyte_arraycopy = generate_primitive_copy(false, "jbyte_arraycopy", true, 1, false, StubRoutines::_jbyte_disjoint_arraycopy);
|
||||
StubRoutines::_jshort_arraycopy = generate_primitive_copy(false, "jshort_arraycopy", true, 2, false, StubRoutines::_jshort_disjoint_arraycopy);
|
||||
StubRoutines::_jint_arraycopy = generate_primitive_copy(false, "jint_arraycopy", true, 4, false, StubRoutines::_jint_disjoint_arraycopy);
|
||||
StubRoutines::_jlong_arraycopy = generate_primitive_copy(false, "jlong_arraycopy", true, 8, false, StubRoutines::_jlong_disjoint_arraycopy);
|
||||
StubRoutines::_oop_arraycopy = generate_oop_copy (false, "oop_arraycopy", true, false, StubRoutines::_oop_disjoint_arraycopy);
|
||||
|
||||
StubRoutines::_arrayof_jbyte_arraycopy = generate_primitive_copy(StubGenStubId::arrayof_jbyte_arraycopy_id, StubRoutines::_arrayof_jbyte_disjoint_arraycopy);
|
||||
StubRoutines::_arrayof_jshort_arraycopy = generate_primitive_copy(StubGenStubId::arrayof_jshort_arraycopy_id, StubRoutines::_arrayof_jshort_disjoint_arraycopy);
|
||||
StubRoutines::_arrayof_jbyte_arraycopy = generate_primitive_copy(true, "arrayof_jbyte_arraycopy", status, 1, false, StubRoutines::_arrayof_jbyte_disjoint_arraycopy);
|
||||
StubRoutines::_arrayof_jshort_arraycopy = generate_primitive_copy(true, "arrayof_jshort_arraycopy", status, 2, false, StubRoutines::_arrayof_jshort_disjoint_arraycopy);
|
||||
#ifdef _LP64
|
||||
// since sizeof(jint) < sizeof(HeapWord), there's a different flavor:
|
||||
StubRoutines::_arrayof_jint_arraycopy = generate_primitive_copy(StubGenStubId::arrayof_jint_arraycopy_id, StubRoutines::_arrayof_jint_disjoint_arraycopy);
|
||||
StubRoutines::_arrayof_jint_arraycopy = generate_primitive_copy(true, "arrayof_jint_arraycopy", status, 4, false, StubRoutines::_arrayof_jint_disjoint_arraycopy);
|
||||
#else
|
||||
StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy;
|
||||
#endif
|
||||
if (BytesPerHeapOop < HeapWordSize) {
|
||||
StubRoutines::_arrayof_oop_arraycopy = generate_oop_copy (StubGenStubId::arrayof_oop_arraycopy_id, StubRoutines::_arrayof_oop_disjoint_arraycopy);
|
||||
StubRoutines::_arrayof_oop_arraycopy = generate_oop_copy (true, "arrayof_oop_arraycopy", status, false, StubRoutines::_arrayof_oop_disjoint_arraycopy);
|
||||
} else {
|
||||
StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy;
|
||||
}
|
||||
StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy;
|
||||
|
||||
StubRoutines::_checkcast_arraycopy = generate_checkcast_copy();
|
||||
StubRoutines::_unsafe_arraycopy = generate_unsafe_copy();
|
||||
StubRoutines::_generic_arraycopy = generate_generic_copy();
|
||||
StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy");
|
||||
StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy");
|
||||
StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy");
|
||||
|
||||
|
||||
}
|
||||
|
||||
address generate_method_entry_barrier() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::method_entry_barrier_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "nmethod_entry_barrier");
|
||||
|
||||
Label deoptimize_label;
|
||||
|
||||
@@ -3105,22 +2960,22 @@ class StubGenerator: public StubCodeGenerator {
|
||||
#undef __
|
||||
#define __ masm->
|
||||
|
||||
address generate_cont_thaw(StubGenStubId stub_id) {
|
||||
address generate_cont_thaw(const char* label, Continuation::thaw_kind kind) {
|
||||
if (!Continuations::enabled()) return nullptr;
|
||||
Unimplemented();
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
address generate_cont_thaw() {
|
||||
return generate_cont_thaw(StubGenStubId::cont_thaw_id);
|
||||
return generate_cont_thaw("Cont thaw", Continuation::thaw_top);
|
||||
}
|
||||
|
||||
address generate_cont_returnBarrier() {
|
||||
return generate_cont_thaw(StubGenStubId::cont_returnBarrier_id);
|
||||
return generate_cont_thaw("Cont thaw return barrier", Continuation::thaw_return_barrier);
|
||||
}
|
||||
|
||||
address generate_cont_returnBarrier_exception() {
|
||||
return generate_cont_thaw(StubGenStubId::cont_returnBarrierExc_id);
|
||||
return generate_cont_thaw("Cont thaw return barrier exception", Continuation::thaw_return_barrier_exception);
|
||||
}
|
||||
|
||||
//---------------------------------------------------------------------------
|
||||
@@ -3152,8 +3007,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
|
||||
StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
|
||||
StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
|
||||
StubRoutines::Arm::_atomic_load_long_entry = generate_atomic_load_long();
|
||||
StubRoutines::Arm::_atomic_store_long_entry = generate_atomic_store_long();
|
||||
StubRoutines::_atomic_load_long_entry = generate_atomic_load_long();
|
||||
StubRoutines::_atomic_store_long_entry = generate_atomic_store_long();
|
||||
|
||||
}
|
||||
|
||||
@@ -3203,36 +3058,27 @@ class StubGenerator: public StubCodeGenerator {
|
||||
}
|
||||
|
||||
public:
|
||||
StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) {
|
||||
switch(blob_id) {
|
||||
case initial_id:
|
||||
StubGenerator(CodeBuffer* code, StubsKind kind) : StubCodeGenerator(code) {
|
||||
switch(kind) {
|
||||
case Initial_stubs:
|
||||
generate_initial_stubs();
|
||||
break;
|
||||
case continuation_id:
|
||||
case Continuation_stubs:
|
||||
generate_continuation_stubs();
|
||||
break;
|
||||
case compiler_id:
|
||||
case Compiler_stubs:
|
||||
generate_compiler_stubs();
|
||||
break;
|
||||
case final_id:
|
||||
case Final_stubs:
|
||||
generate_final_stubs();
|
||||
break;
|
||||
default:
|
||||
fatal("unexpected blob id: %d", blob_id);
|
||||
fatal("unexpected stubs kind: %d", kind);
|
||||
break;
|
||||
};
|
||||
}
|
||||
}; // end class declaration
|
||||
|
||||
void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id) {
|
||||
StubGenerator g(code, blob_id);
|
||||
void StubGenerator_generate(CodeBuffer* code, StubCodeGenerator::StubsKind kind) {
|
||||
StubGenerator g(code, kind);
|
||||
}
|
||||
|
||||
// implementation of internal development flag
|
||||
|
||||
#ifdef TEST_C2_GENERIC_ARRAYCOPY
|
||||
const bool StubGenerator::set_status = true; // generate a status compatible with C1 calls
|
||||
#else
|
||||
const bool StubGenerator::set_status = false; // non failing C2 stubs need not return a status in R0
|
||||
#endif
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -119,8 +119,7 @@ void aes_init() {
|
||||
|
||||
address generate_aescrypt_encryptBlock() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::aescrypt_encryptBlock_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "aesencryptBlock");
|
||||
|
||||
address start = __ pc();
|
||||
|
||||
@@ -317,8 +316,7 @@ address generate_aescrypt_encryptBlock() {
|
||||
|
||||
address generate_aescrypt_decryptBlock() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::aescrypt_decryptBlock_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "aesdecryptBlock");
|
||||
|
||||
address start = __ pc();
|
||||
|
||||
@@ -538,8 +536,7 @@ address generate_cipherBlockChaining_encryptAESCrypt() {
|
||||
// [sp+4] Transposition Box reference
|
||||
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_encryptAESCrypt_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
|
||||
|
||||
address start = __ pc();
|
||||
|
||||
@@ -604,8 +601,7 @@ address generate_cipherBlockChaining_encryptAESCrypt() {
|
||||
|
||||
address generate_cipherBlockChaining_decryptAESCrypt() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_decryptAESCrypt_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
|
||||
|
||||
address start = __ pc();
|
||||
|
||||
|
||||
@@ -26,13 +26,9 @@
|
||||
#include "runtime/frame.inline.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
|
||||
#define DEFINE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \
|
||||
address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = nullptr;
|
||||
address StubRoutines::Arm::_idiv_irem_entry = nullptr;
|
||||
|
||||
#define DEFINE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
|
||||
address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function);
|
||||
address StubRoutines::Arm::_partial_subtype_check = nullptr;
|
||||
|
||||
STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT)
|
||||
|
||||
#undef DEFINE_ARCH_ENTRY_INIT
|
||||
#undef DEFINE_ARCH_ENTRY
|
||||
address StubRoutines::_atomic_load_long_entry = nullptr;
|
||||
address StubRoutines::_atomic_store_long_entry = nullptr;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -29,53 +29,38 @@
|
||||
// definition. See stubRoutines.hpp for a description on how to
|
||||
// extend it.
|
||||
|
||||
// emit enum used to size per-blob code buffers
|
||||
|
||||
#define DEFINE_BLOB_SIZE(blob_name, size) \
|
||||
_ ## blob_name ## _code_size = size,
|
||||
|
||||
enum platform_dependent_constants {
|
||||
STUBGEN_ARCH_BLOBS_DO(DEFINE_BLOB_SIZE)
|
||||
// simply increase sizes if too small (assembler will crash if too small)
|
||||
_initial_stubs_code_size = 9000,
|
||||
_continuation_stubs_code_size = 2000,
|
||||
_compiler_stubs_code_size = 22000,
|
||||
_final_stubs_code_size = 22000
|
||||
};
|
||||
|
||||
#undef DEFINE_BLOB_SIZE
|
||||
|
||||
public:
|
||||
static bool returns_to_call_stub(address return_pc) {
|
||||
return return_pc == _call_stub_return_address;
|
||||
}
|
||||
|
||||
class Arm {
|
||||
friend class StubGenerator;
|
||||
friend class VMStructs;
|
||||
|
||||
#define DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \
|
||||
static address STUB_FIELD_NAME(field_name) ;
|
||||
private:
|
||||
|
||||
#define DECLARE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
|
||||
DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name)
|
||||
static address _idiv_irem_entry;
|
||||
static address _partial_subtype_check;
|
||||
|
||||
private:
|
||||
STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT)
|
||||
|
||||
#undef DECLARE_ARCH_ENTRY_INIT
|
||||
#undef DECLARE_ARCH_ENTRY
|
||||
|
||||
public:
|
||||
|
||||
// declare getters for arch-specific entries
|
||||
|
||||
#define DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name) \
|
||||
static address getter_name() { return STUB_FIELD_NAME(field_name) ; }
|
||||
|
||||
#define DEFINE_ARCH_ENTRY_GETTER_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
|
||||
DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name)
|
||||
|
||||
STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT)
|
||||
|
||||
#undef DEFINE_ARCH_ENTRY_GETTER_INIT
|
||||
#undef DEFINE_ARCH_ENTRY_GETTER
|
||||
public:
|
||||
|
||||
static address idiv_irem_entry() { return _idiv_irem_entry; }
|
||||
static address partial_subtype_check() { return _partial_subtype_check; }
|
||||
};
|
||||
|
||||
static bool returns_to_call_stub(address return_pc) {
|
||||
return return_pc == _call_stub_return_address;
|
||||
}
|
||||
|
||||
static address _atomic_load_long_entry;
|
||||
static address _atomic_store_long_entry;
|
||||
|
||||
static address atomic_load_long_entry() { return _atomic_load_long_entry; }
|
||||
static address atomic_store_long_entry() { return _atomic_store_long_entry; }
|
||||
|
||||
|
||||
#endif // CPU_ARM_STUBROUTINES_ARM_HPP
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -562,20 +562,20 @@ void Assembler::test_asm() {
|
||||
li( R3, -4711);
|
||||
|
||||
// PPC 1, section 3.3.9, Fixed-Point Compare Instructions
|
||||
cmpi( CR7, 0, R27, 4711);
|
||||
cmp( CR0, 1, R14, R11);
|
||||
cmpli( CR5, 1, R17, 45);
|
||||
cmpl( CR3, 0, R9, R10);
|
||||
cmpi( CCR7, 0, R27, 4711);
|
||||
cmp( CCR0, 1, R14, R11);
|
||||
cmpli( CCR5, 1, R17, 45);
|
||||
cmpl( CCR3, 0, R9, R10);
|
||||
|
||||
cmpwi( CR7, R27, 4711);
|
||||
cmpw( CR0, R14, R11);
|
||||
cmplwi( CR5, R17, 45);
|
||||
cmplw( CR3, R9, R10);
|
||||
cmpwi( CCR7, R27, 4711);
|
||||
cmpw( CCR0, R14, R11);
|
||||
cmplwi( CCR5, R17, 45);
|
||||
cmplw( CCR3, R9, R10);
|
||||
|
||||
cmpdi( CR7, R27, 4711);
|
||||
cmpd( CR0, R14, R11);
|
||||
cmpldi( CR5, R17, 45);
|
||||
cmpld( CR3, R9, R10);
|
||||
cmpdi( CCR7, R27, 4711);
|
||||
cmpd( CCR0, R14, R11);
|
||||
cmpldi( CCR5, R17, 45);
|
||||
cmpld( CCR3, R9, R10);
|
||||
|
||||
// PPC 1, section 3.3.11, Fixed-Point Logical Instructions
|
||||
andi_( R4, R5, 0xff);
|
||||
@@ -715,23 +715,23 @@ void Assembler::test_asm() {
|
||||
bcctr( 4, 6, 0);
|
||||
bcctrl(4, 6, 0);
|
||||
|
||||
blt(CR0, lbl2);
|
||||
bgt(CR1, lbl2);
|
||||
beq(CR2, lbl2);
|
||||
bso(CR3, lbl2);
|
||||
bge(CR4, lbl2);
|
||||
ble(CR5, lbl2);
|
||||
bne(CR6, lbl2);
|
||||
bns(CR7, lbl2);
|
||||
blt(CCR0, lbl2);
|
||||
bgt(CCR1, lbl2);
|
||||
beq(CCR2, lbl2);
|
||||
bso(CCR3, lbl2);
|
||||
bge(CCR4, lbl2);
|
||||
ble(CCR5, lbl2);
|
||||
bne(CCR6, lbl2);
|
||||
bns(CCR7, lbl2);
|
||||
|
||||
bltl(CR0, lbl2);
|
||||
bgtl(CR1, lbl2);
|
||||
beql(CR2, lbl2);
|
||||
bsol(CR3, lbl2);
|
||||
bgel(CR4, lbl2);
|
||||
blel(CR5, lbl2);
|
||||
bnel(CR6, lbl2);
|
||||
bnsl(CR7, lbl2);
|
||||
bltl(CCR0, lbl2);
|
||||
bgtl(CCR1, lbl2);
|
||||
beql(CCR2, lbl2);
|
||||
bsol(CCR3, lbl2);
|
||||
bgel(CCR4, lbl2);
|
||||
blel(CCR5, lbl2);
|
||||
bnel(CCR6, lbl2);
|
||||
bnsl(CCR7, lbl2);
|
||||
blr();
|
||||
|
||||
sync();
|
||||
@@ -794,7 +794,7 @@ void Assembler::test_asm() {
|
||||
fcfid( F22, F23);
|
||||
|
||||
// PPC 1, section 4.6.7 Floating-Point Compare Instructions
|
||||
fcmpu( CR7, F24, F25);
|
||||
fcmpu( CCR7, F24, F25);
|
||||
|
||||
tty->print_cr("\ntest_asm disassembly (0x%lx 0x%lx):", p2i(code()->insts_begin()), p2i(code()->insts_end()));
|
||||
code()->decode();
|
||||
|
||||
@@ -294,8 +294,6 @@ class Assembler : public AbstractAssembler {
|
||||
CLRRWI_OPCODE = RLWINM_OPCODE,
|
||||
CLRLWI_OPCODE = RLWINM_OPCODE,
|
||||
|
||||
RLWNM_OPCODE = (23u << OPCODE_SHIFT),
|
||||
|
||||
RLWIMI_OPCODE = (20u << OPCODE_SHIFT),
|
||||
|
||||
SLW_OPCODE = (31u << OPCODE_SHIFT | 24u << 1),
|
||||
@@ -426,9 +424,6 @@ class Assembler : public AbstractAssembler {
|
||||
RLDIC_OPCODE = (30u << OPCODE_SHIFT | 2u << XO_27_29_SHIFT), // MD-FORM
|
||||
RLDIMI_OPCODE = (30u << OPCODE_SHIFT | 3u << XO_27_29_SHIFT), // MD-FORM
|
||||
|
||||
RLDCL_OPCODE = (30u << OPCODE_SHIFT | 8u << 1),
|
||||
RLDCR_OPCODE = (30u << OPCODE_SHIFT | 9u << 1),
|
||||
|
||||
SRADI_OPCODE = (31u << OPCODE_SHIFT | 413u << XO_21_29_SHIFT), // XS-FORM
|
||||
|
||||
SLD_OPCODE = (31u << OPCODE_SHIFT | 27u << 1), // X-FORM
|
||||
@@ -1701,14 +1696,6 @@ class Assembler : public AbstractAssembler {
|
||||
inline void insrdi( Register a, Register s, int n, int b);
|
||||
inline void insrwi( Register a, Register s, int n, int b);
|
||||
|
||||
// Rotate variable
|
||||
inline void rlwnm( Register a, Register s, Register b, int mb, int me);
|
||||
inline void rlwnm_(Register a, Register s, Register b, int mb, int me);
|
||||
inline void rldcl( Register a, Register s, Register b, int mb);
|
||||
inline void rldcl_(Register a, Register s, Register b, int mb);
|
||||
inline void rldcr( Register a, Register s, Register b, int me);
|
||||
inline void rldcr_(Register a, Register s, Register b, int me);
|
||||
|
||||
// PPC 1, section 3.3.2 Fixed-Point Load Instructions
|
||||
// 4 bytes
|
||||
inline void lwzx( Register d, Register s1, Register s2);
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -246,9 +246,9 @@ inline void Assembler::nop() { Assembler::ori(R0, R
|
||||
// NOP for FP and BR units (different versions to allow them to be in one group)
|
||||
inline void Assembler::fpnop0() { Assembler::fmr(F30, F30); }
|
||||
inline void Assembler::fpnop1() { Assembler::fmr(F31, F31); }
|
||||
inline void Assembler::brnop0() { Assembler::mcrf(CR2, CR2); }
|
||||
inline void Assembler::brnop1() { Assembler::mcrf(CR3, CR3); }
|
||||
inline void Assembler::brnop2() { Assembler::mcrf(CR4, CR4); }
|
||||
inline void Assembler::brnop0() { Assembler::mcrf(CCR2, CCR2); }
|
||||
inline void Assembler::brnop1() { Assembler::mcrf(CCR3, CCR3); }
|
||||
inline void Assembler::brnop2() { Assembler::mcrf(CCR4, CCR4); }
|
||||
|
||||
inline void Assembler::mr( Register d, Register s) { Assembler::orr(d, s, s); }
|
||||
inline void Assembler::ori_opt( Register d, int ui16) { if (ui16!=0) Assembler::ori( d, d, ui16); }
|
||||
@@ -303,7 +303,7 @@ inline void Assembler::clrlsldi_(Register a, Register s, int clrl6, int shl6) {
|
||||
inline void Assembler::extrdi( Register a, Register s, int n, int b){ Assembler::rldicl(a, s, b+n, 64-n); }
|
||||
// testbit with condition register.
|
||||
inline void Assembler::testbitdi(ConditionRegister cr, Register a, Register s, int ui6) {
|
||||
if (cr == CR0) {
|
||||
if (cr == CCR0) {
|
||||
Assembler::rldicr_(a, s, 63-ui6, 0);
|
||||
} else {
|
||||
Assembler::rldicr(a, s, 63-ui6, 0);
|
||||
@@ -336,13 +336,6 @@ inline void Assembler::rldimi_( Register a, Register s, int sh6, int mb6)
|
||||
inline void Assembler::insrdi( Register a, Register s, int n, int b) { Assembler::rldimi(a, s, 64-(b+n), b); }
|
||||
inline void Assembler::insrwi( Register a, Register s, int n, int b) { Assembler::rlwimi(a, s, 32-(b+n), b, b+n-1); }
|
||||
|
||||
inline void Assembler::rlwnm( Register a, Register s, Register b, int mb, int me) { emit_int32(RLWNM_OPCODE | rta(a) | rs(s) | rb(b) | mb2125(mb) | me2630(me) | rc(0)); }
|
||||
inline void Assembler::rlwnm_(Register a, Register s, Register b, int mb, int me) { emit_int32(RLWNM_OPCODE | rta(a) | rs(s) | rb(b) | mb2125(mb) | me2630(me) | rc(1)); }
|
||||
inline void Assembler::rldcl( Register a, Register s, Register b, int mb) { emit_int32(RLDCL_OPCODE | rta(a) | rs(s) | rb(b) | mb2126(mb) | rc(0)); }
|
||||
inline void Assembler::rldcl_( Register a, Register s, Register b, int mb) { emit_int32(RLDCL_OPCODE | rta(a) | rs(s) | rb(b) | mb2126(mb) | rc(1)); }
|
||||
inline void Assembler::rldcr( Register a, Register s, Register b, int me) { emit_int32(RLDCR_OPCODE | rta(a) | rs(s) | rb(b) | me2126(me) | rc(0)); }
|
||||
inline void Assembler::rldcr_( Register a, Register s, Register b, int me) { emit_int32(RLDCR_OPCODE | rta(a) | rs(s) | rb(b) | me2126(me) | rc(1)); }
|
||||
|
||||
// PPC 1, section 3.3.2 Fixed-Point Load Instructions
|
||||
inline void Assembler::lwzx( Register d, Register s1, Register s2) { emit_int32(LWZX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
|
||||
inline void Assembler::lwz( Register d, Address &a) {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -367,9 +367,9 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
|
||||
__ mr(R0, _obj); // spill
|
||||
__ ld(_obj, java_lang_Class::klass_offset(), _obj);
|
||||
__ ld(_obj, in_bytes(InstanceKlass::init_thread_offset()), _obj);
|
||||
__ cmpd(CR0, _obj, R16_thread);
|
||||
__ cmpd(CCR0, _obj, R16_thread);
|
||||
__ mr(_obj, R0); // restore
|
||||
__ bne(CR0, call_patch);
|
||||
__ bne(CCR0, call_patch);
|
||||
|
||||
// Load_klass patches may execute the patched code before it's
|
||||
// copied back into place so we need to jump back into the main
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -48,7 +48,7 @@
|
||||
#define __ _masm->
|
||||
|
||||
|
||||
const ConditionRegister LIR_Assembler::BOOL_RESULT = CR5;
|
||||
const ConditionRegister LIR_Assembler::BOOL_RESULT = CCR5;
|
||||
|
||||
|
||||
bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
|
||||
@@ -156,8 +156,8 @@ void LIR_Assembler::osr_entry() {
|
||||
{
|
||||
Label L;
|
||||
__ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
|
||||
__ cmpdi(CR0, R0, 0);
|
||||
__ bne(CR0, L);
|
||||
__ cmpdi(CCR0, R0, 0);
|
||||
__ bne(CCR0, L);
|
||||
__ stop("locked object is null");
|
||||
__ bind(L);
|
||||
}
|
||||
@@ -410,11 +410,11 @@ void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right,
|
||||
|
||||
Label regular, done;
|
||||
if (is_int) {
|
||||
__ cmpwi(CR0, Rdivisor, -1);
|
||||
__ cmpwi(CCR0, Rdivisor, -1);
|
||||
} else {
|
||||
__ cmpdi(CR0, Rdivisor, -1);
|
||||
__ cmpdi(CCR0, Rdivisor, -1);
|
||||
}
|
||||
__ bne(CR0, regular);
|
||||
__ bne(CCR0, regular);
|
||||
if (code == lir_idiv) {
|
||||
__ neg(Rresult, Rdividend);
|
||||
__ b(done);
|
||||
@@ -597,14 +597,14 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
|
||||
Address addr = dst_in_memory ? frame_map()->address_for_slot(dst->double_stack_ix()) : Address();
|
||||
Label L;
|
||||
// Result must be 0 if value is NaN; test by comparing value to itself.
|
||||
__ fcmpu(CR0, rsrc, rsrc);
|
||||
__ fcmpu(CCR0, rsrc, rsrc);
|
||||
if (dst_in_memory) {
|
||||
__ li(R0, 0); // 0 in case of NAN
|
||||
__ std(R0, addr);
|
||||
} else {
|
||||
__ li(dst->as_register(), 0);
|
||||
}
|
||||
__ bso(CR0, L);
|
||||
__ bso(CCR0, L);
|
||||
__ fctiwz(rsrc, rsrc); // USE_KILL
|
||||
if (dst_in_memory) {
|
||||
__ stfd(rsrc, addr.disp(), addr.base());
|
||||
@@ -621,14 +621,14 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
|
||||
Address addr = dst_in_memory ? frame_map()->address_for_slot(dst->double_stack_ix()) : Address();
|
||||
Label L;
|
||||
// Result must be 0 if value is NaN; test by comparing value to itself.
|
||||
__ fcmpu(CR0, rsrc, rsrc);
|
||||
__ fcmpu(CCR0, rsrc, rsrc);
|
||||
if (dst_in_memory) {
|
||||
__ li(R0, 0); // 0 in case of NAN
|
||||
__ std(R0, addr);
|
||||
} else {
|
||||
__ li(dst->as_register_lo(), 0);
|
||||
}
|
||||
__ bso(CR0, L);
|
||||
__ bso(CCR0, L);
|
||||
__ fctidz(rsrc, rsrc); // USE_KILL
|
||||
if (dst_in_memory) {
|
||||
__ stfd(rsrc, addr.disp(), addr.base());
|
||||
@@ -1530,15 +1530,15 @@ void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Op
|
||||
if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
|
||||
bool is_unordered_less = (code == lir_ucmp_fd2i);
|
||||
if (left->is_single_fpu()) {
|
||||
__ fcmpu(CR0, left->as_float_reg(), right->as_float_reg());
|
||||
__ fcmpu(CCR0, left->as_float_reg(), right->as_float_reg());
|
||||
} else if (left->is_double_fpu()) {
|
||||
__ fcmpu(CR0, left->as_double_reg(), right->as_double_reg());
|
||||
__ fcmpu(CCR0, left->as_double_reg(), right->as_double_reg());
|
||||
} else {
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
__ set_cmpu3(Rdst, is_unordered_less); // is_unordered_less ? -1 : 1
|
||||
} else if (code == lir_cmp_l2i) {
|
||||
__ cmpd(CR0, left->as_register_lo(), right->as_register_lo());
|
||||
__ cmpd(CCR0, left->as_register_lo(), right->as_register_lo());
|
||||
__ set_cmp3(Rdst); // set result as follows: <: -1, =: 0, >: 1
|
||||
} else {
|
||||
ShouldNotReachHere();
|
||||
@@ -1893,8 +1893,8 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
__ add(src_pos, tmp, src_pos);
|
||||
__ add(dst_pos, tmp, dst_pos);
|
||||
|
||||
__ cmpwi(CR0, R3_RET, 0);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CR0, Assembler::less), *stub->entry());
|
||||
__ cmpwi(CCR0, R3_RET, 0);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::less), *stub->entry());
|
||||
__ bind(*stub->continuation());
|
||||
return;
|
||||
}
|
||||
@@ -1910,12 +1910,12 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
|
||||
// Use only one conditional branch for simple checks.
|
||||
if (simple_check_flag_set) {
|
||||
ConditionRegister combined_check = CR1, tmp_check = CR1;
|
||||
ConditionRegister combined_check = CCR1, tmp_check = CCR1;
|
||||
|
||||
// Make sure src and dst are non-null.
|
||||
if (flags & LIR_OpArrayCopy::src_null_check) {
|
||||
__ cmpdi(combined_check, src, 0);
|
||||
tmp_check = CR0;
|
||||
tmp_check = CCR0;
|
||||
}
|
||||
|
||||
if (flags & LIR_OpArrayCopy::dst_null_check) {
|
||||
@@ -1923,13 +1923,13 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
if (tmp_check != combined_check) {
|
||||
__ cror(combined_check, Assembler::equal, tmp_check, Assembler::equal);
|
||||
}
|
||||
tmp_check = CR0;
|
||||
tmp_check = CCR0;
|
||||
}
|
||||
|
||||
// Clear combined_check.eq if not already used.
|
||||
if (tmp_check == combined_check) {
|
||||
__ crandc(combined_check, Assembler::equal, combined_check, Assembler::equal);
|
||||
tmp_check = CR0;
|
||||
tmp_check = CCR0;
|
||||
}
|
||||
|
||||
if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
|
||||
@@ -1960,15 +1960,15 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
|
||||
__ load_klass(tmp, dst);
|
||||
__ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
|
||||
__ cmpwi(CR0, tmp2, Klass::_lh_neutral_value);
|
||||
__ bge(CR0, slow);
|
||||
__ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
|
||||
__ bge(CCR0, slow);
|
||||
}
|
||||
|
||||
if (!(flags & LIR_OpArrayCopy::src_objarray)) {
|
||||
__ load_klass(tmp, src);
|
||||
__ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
|
||||
__ cmpwi(CR0, tmp2, Klass::_lh_neutral_value);
|
||||
__ bge(CR0, slow);
|
||||
__ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
|
||||
__ bge(CCR0, slow);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1979,16 +1979,16 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
if (flags & LIR_OpArrayCopy::src_range_check) {
|
||||
__ lwz(tmp2, arrayOopDesc::length_offset_in_bytes(), src);
|
||||
__ add(tmp, length, src_pos);
|
||||
__ cmpld(CR0, tmp2, tmp);
|
||||
__ ble(CR0, slow);
|
||||
__ cmpld(CCR0, tmp2, tmp);
|
||||
__ ble(CCR0, slow);
|
||||
}
|
||||
|
||||
__ extsw(dst_pos, dst_pos);
|
||||
if (flags & LIR_OpArrayCopy::dst_range_check) {
|
||||
__ lwz(tmp2, arrayOopDesc::length_offset_in_bytes(), dst);
|
||||
__ add(tmp, length, dst_pos);
|
||||
__ cmpld(CR0, tmp2, tmp);
|
||||
__ ble(CR0, slow);
|
||||
__ cmpld(CCR0, tmp2, tmp);
|
||||
__ ble(CCR0, slow);
|
||||
}
|
||||
|
||||
int shift = shift_amount(basic_type);
|
||||
@@ -2003,8 +2003,8 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
// We don't know the array types are compatible.
|
||||
if (basic_type != T_OBJECT) {
|
||||
// Simple test for basic type arrays.
|
||||
__ cmp_klasses_from_objects(CR0, src, dst, tmp, tmp2);
|
||||
__ beq(CR0, cont);
|
||||
__ cmp_klasses_from_objects(CCR0, src, dst, tmp, tmp2);
|
||||
__ beq(CCR0, cont);
|
||||
} else {
|
||||
// For object arrays, if src is a sub class of dst then we can
|
||||
// safely do the copy.
|
||||
@@ -2024,7 +2024,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
__ calculate_address_from_global_toc(tmp, slow_stc, true, true, false);
|
||||
__ mtctr(tmp);
|
||||
__ bctrl(); // sets CR0
|
||||
__ beq(CR0, cont);
|
||||
__ beq(CCR0, cont);
|
||||
|
||||
if (copyfunc_addr != nullptr) { // Use stub if available.
|
||||
__ bind(copyfunc);
|
||||
@@ -2044,8 +2044,8 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
|
||||
jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
|
||||
__ load_const_optimized(tmp, objArray_lh);
|
||||
__ cmpw(CR0, tmp, tmp2);
|
||||
__ bne(CR0, slow);
|
||||
__ cmpw(CCR0, tmp, tmp2);
|
||||
__ bne(CCR0, slow);
|
||||
}
|
||||
|
||||
Register src_ptr = R3_ARG1;
|
||||
@@ -2080,8 +2080,8 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
#ifndef PRODUCT
|
||||
if (PrintC1Statistics) {
|
||||
Label failed;
|
||||
__ cmpwi(CR0, R3_RET, 0);
|
||||
__ bne(CR0, failed);
|
||||
__ cmpwi(CCR0, R3_RET, 0);
|
||||
__ bne(CCR0, failed);
|
||||
address counter = (address)&Runtime1::_arraycopy_checkcast_cnt;
|
||||
int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
|
||||
__ lwz(R11_scratch1, simm16_offs, tmp);
|
||||
@@ -2092,8 +2092,8 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
#endif
|
||||
|
||||
__ nand(tmp, R3_RET, R3_RET);
|
||||
__ cmpwi(CR0, R3_RET, 0);
|
||||
__ beq(CR0, *stub->continuation());
|
||||
__ cmpwi(CCR0, R3_RET, 0);
|
||||
__ beq(CCR0, *stub->continuation());
|
||||
|
||||
#ifndef PRODUCT
|
||||
if (PrintC1Statistics) {
|
||||
@@ -2126,15 +2126,15 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
// but not necessarily exactly of type default_type.
|
||||
Label known_ok, halt;
|
||||
metadata2reg(default_type->constant_encoding(), tmp);
|
||||
__ cmp_klass(CR0, dst, tmp, R11_scratch1, R12_scratch2);
|
||||
__ cmp_klass(CCR0, dst, tmp, R11_scratch1, R12_scratch2);
|
||||
if (basic_type != T_OBJECT) {
|
||||
__ bne(CR0, halt);
|
||||
__ cmp_klass(CR0, src, tmp, R11_scratch1, R12_scratch2);
|
||||
__ beq(CR0, known_ok);
|
||||
__ bne(CCR0, halt);
|
||||
__ cmp_klass(CCR0, src, tmp, R11_scratch1, R12_scratch2);
|
||||
__ beq(CCR0, known_ok);
|
||||
} else {
|
||||
__ beq(CR0, known_ok);
|
||||
__ cmpw(CR0, src, dst);
|
||||
__ beq(CR0, known_ok);
|
||||
__ beq(CCR0, known_ok);
|
||||
__ cmpw(CCR0, src, dst);
|
||||
__ beq(CCR0, known_ok);
|
||||
}
|
||||
__ bind(halt);
|
||||
__ stop("incorrect type information in arraycopy");
|
||||
@@ -2269,8 +2269,8 @@ void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
|
||||
__ lbz(op->tmp1()->as_register(),
|
||||
in_bytes(InstanceKlass::init_state_offset()), op->klass()->as_register());
|
||||
// acquire barrier included in membar_storestore() which follows the allocation immediately.
|
||||
__ cmpwi(CR0, op->tmp1()->as_register(), InstanceKlass::fully_initialized);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CR0, Assembler::equal), *op->stub()->entry());
|
||||
__ cmpwi(CCR0, op->tmp1()->as_register(), InstanceKlass::fully_initialized);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), *op->stub()->entry());
|
||||
}
|
||||
__ allocate_object(op->obj()->as_register(),
|
||||
op->tmp1()->as_register(),
|
||||
@@ -2317,8 +2317,8 @@ void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
|
||||
// See if the receiver is receiver[n].
|
||||
__ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
|
||||
__ verify_klass_ptr(tmp1);
|
||||
__ cmpd(CR0, recv, tmp1);
|
||||
__ bne(CR0, next_test);
|
||||
__ cmpd(CCR0, recv, tmp1);
|
||||
__ bne(CCR0, next_test);
|
||||
|
||||
__ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
|
||||
__ addi(tmp1, tmp1, DataLayout::counter_increment);
|
||||
@@ -2332,8 +2332,8 @@ void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
|
||||
for (i = 0; i < VirtualCallData::row_limit(); i++) {
|
||||
Label next_test;
|
||||
__ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
|
||||
__ cmpdi(CR0, tmp1, 0);
|
||||
__ bne(CR0, next_test);
|
||||
__ cmpdi(CCR0, tmp1, 0);
|
||||
__ bne(CCR0, next_test);
|
||||
__ li(tmp1, DataLayout::counter_increment);
|
||||
__ std(recv, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
|
||||
__ std(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
|
||||
@@ -2394,8 +2394,8 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
|
||||
Label not_null;
|
||||
metadata2reg(md->constant_encoding(), mdo);
|
||||
__ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
|
||||
__ cmpdi(CR0, obj, 0);
|
||||
__ bne(CR0, not_null);
|
||||
__ cmpdi(CCR0, obj, 0);
|
||||
__ bne(CCR0, not_null);
|
||||
__ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
|
||||
__ ori(data_val, data_val, BitData::null_seen_byte_constant());
|
||||
__ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
|
||||
@@ -2412,8 +2412,8 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
|
||||
__ std(Rtmp1, slot_offset, mdo);
|
||||
__ bind(update_done);
|
||||
} else {
|
||||
__ cmpdi(CR0, obj, 0);
|
||||
__ beq(CR0, *obj_is_null);
|
||||
__ cmpdi(CCR0, obj, 0);
|
||||
__ beq(CCR0, *obj_is_null);
|
||||
}
|
||||
|
||||
// get object class
|
||||
@@ -2427,8 +2427,8 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
|
||||
|
||||
if (op->fast_check()) {
|
||||
assert_different_registers(klass_RInfo, k_RInfo);
|
||||
__ cmpd(CR0, k_RInfo, klass_RInfo);
|
||||
__ beq(CR0, *success);
|
||||
__ cmpd(CCR0, k_RInfo, klass_RInfo);
|
||||
__ beq(CCR0, *success);
|
||||
// Fall through to failure case.
|
||||
} else {
|
||||
bool need_slow_path = true;
|
||||
@@ -2462,7 +2462,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
|
||||
__ mtctr(original_Rtmp1);
|
||||
__ bctrl(); // sets CR0
|
||||
if (keep_obj_alive) { __ mr(obj, dst); }
|
||||
__ beq(CR0, *success);
|
||||
__ beq(CCR0, *success);
|
||||
// Fall through to failure case.
|
||||
}
|
||||
}
|
||||
@@ -2501,8 +2501,8 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
|
||||
Register data_val = Rtmp1;
|
||||
metadata2reg(md->constant_encoding(), mdo);
|
||||
__ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
|
||||
__ cmpdi(CR0, value, 0);
|
||||
__ bne(CR0, not_null);
|
||||
__ cmpdi(CCR0, value, 0);
|
||||
__ bne(CCR0, not_null);
|
||||
__ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
|
||||
__ ori(data_val, data_val, BitData::null_seen_byte_constant());
|
||||
__ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
|
||||
@@ -2519,8 +2519,8 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
|
||||
__ std(Rtmp1, slot_offset, mdo);
|
||||
__ bind(update_done);
|
||||
} else {
|
||||
__ cmpdi(CR0, value, 0);
|
||||
__ beq(CR0, done);
|
||||
__ cmpdi(CCR0, value, 0);
|
||||
__ beq(CCR0, done);
|
||||
}
|
||||
if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
|
||||
explicit_null_check(array, op->info_for_exception());
|
||||
@@ -2543,7 +2543,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
|
||||
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(slow_path));
|
||||
__ mtctr(R0);
|
||||
__ bctrl(); // sets CR0
|
||||
__ beq(CR0, done);
|
||||
__ beq(CCR0, done);
|
||||
|
||||
__ bind(failure);
|
||||
__ b(*stub->entry());
|
||||
@@ -3024,9 +3024,9 @@ void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr
|
||||
}
|
||||
|
||||
if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
|
||||
__ bne_predict_not_taken(CR0, Lretry);
|
||||
__ bne_predict_not_taken(CCR0, Lretry);
|
||||
} else {
|
||||
__ bne( CR0, Lretry);
|
||||
__ bne( CCR0, Lretry);
|
||||
}
|
||||
|
||||
if (UseCompressedOops && data->is_oop()) {
|
||||
@@ -3063,8 +3063,8 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
|
||||
|
||||
if (do_null) {
|
||||
if (!TypeEntries::was_null_seen(current_klass)) {
|
||||
__ cmpdi(CR0, obj, 0);
|
||||
__ bne(CR0, Lupdate);
|
||||
__ cmpdi(CCR0, obj, 0);
|
||||
__ bne(CCR0, Lupdate);
|
||||
__ ld(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
|
||||
__ ori(R0, R0, TypeEntries::null_seen);
|
||||
if (do_update) {
|
||||
@@ -3074,14 +3074,14 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
|
||||
}
|
||||
} else {
|
||||
if (do_update) {
|
||||
__ cmpdi(CR0, obj, 0);
|
||||
__ beq(CR0, Ldone);
|
||||
__ cmpdi(CCR0, obj, 0);
|
||||
__ beq(CCR0, Ldone);
|
||||
}
|
||||
}
|
||||
#ifdef ASSERT
|
||||
} else {
|
||||
__ cmpdi(CR0, obj, 0);
|
||||
__ bne(CR0, Lupdate);
|
||||
__ cmpdi(CCR0, obj, 0);
|
||||
__ bne(CCR0, Lupdate);
|
||||
__ stop("unexpected null obj");
|
||||
#endif
|
||||
}
|
||||
@@ -3097,8 +3097,8 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
|
||||
klass_reg_used = true;
|
||||
__ load_klass(klass, obj);
|
||||
metadata2reg(exact_klass->constant_encoding(), R0);
|
||||
__ cmpd(CR0, klass, R0);
|
||||
__ beq(CR0, ok);
|
||||
__ cmpd(CCR0, klass, R0);
|
||||
__ beq(CCR0, ok);
|
||||
__ stop("exact klass and actual klass differ");
|
||||
__ bind(ok);
|
||||
}
|
||||
@@ -3118,20 +3118,20 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
|
||||
// Like InterpreterMacroAssembler::profile_obj_type
|
||||
__ clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
|
||||
// Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
|
||||
__ cmpd(CR1, R0, klass);
|
||||
__ cmpd(CCR1, R0, klass);
|
||||
// Klass seen before, nothing to do (regardless of unknown bit).
|
||||
//beq(CR1, do_nothing);
|
||||
//beq(CCR1, do_nothing);
|
||||
|
||||
__ andi_(R0, tmp, TypeEntries::type_unknown);
|
||||
// Already unknown. Nothing to do anymore.
|
||||
//bne(CR0, do_nothing);
|
||||
__ crorc(CR0, Assembler::equal, CR1, Assembler::equal); // cr0 eq = cr1 eq or cr0 ne
|
||||
__ beq(CR0, Lnext);
|
||||
//bne(CCR0, do_nothing);
|
||||
__ crorc(CCR0, Assembler::equal, CCR1, Assembler::equal); // cr0 eq = cr1 eq or cr0 ne
|
||||
__ beq(CCR0, Lnext);
|
||||
|
||||
if (TypeEntries::is_type_none(current_klass)) {
|
||||
__ clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
|
||||
__ orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
|
||||
__ beq(CR0, Ldo_update); // First time here. Set profile type.
|
||||
__ beq(CCR0, Ldo_update); // First time here. Set profile type.
|
||||
}
|
||||
|
||||
} else {
|
||||
@@ -3141,7 +3141,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
|
||||
__ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
|
||||
__ andi_(R0, tmp, TypeEntries::type_unknown);
|
||||
// Already unknown. Nothing to do anymore.
|
||||
__ bne(CR0, Lnext);
|
||||
__ bne(CCR0, Lnext);
|
||||
}
|
||||
|
||||
// Different than before. Cannot keep accurate profile.
|
||||
@@ -3157,14 +3157,14 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
|
||||
|
||||
__ clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
|
||||
// Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
|
||||
__ cmpd(CR1, R0, klass);
|
||||
__ cmpd(CCR1, R0, klass);
|
||||
// Klass seen before, nothing to do (regardless of unknown bit).
|
||||
__ beq(CR1, Lnext);
|
||||
__ beq(CCR1, Lnext);
|
||||
#ifdef ASSERT
|
||||
{
|
||||
Label ok;
|
||||
__ clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
|
||||
__ beq(CR0, ok); // First time here.
|
||||
__ beq(CCR0, ok); // First time here.
|
||||
|
||||
__ stop("unexpected profiling mismatch");
|
||||
__ bind(ok);
|
||||
@@ -3178,7 +3178,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
|
||||
|
||||
// Already unknown. Nothing to do anymore.
|
||||
__ andi_(R0, tmp, TypeEntries::type_unknown);
|
||||
__ bne(CR0, Lnext);
|
||||
__ bne(CCR0, Lnext);
|
||||
|
||||
// Different than before. Cannot keep accurate profile.
|
||||
__ ori(R0, tmp, TypeEntries::type_unknown);
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -86,8 +86,8 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
|
||||
if (DiagnoseSyncOnValueBasedClasses != 0) {
|
||||
load_klass(Rscratch, Roop);
|
||||
lbz(Rscratch, in_bytes(Klass::misc_flags_offset()), Rscratch);
|
||||
testbitdi(CR0, R0, Rscratch, exact_log2(KlassFlags::_misc_is_value_based_class));
|
||||
bne(CR0, slow_int);
|
||||
testbitdi(CCR0, R0, Rscratch, exact_log2(KlassFlags::_misc_is_value_based_class));
|
||||
bne(CCR0, slow_int);
|
||||
}
|
||||
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
@@ -101,7 +101,7 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
|
||||
|
||||
// Compare object markWord with Rmark and if equal exchange Rscratch with object markWord.
|
||||
assert(oopDesc::mark_offset_in_bytes() == 0, "cas must take a zero displacement");
|
||||
cmpxchgd(/*flag=*/CR0,
|
||||
cmpxchgd(/*flag=*/CCR0,
|
||||
/*current_value=*/Rscratch,
|
||||
/*compare_value=*/Rmark,
|
||||
/*exchange_value=*/Rbox,
|
||||
@@ -128,7 +128,7 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
|
||||
load_const_optimized(R0, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
|
||||
and_(R0/*==0?*/, Rscratch, R0);
|
||||
std(R0/*==0, perhaps*/, BasicLock::displaced_header_offset_in_bytes(), Rbox);
|
||||
bne(CR0, slow_int);
|
||||
bne(CCR0, slow_int);
|
||||
}
|
||||
|
||||
bind(done);
|
||||
@@ -149,8 +149,8 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb
|
||||
if (LockingMode != LM_LIGHTWEIGHT) {
|
||||
// Test first if it is a fast recursive unlock.
|
||||
ld(Rmark, BasicLock::displaced_header_offset_in_bytes(), Rbox);
|
||||
cmpdi(CR0, Rmark, 0);
|
||||
beq(CR0, done);
|
||||
cmpdi(CCR0, Rmark, 0);
|
||||
beq(CCR0, done);
|
||||
}
|
||||
|
||||
// Load object.
|
||||
@@ -162,7 +162,7 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb
|
||||
} else if (LockingMode == LM_LEGACY) {
|
||||
// Check if it is still a light weight lock, this is is true if we see
|
||||
// the stack address of the basicLock in the markWord of the object.
|
||||
cmpxchgd(/*flag=*/CR0,
|
||||
cmpxchgd(/*flag=*/CCR0,
|
||||
/*current_value=*/R0,
|
||||
/*compare_value=*/Rbox,
|
||||
/*exchange_value=*/Rmark,
|
||||
@@ -285,9 +285,9 @@ void C1_MacroAssembler::initialize_object(
|
||||
{
|
||||
lwz(t1, in_bytes(Klass::layout_helper_offset()), klass);
|
||||
if (var_size_in_bytes != noreg) {
|
||||
cmpw(CR0, t1, var_size_in_bytes);
|
||||
cmpw(CCR0, t1, var_size_in_bytes);
|
||||
} else {
|
||||
cmpwi(CR0, t1, con_size_in_bytes);
|
||||
cmpwi(CCR0, t1, con_size_in_bytes);
|
||||
}
|
||||
asm_assert_eq("bad size in initialize_object");
|
||||
}
|
||||
@@ -340,8 +340,8 @@ void C1_MacroAssembler::allocate_array(
|
||||
if (max_tlab < max_length) { max_length = max_tlab; }
|
||||
}
|
||||
load_const_optimized(t1, max_length);
|
||||
cmpld(CR0, len, t1);
|
||||
bc_far_optimized(Assembler::bcondCRbiIs1, bi0(CR0, Assembler::greater), slow_case);
|
||||
cmpld(CCR0, len, t1);
|
||||
bc_far_optimized(Assembler::bcondCRbiIs1, bi0(CCR0, Assembler::greater), slow_case);
|
||||
|
||||
// compute array size
|
||||
// note: If 0 <= len <= max_length, len*elt_size + header + alignment is
|
||||
@@ -399,8 +399,8 @@ void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
|
||||
|
||||
void C1_MacroAssembler::verify_not_null_oop(Register r) {
|
||||
Label not_null;
|
||||
cmpdi(CR0, r, 0);
|
||||
bne(CR0, not_null);
|
||||
cmpdi(CCR0, r, 0);
|
||||
bne(CCR0, not_null);
|
||||
stop("non-null oop required");
|
||||
bind(not_null);
|
||||
verify_oop(r, FILE_AND_LINE);
|
||||
@@ -414,7 +414,7 @@ void C1_MacroAssembler::null_check(Register r, Label* Lnull) {
|
||||
} else { // explicit
|
||||
//const address exception_entry = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id);
|
||||
assert(Lnull != nullptr, "must have Label for explicit check");
|
||||
cmpdi(CR0, r, 0);
|
||||
bc_far_optimized(Assembler::bcondCRbiIs1, bi0(CR0, Assembler::equal), *Lnull);
|
||||
cmpdi(CCR0, r, 0);
|
||||
bc_far_optimized(Assembler::bcondCRbiIs1, bi0(CCR0, Assembler::equal), *Lnull);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2012, 2023 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -69,14 +69,14 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result,
|
||||
// Check for pending exceptions.
|
||||
{
|
||||
ld(R0, in_bytes(Thread::pending_exception_offset()), R16_thread);
|
||||
cmpdi(CR0, R0, 0);
|
||||
cmpdi(CCR0, R0, 0);
|
||||
|
||||
// This used to conditionally jump to forward_exception however it is
|
||||
// possible if we relocate that the branch will not reach. So we must jump
|
||||
// around so we can always reach.
|
||||
|
||||
Label ok;
|
||||
beq(CR0, ok);
|
||||
beq(CCR0, ok);
|
||||
|
||||
// Make sure that the vm_results are cleared.
|
||||
if (oop_result1->is_valid() || metadata_result->is_valid()) {
|
||||
@@ -368,7 +368,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
|
||||
int call_offset = __ call_RT(noreg, noreg, target);
|
||||
OopMapSet* oop_maps = new OopMapSet();
|
||||
oop_maps->add_gc_map(call_offset, oop_map);
|
||||
__ cmpdi(CR0, R3_RET, 0);
|
||||
__ cmpdi(CCR0, R3_RET, 0);
|
||||
|
||||
// Re-execute the patched instruction or, if the nmethod was deoptmized,
|
||||
// return to the deoptimization handler entry that will cause re-execution
|
||||
@@ -382,7 +382,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
|
||||
|
||||
restore_live_registers(sasm, noreg, noreg);
|
||||
// Return if patching routine returned 0.
|
||||
__ bclr(Assembler::bcondCRbiIs1, Assembler::bi0(CR0, Assembler::equal), Assembler::bhintbhBCLRisReturn);
|
||||
__ bclr(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::equal), Assembler::bhintbhBCLRisReturn);
|
||||
|
||||
address stub = deopt_blob->unpack_with_reexecution();
|
||||
//__ load_const_optimized(R0, stub);
|
||||
@@ -448,8 +448,8 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
|
||||
Label ok;
|
||||
__ lwz(R0, in_bytes(Klass::layout_helper_offset()), R4_ARG2);
|
||||
__ srawi(R0, R0, Klass::_lh_array_tag_shift);
|
||||
__ cmpwi(CR0, R0, tag);
|
||||
__ beq(CR0, ok);
|
||||
__ cmpwi(CCR0, R0, tag);
|
||||
__ beq(CCR0, ok);
|
||||
__ stop("assert(is an array klass)");
|
||||
__ should_not_reach_here();
|
||||
__ bind(ok);
|
||||
@@ -485,9 +485,9 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
|
||||
// Load the klass and check the has finalizer flag.
|
||||
__ load_klass(t, R3_ARG1);
|
||||
__ lbz(t, in_bytes(Klass::misc_flags_offset()), t);
|
||||
__ testbitdi(CR0, R0, t, exact_log2(KlassFlags::_misc_has_finalizer));
|
||||
__ testbitdi(CCR0, R0, t, exact_log2(KlassFlags::_misc_has_finalizer));
|
||||
// Return if has_finalizer bit == 0 (CR0.eq).
|
||||
__ bclr(Assembler::bcondCRbiIs1, Assembler::bi0(CR0, Assembler::equal), Assembler::bhintbhBCLRisReturn);
|
||||
__ bclr(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::equal), Assembler::bhintbhBCLRisReturn);
|
||||
|
||||
__ mflr(R0);
|
||||
__ std(R0, _abi0(lr), R1_SP);
|
||||
@@ -602,9 +602,10 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
 { // Support for uint StubRoutine::partial_subtype_check( Klass sub, Klass super );
 const Register sub_klass = R5,
 super_klass = R4,
-temp1_reg = R6;
-__ check_klass_subtype_slow_path(sub_klass, super_klass, temp1_reg, noreg);
-// Result is in CR0.
+temp1_reg = R6,
+temp2_reg = R0;
+__ check_klass_subtype_slow_path(sub_klass, super_klass, temp1_reg, temp2_reg); // returns with CR0.eq if successful
+__ crandc(CCR0, Assembler::equal, CCR0, Assembler::equal); // failed: CR0.ne
 __ blr();
 }
 break;
@@ -805,10 +806,10 @@ OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler* sasm)
 // Check that fields in JavaThread for exception oop and issuing pc are
 // empty before writing to them.
 __ ld(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
-__ cmpdi(CR0, R0, 0);
+__ cmpdi(CCR0, R0, 0);
 __ asm_assert_eq("exception oop already set");
 __ ld(R0, in_bytes(JavaThread::exception_pc_offset() ), R16_thread);
-__ cmpdi(CR0, R0, 0);
+__ cmpdi(CCR0, R0, 0);
 __ asm_assert_eq("exception pc already set");
 #endif

@@ -70,7 +70,7 @@ void C2_MacroAssembler::string_compress_16(Register src, Register dst, Register
|
||||
// Check if cnt >= 8 (= 16 bytes)
|
||||
lis(tmp1, byte_mask); // tmp1 = 0x00FF00FF00FF00FF (non ascii case)
|
||||
srwi_(tmp2, cnt, 3);
|
||||
beq(CR0, Lslow);
|
||||
beq(CCR0, Lslow);
|
||||
ori(tmp1, tmp1, byte_mask);
|
||||
rldimi(tmp1, tmp1, 32, 0);
|
||||
mtctr(tmp2);
|
||||
@@ -87,7 +87,7 @@ void C2_MacroAssembler::string_compress_16(Register src, Register dst, Register
|
||||
rldimi(tmp4, tmp4, 2*8, 2*8); // _4_6_7_7
|
||||
|
||||
andc_(tmp0, tmp0, tmp1);
|
||||
bne(CR0, Lfailure); // Not latin1/ascii.
|
||||
bne(CCR0, Lfailure); // Not latin1/ascii.
|
||||
addi(src, src, 16);
|
||||
|
||||
rlwimi(tmp3, tmp2, 0*8, 24, 31);// _____1_3
|
||||
@@ -115,8 +115,8 @@ void C2_MacroAssembler::string_compress(Register src, Register dst, Register cnt
|
||||
|
||||
bind(Lloop);
|
||||
lhz(tmp, 0, src);
|
||||
cmplwi(CR0, tmp, byte_mask);
|
||||
bgt(CR0, Lfailure); // Not latin1/ascii.
|
||||
cmplwi(CCR0, tmp, byte_mask);
|
||||
bgt(CCR0, Lfailure); // Not latin1/ascii.
|
||||
addi(src, src, 2);
|
||||
stb(tmp, 0, dst);
|
||||
addi(dst, dst, 1);
|
||||
@@ -130,7 +130,7 @@ void C2_MacroAssembler::encode_iso_array(Register src, Register dst, Register le
|
||||
|
||||
string_compress_16(src, dst, len, tmp1, tmp2, tmp3, tmp4, tmp5, Lfailure1, ascii);
|
||||
rldicl_(result, len, 0, 64-3); // Remaining characters.
|
||||
beq(CR0, Ldone);
|
||||
beq(CCR0, Ldone);
|
||||
bind(Lslow);
|
||||
string_compress(src, dst, result, tmp2, Lfailure2, ascii);
|
||||
li(result, 0);
|
||||
@@ -140,7 +140,7 @@ void C2_MacroAssembler::encode_iso_array(Register src, Register dst, Register le
|
||||
mr(result, len);
|
||||
mfctr(tmp1);
|
||||
rldimi_(result, tmp1, 3, 0); // Remaining characters.
|
||||
beq(CR0, Ldone);
|
||||
beq(CCR0, Ldone);
|
||||
b(Lslow);
|
||||
|
||||
bind(Lfailure2);
|
||||
@@ -159,7 +159,7 @@ void C2_MacroAssembler::string_inflate_16(Register src, Register dst, Register c
|
||||
|
||||
// Check if cnt >= 8
|
||||
srwi_(tmp2, cnt, 3);
|
||||
beq(CR0, Lslow);
|
||||
beq(CCR0, Lslow);
|
||||
lis(tmp1, 0xFF); // tmp1 = 0x00FF00FF
|
||||
ori(tmp1, tmp1, 0xFF);
|
||||
mtctr(tmp2);
|
||||
@@ -235,10 +235,10 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
subf_(diff, cnt2, cnt1); // diff = cnt1 - cnt2
|
||||
// if (diff > 0) { cnt1 = cnt2; }
|
||||
if (VM_Version::has_isel()) {
|
||||
isel(cnt1, CR0, Assembler::greater, /*invert*/ false, cnt2);
|
||||
isel(cnt1, CCR0, Assembler::greater, /*invert*/ false, cnt2);
|
||||
} else {
|
||||
Label Lskip;
|
||||
blt(CR0, Lskip);
|
||||
blt(CCR0, Lskip);
|
||||
mr(cnt1, cnt2);
|
||||
bind(Lskip);
|
||||
}
|
||||
@@ -254,7 +254,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
Label Lfastloop, Lskipfast;
|
||||
|
||||
srwi_(tmp0, cnt1, log2_chars_per_iter);
|
||||
beq(CR0, Lskipfast);
|
||||
beq(CCR0, Lskipfast);
|
||||
rldicl(cnt2, cnt1, 0, 64 - log2_chars_per_iter); // Remaining characters.
|
||||
li(cnt1, 1 << log2_chars_per_iter); // Initialize for failure case: Rescan characters from current iteration.
|
||||
mtctr(tmp0);
|
||||
@@ -262,8 +262,8 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
bind(Lfastloop);
|
||||
ld(chr1, 0, str1);
|
||||
ld(chr2, 0, str2);
|
||||
cmpd(CR0, chr1, chr2);
|
||||
bne(CR0, Lslow);
|
||||
cmpd(CCR0, chr1, chr2);
|
||||
bne(CCR0, Lslow);
|
||||
addi(str1, str1, stride1);
|
||||
addi(str2, str2, stride2);
|
||||
bdnz(Lfastloop);
|
||||
@@ -272,8 +272,8 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
}
|
||||
|
||||
// Loop which searches the first difference character by character.
|
||||
cmpwi(CR0, cnt1, 0);
|
||||
beq(CR0, Lreturn_diff);
|
||||
cmpwi(CCR0, cnt1, 0);
|
||||
beq(CCR0, Lreturn_diff);
|
||||
bind(Lslow);
|
||||
mtctr(cnt1);
|
||||
|
||||
@@ -289,7 +289,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
if (stride1 == 1) { lbz(chr1, 0, str1); } else { lhz(chr1, 0, str1); }
|
||||
if (stride2 == 1) { lbz(chr2, 0, str2); } else { lhz(chr2, 0, str2); }
|
||||
subf_(result, chr2, chr1); // result = chr1 - chr2
|
||||
bne(CR0, Ldone);
|
||||
bne(CCR0, Ldone);
|
||||
addi(str1, str1, stride1);
|
||||
addi(str2, str2, stride2);
|
||||
bdnz(Lloop);
|
||||
@@ -317,23 +317,23 @@ void C2_MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register
|
||||
const int base_offset = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR);
|
||||
|
||||
// Return true if the same array.
|
||||
cmpd(CR0, ary1, ary2);
|
||||
beq(CR0, Lskiploop);
|
||||
cmpd(CCR0, ary1, ary2);
|
||||
beq(CCR0, Lskiploop);
|
||||
|
||||
// Return false if one of them is null.
|
||||
cmpdi(CR0, ary1, 0);
|
||||
cmpdi(CR1, ary2, 0);
|
||||
cmpdi(CCR0, ary1, 0);
|
||||
cmpdi(CCR1, ary2, 0);
|
||||
li(result, 0);
|
||||
cror(CR0, Assembler::equal, CR1, Assembler::equal);
|
||||
beq(CR0, Ldone);
|
||||
cror(CCR0, Assembler::equal, CCR1, Assembler::equal);
|
||||
beq(CCR0, Ldone);
|
||||
|
||||
// Load the lengths of arrays.
|
||||
lwz(limit, length_offset, ary1);
|
||||
lwz(tmp0, length_offset, ary2);
|
||||
|
||||
// Return false if the two arrays are not equal length.
|
||||
cmpw(CR0, limit, tmp0);
|
||||
bne(CR0, Ldone);
|
||||
cmpw(CCR0, limit, tmp0);
|
||||
bne(CCR0, Ldone);
|
||||
|
||||
// Load array addresses.
|
||||
addi(ary1, ary1, base_offset);
|
||||
@@ -351,7 +351,7 @@ void C2_MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register
|
||||
const int log2_chars_per_iter = is_byte ? 3 : 2;
|
||||
|
||||
srwi_(tmp0, limit, log2_chars_per_iter + (limit_needs_shift ? 1 : 0));
|
||||
beq(CR0, Lskipfast);
|
||||
beq(CCR0, Lskipfast);
|
||||
mtctr(tmp0);
|
||||
|
||||
bind(Lfastloop);
|
||||
@@ -359,13 +359,13 @@ void C2_MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register
|
||||
ld(chr2, 0, ary2);
|
||||
addi(ary1, ary1, 8);
|
||||
addi(ary2, ary2, 8);
|
||||
cmpd(CR0, chr1, chr2);
|
||||
bne(CR0, Ldone);
|
||||
cmpd(CCR0, chr1, chr2);
|
||||
bne(CCR0, Ldone);
|
||||
bdnz(Lfastloop);
|
||||
|
||||
bind(Lskipfast);
|
||||
rldicl_(limit, limit, limit_needs_shift ? 64 - 1 : 0, 64 - log2_chars_per_iter); // Remaining characters.
|
||||
beq(CR0, Lskiploop);
|
||||
beq(CCR0, Lskiploop);
|
||||
mtctr(limit);
|
||||
|
||||
// Character by character.
|
||||
@@ -381,8 +381,8 @@ void C2_MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register
|
||||
addi(ary1, ary1, 2);
|
||||
addi(ary2, ary2, 2);
|
||||
}
|
||||
cmpw(CR0, chr1, chr2);
|
||||
bne(CR0, Ldone);
|
||||
cmpw(CCR0, chr1, chr2);
|
||||
bne(CCR0, Ldone);
|
||||
bdnz(Lloop);
|
||||
|
||||
bind(Lskiploop);
|
||||
@@ -414,9 +414,9 @@ void C2_MacroAssembler::string_indexof(Register result, Register haystack, Regis
|
||||
clrldi(haycnt, haycnt, 32); // Ensure positive int is valid as 64 bit value.
|
||||
addi(addr, haystack, -h_csize); // Accesses use pre-increment.
|
||||
if (needlecntval == 0) { // variable needlecnt
|
||||
cmpwi(CR6, needlecnt, 2);
|
||||
cmpwi(CCR6, needlecnt, 2);
|
||||
clrldi(needlecnt, needlecnt, 32); // Ensure positive int is valid as 64 bit value.
|
||||
blt(CR6, L_TooShort); // Variable needlecnt: handle short needle separately.
|
||||
blt(CCR6, L_TooShort); // Variable needlecnt: handle short needle separately.
|
||||
}
|
||||
|
||||
if (n_csize == 2) { lwz(n_start, 0, needle); } else { lhz(n_start, 0, needle); } // Load first 2 characters of needle.
|
||||
@@ -447,7 +447,7 @@ void C2_MacroAssembler::string_indexof(Register result, Register haystack, Regis
|
||||
subf(addr_diff, addr, last_addr); // Difference between already checked address and last address to check.
|
||||
addi(addr, addr, h_csize); // This is the new address we want to use for comparing.
|
||||
srdi_(ch2, addr_diff, h_csize);
|
||||
beq(CR0, L_FinalCheck); // 2 characters left?
|
||||
beq(CCR0, L_FinalCheck); // 2 characters left?
|
||||
mtctr(ch2); // num of characters / 2
|
||||
bind(L_InnerLoop); // Main work horse (2x unrolled search loop)
|
||||
if (h_csize == 2) { // Load 2 characters of haystack (ignore alignment).
|
||||
@@ -457,18 +457,18 @@ void C2_MacroAssembler::string_indexof(Register result, Register haystack, Regis
|
||||
lhz(ch1, 0, addr);
|
||||
lhz(ch2, 1, addr);
|
||||
}
|
||||
cmpw(CR0, ch1, n_start); // Compare 2 characters (1 would be sufficient but try to reduce branches to CompLoop).
|
||||
cmpw(CR1, ch2, n_start);
|
||||
beq(CR0, L_Comp1); // Did we find the needle start?
|
||||
beq(CR1, L_Comp2);
|
||||
cmpw(CCR0, ch1, n_start); // Compare 2 characters (1 would be sufficient but try to reduce branches to CompLoop).
|
||||
cmpw(CCR1, ch2, n_start);
|
||||
beq(CCR0, L_Comp1); // Did we find the needle start?
|
||||
beq(CCR1, L_Comp2);
|
||||
addi(addr, addr, 2 * h_csize);
|
||||
bdnz(L_InnerLoop);
|
||||
bind(L_FinalCheck);
|
||||
andi_(addr_diff, addr_diff, h_csize); // Remaining characters not covered by InnerLoop: (num of characters) & 1.
|
||||
beq(CR0, L_NotFound);
|
||||
beq(CCR0, L_NotFound);
|
||||
if (h_csize == 2) { lwz(ch1, 0, addr); } else { lhz(ch1, 0, addr); } // One position left at which we have to compare.
|
||||
cmpw(CR1, ch1, n_start);
|
||||
beq(CR1, L_Comp1);
|
||||
cmpw(CCR1, ch1, n_start);
|
||||
beq(CCR1, L_Comp1);
|
||||
bind(L_NotFound);
|
||||
li(result, -1); // not found
|
||||
b(L_End);
|
||||
@@ -483,8 +483,8 @@ void C2_MacroAssembler::string_indexof(Register result, Register haystack, Regis
|
||||
if (n_csize == 2) { lhz(n_start, 0, needle); } else { lbz(n_start, 0, needle); } // First character of needle
|
||||
bind(L_OneCharLoop);
|
||||
if (h_csize == 2) { lhzu(ch1, 2, addr); } else { lbzu(ch1, 1, addr); }
|
||||
cmpw(CR1, ch1, n_start);
|
||||
beq(CR1, L_Found); // Did we find the one character needle?
|
||||
cmpw(CCR1, ch1, n_start);
|
||||
beq(CCR1, L_Found); // Did we find the one character needle?
|
||||
bdnz(L_OneCharLoop);
|
||||
li(result, -1); // Not found.
|
||||
b(L_End);
|
||||
@@ -500,7 +500,7 @@ void C2_MacroAssembler::string_indexof(Register result, Register haystack, Regis
|
||||
bind(L_Comp1); // Addr points to possible needle start.
|
||||
if (needlecntval != 2) { // Const needlecnt==2?
|
||||
if (needlecntval != 3) {
|
||||
if (needlecntval == 0) { beq(CR6, L_Found); } // Variable needlecnt==2?
|
||||
if (needlecntval == 0) { beq(CCR6, L_Found); } // Variable needlecnt==2?
|
||||
Register n_ind = tmp4,
|
||||
h_ind = n_ind;
|
||||
li(n_ind, 2 * n_csize); // First 2 characters are already compared, use index 2.
|
||||
@@ -513,15 +513,15 @@ void C2_MacroAssembler::string_indexof(Register result, Register haystack, Regis
|
||||
}
|
||||
if (n_csize == 2) { lhzx(ch2, needle, n_ind); } else { lbzx(ch2, needle, n_ind); }
|
||||
if (h_csize == 2) { lhzx(ch1, addr, h_ind); } else { lbzx(ch1, addr, h_ind); }
|
||||
cmpw(CR1, ch1, ch2);
|
||||
bne(CR1, L_OuterLoop);
|
||||
cmpw(CCR1, ch1, ch2);
|
||||
bne(CCR1, L_OuterLoop);
|
||||
addi(n_ind, n_ind, n_csize);
|
||||
bdnz(L_CompLoop);
|
||||
} else { // No loop required if there's only one needle character left.
|
||||
if (n_csize == 2) { lhz(ch2, 2 * 2, needle); } else { lbz(ch2, 2 * 1, needle); }
|
||||
if (h_csize == 2) { lhz(ch1, 2 * 2, addr); } else { lbz(ch1, 2 * 1, addr); }
|
||||
cmpw(CR1, ch1, ch2);
|
||||
bne(CR1, L_OuterLoop);
|
||||
cmpw(CCR1, ch1, ch2);
|
||||
bne(CCR1, L_OuterLoop);
|
||||
}
|
||||
}
|
||||
// Return index ...
|
||||
@@ -545,7 +545,7 @@ void C2_MacroAssembler::string_indexof_char(Register result, Register haystack,
|
||||
//4:
|
||||
srwi_(tmp2, haycnt, 1); // Shift right by exact_log2(UNROLL_FACTOR).
|
||||
mr(addr, haystack);
|
||||
beq(CR0, L_FinalCheck);
|
||||
beq(CCR0, L_FinalCheck);
|
||||
mtctr(tmp2); // Move to count register.
|
||||
//8:
|
||||
bind(L_InnerLoop); // Main work horse (2x unrolled search loop).
|
||||
@@ -556,19 +556,19 @@ void C2_MacroAssembler::string_indexof_char(Register result, Register haystack,
|
||||
lbz(ch1, 0, addr);
|
||||
lbz(ch2, 1, addr);
|
||||
}
|
||||
(needle != R0) ? cmpw(CR0, ch1, needle) : cmplwi(CR0, ch1, (unsigned int)needleChar);
|
||||
(needle != R0) ? cmpw(CR1, ch2, needle) : cmplwi(CR1, ch2, (unsigned int)needleChar);
|
||||
beq(CR0, L_Found1); // Did we find the needle?
|
||||
beq(CR1, L_Found2);
|
||||
(needle != R0) ? cmpw(CCR0, ch1, needle) : cmplwi(CCR0, ch1, (unsigned int)needleChar);
|
||||
(needle != R0) ? cmpw(CCR1, ch2, needle) : cmplwi(CCR1, ch2, (unsigned int)needleChar);
|
||||
beq(CCR0, L_Found1); // Did we find the needle?
|
||||
beq(CCR1, L_Found2);
|
||||
addi(addr, addr, 2 * h_csize);
|
||||
bdnz(L_InnerLoop);
|
||||
//16:
|
||||
bind(L_FinalCheck);
|
||||
andi_(R0, haycnt, 1);
|
||||
beq(CR0, L_NotFound);
|
||||
beq(CCR0, L_NotFound);
|
||||
if (!is_byte) { lhz(ch1, 0, addr); } else { lbz(ch1, 0, addr); } // One position left at which we have to compare.
|
||||
(needle != R0) ? cmpw(CR1, ch1, needle) : cmplwi(CR1, ch1, (unsigned int)needleChar);
|
||||
beq(CR1, L_Found1);
|
||||
(needle != R0) ? cmpw(CCR1, ch1, needle) : cmplwi(CCR1, ch1, (unsigned int)needleChar);
|
||||
beq(CCR1, L_Found1);
|
||||
//21:
|
||||
bind(L_NotFound);
|
||||
li(result, -1); // Not found.
|
||||
@@ -594,7 +594,7 @@ void C2_MacroAssembler::count_positives(Register src, Register cnt, Register res
|
||||
lis(tmp1, (int)(short)0x8080); // tmp1 = 0x8080808080808080
|
||||
srwi_(tmp2, cnt, 4);
|
||||
mr(result, src); // Use result reg to point to the current position.
|
||||
beq(CR0, Lslow);
|
||||
beq(CCR0, Lslow);
|
||||
ori(tmp1, tmp1, 0x8080);
|
||||
rldimi(tmp1, tmp1, 32, 0);
|
||||
mtctr(tmp2);
|
||||
@@ -607,19 +607,19 @@ void C2_MacroAssembler::count_positives(Register src, Register cnt, Register res
|
||||
orr(tmp0, tmp2, tmp0);
|
||||
|
||||
and_(tmp0, tmp0, tmp1);
|
||||
bne(CR0, Lslow); // Found negative byte.
|
||||
bne(CCR0, Lslow); // Found negative byte.
|
||||
addi(result, result, 16);
|
||||
bdnz(Lfastloop);
|
||||
|
||||
bind(Lslow); // Fallback to slow version.
|
||||
subf(tmp0, src, result); // Bytes known positive.
|
||||
subf_(tmp0, tmp0, cnt); // Remaining Bytes.
|
||||
beq(CR0, Ldone);
|
||||
beq(CCR0, Ldone);
|
||||
mtctr(tmp0);
|
||||
bind(Lloop);
|
||||
lbz(tmp0, 0, result);
|
||||
andi_(tmp0, tmp0, 0x80);
|
||||
bne(CR0, Ldone); // Found negative byte.
|
||||
bne(CCR0, Ldone); // Found negative byte.
|
||||
addi(result, result, 1);
|
||||
bdnz(Lloop);
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2020, 2025 SAP SE. All rights reserved.
+* Copyright (c) 2020, 2024 SAP SE. All rights reserved.
 * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -282,8 +282,8 @@ void DowncallLinker::StubGenerator::generate() {
 __ safepoint_poll(L_safepoint_poll_slow_path, tmp, true /* at_return */, false /* in_nmethod */);

 __ lwz(tmp, in_bytes(JavaThread::suspend_flags_offset()), R16_thread);
-__ cmpwi(CR0, tmp, 0);
-__ bne(CR0, L_safepoint_poll_slow_path);
+__ cmpwi(CCR0, tmp, 0);
+__ bne(CCR0, L_safepoint_poll_slow_path);
 __ bind(L_after_safepoint_poll);

 // change thread state
@@ -293,8 +293,8 @@ void DowncallLinker::StubGenerator::generate() {

 __ block_comment("reguard stack check");
 __ lwz(tmp, in_bytes(JavaThread::stack_guard_state_offset()), R16_thread);
-__ cmpwi(CR0, tmp, StackOverflow::stack_guard_yellow_reserved_disabled);
-__ beq(CR0, L_reguard);
+__ cmpwi(CCR0, tmp, StackOverflow::stack_guard_yellow_reserved_disabled);
+__ beq(CCR0, L_reguard);
 __ bind(L_after_reguard);

 __ reset_last_Java_frame();

@@ -1,6 +1,6 @@
 /*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
-* Copyright (c) 2018, 2025 SAP SE. All rights reserved.
+* Copyright (c) 2018, 2024 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -51,7 +51,7 @@ static void generate_marking_inactive_test(MacroAssembler* masm) {
|
||||
int active_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
|
||||
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
|
||||
__ lbz(R0, active_offset, R16_thread); // tmp1 := *(mark queue active address)
|
||||
__ cmpwi(CR0, R0, 0);
|
||||
__ cmpwi(CCR0, R0, 0);
|
||||
}
|
||||
|
||||
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
|
||||
@@ -68,7 +68,7 @@ void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm
|
||||
|
||||
// Is marking active?
|
||||
generate_marking_inactive_test(masm);
|
||||
__ beq(CR0, filtered);
|
||||
__ beq(CCR0, filtered);
|
||||
|
||||
__ save_LR(R0);
|
||||
__ push_frame(frame_size, R0);
|
||||
@@ -118,8 +118,8 @@ static void generate_queue_insertion(MacroAssembler* masm, ByteSize index_offset
|
||||
// Can we store a value in the given thread's buffer?
|
||||
// (The index field is typed as size_t.)
|
||||
__ ld(temp, in_bytes(index_offset), R16_thread); // temp := *(index address)
|
||||
__ cmpdi(CR0, temp, 0); // jump to runtime if index == 0 (full buffer)
|
||||
__ beq(CR0, runtime);
|
||||
__ cmpdi(CCR0, temp, 0); // jump to runtime if index == 0 (full buffer)
|
||||
__ beq(CCR0, runtime);
|
||||
// The buffer is not full, store value into it.
|
||||
__ ld(R0, in_bytes(buffer_offset), R16_thread); // R0 := buffer address
|
||||
__ addi(temp, temp, -wordSize); // temp := next index
|
||||
@@ -154,7 +154,7 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, Decorator
|
||||
Label runtime, filtered;
|
||||
|
||||
generate_marking_inactive_test(masm);
|
||||
__ beq(CR0, filtered);
|
||||
__ beq(CCR0, filtered);
|
||||
|
||||
// Do we need to load the previous value?
|
||||
if (!preloaded) {
|
||||
@@ -171,12 +171,12 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, Decorator
|
||||
// Is the previous value null?
|
||||
if (preloaded && not_null) {
|
||||
#ifdef ASSERT
|
||||
__ cmpdi(CR0, pre_val, 0);
|
||||
__ cmpdi(CCR0, pre_val, 0);
|
||||
__ asm_assert_ne("null oop not allowed (G1 pre)"); // Checked by caller.
|
||||
#endif
|
||||
} else {
|
||||
__ cmpdi(CR0, pre_val, 0);
|
||||
__ beq(CR0, filtered);
|
||||
__ cmpdi(CCR0, pre_val, 0);
|
||||
__ beq(CCR0, filtered);
|
||||
}
|
||||
|
||||
if (!preloaded && UseCompressedOops) {
|
||||
@@ -240,14 +240,14 @@ static Address generate_card_young_test(MacroAssembler* masm, const Register sto
|
||||
__ load_const_optimized(tmp1, (address)(ct->card_table()->byte_map_base()), tmp2);
|
||||
__ srdi(tmp2, store_addr, CardTable::card_shift()); // tmp1 := card address relative to card table base
|
||||
__ lbzx(R0, tmp1, tmp2); // tmp1 := card address
|
||||
__ cmpwi(CR0, R0, (int)G1CardTable::g1_young_card_val());
|
||||
__ cmpwi(CCR0, R0, (int)G1CardTable::g1_young_card_val());
|
||||
return Address(tmp1, tmp2); // return card address
|
||||
}
|
||||
|
||||
static void generate_card_dirty_test(MacroAssembler* masm, Address card_addr) {
|
||||
__ membar(Assembler::StoreLoad); // Must reload after StoreLoad membar due to concurrent refinement
|
||||
__ lbzx(R0, card_addr.base(), card_addr.index()); // tmp2 := card
|
||||
__ cmpwi(CR0, R0, (int)G1CardTable::dirty_card_val()); // tmp2 := card == dirty_card_val?
|
||||
__ cmpwi(CCR0, R0, (int)G1CardTable::dirty_card_val()); // tmp2 := card == dirty_card_val?
|
||||
}
|
||||
|
||||
void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, DecoratorSet decorators,
|
||||
@@ -262,24 +262,24 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
|
||||
CardTableBarrierSet* ct = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
|
||||
|
||||
generate_region_crossing_test(masm, store_addr, new_val);
|
||||
__ beq(CR0, filtered);
|
||||
__ beq(CCR0, filtered);
|
||||
|
||||
// Crosses regions, storing null?
|
||||
if (not_null) {
|
||||
#ifdef ASSERT
|
||||
__ cmpdi(CR0, new_val, 0);
|
||||
__ cmpdi(CCR0, new_val, 0);
|
||||
__ asm_assert_ne("null oop not allowed (G1 post)"); // Checked by caller.
|
||||
#endif
|
||||
} else {
|
||||
__ cmpdi(CR0, new_val, 0);
|
||||
__ beq(CR0, filtered);
|
||||
__ cmpdi(CCR0, new_val, 0);
|
||||
__ beq(CCR0, filtered);
|
||||
}
|
||||
|
||||
Address card_addr = generate_card_young_test(masm, store_addr, tmp1, tmp2);
|
||||
__ beq(CR0, filtered);
|
||||
__ beq(CCR0, filtered);
|
||||
|
||||
generate_card_dirty_test(masm, card_addr);
|
||||
__ beq(CR0, filtered);
|
||||
__ beq(CCR0, filtered);
|
||||
|
||||
__ li(R0, (int)G1CardTable::dirty_card_val());
|
||||
__ stbx(R0, card_addr.base(), card_addr.index()); // *(card address) := dirty_card_val
|
||||
@@ -371,14 +371,14 @@ void G1BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value
|
||||
Register tmp1, Register tmp2,
|
||||
MacroAssembler::PreservationLevel preservation_level) {
|
||||
Label done, not_weak;
|
||||
__ cmpdi(CR0, value, 0);
|
||||
__ beq(CR0, done); // Use null as-is.
|
||||
__ cmpdi(CCR0, value, 0);
|
||||
__ beq(CCR0, done); // Use null as-is.
|
||||
|
||||
__ clrrdi(tmp1, value, JNIHandles::tag_size);
|
||||
__ andi_(tmp2, value, JNIHandles::TypeTag::weak_global);
|
||||
__ ld(value, 0, tmp1); // Resolve (untagged) jobject.
|
||||
|
||||
__ beq(CR0, not_weak); // Test for jweak tag.
|
||||
__ beq(CCR0, not_weak); // Test for jweak tag.
|
||||
__ verify_oop(value, FILE_AND_LINE);
|
||||
g1_write_barrier_pre(masm, IN_NATIVE | ON_PHANTOM_OOP_REF,
|
||||
noreg, noreg, value,
|
||||
@@ -409,7 +409,7 @@ void G1BarrierSetAssembler::g1_write_barrier_pre_c2(MacroAssembler* masm,
|
||||
stub->initialize_registers(obj, pre_val, R16_thread, tmp1, tmp2);
|
||||
|
||||
generate_marking_inactive_test(masm);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CR0, Assembler::equal), *stub->entry());
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), *stub->entry());
|
||||
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
@@ -433,8 +433,8 @@ void G1BarrierSetAssembler::generate_c2_pre_barrier_stub(MacroAssembler* masm,
|
||||
__ ld(pre_val, 0, obj);
|
||||
}
|
||||
}
|
||||
__ cmpdi(CR0, pre_val, 0);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CR0, Assembler::equal), *stub->continuation());
|
||||
__ cmpdi(CCR0, pre_val, 0);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());
|
||||
|
||||
Register pre_val_decoded = pre_val;
|
||||
if (UseCompressedOops) {
|
||||
@@ -472,25 +472,25 @@ void G1BarrierSetAssembler::g1_write_barrier_post_c2(MacroAssembler* masm,
|
||||
if (null_check_required && CompressedOops::base() != nullptr) {
|
||||
// We prefer doing the null check after the region crossing check.
|
||||
// Only compressed oop modes with base != null require a null check here.
|
||||
__ cmpwi(CR0, new_val, 0);
|
||||
__ beq(CR0, *stub->continuation());
|
||||
__ cmpwi(CCR0, new_val, 0);
|
||||
__ beq(CCR0, *stub->continuation());
|
||||
null_check_required = false;
|
||||
}
|
||||
new_val_decoded = __ decode_heap_oop_not_null(tmp2, new_val);
|
||||
}
|
||||
|
||||
generate_region_crossing_test(masm, store_addr, new_val_decoded);
|
||||
__ beq(CR0, *stub->continuation());
|
||||
__ beq(CCR0, *stub->continuation());
|
||||
|
||||
// crosses regions, storing null?
|
||||
if (null_check_required) {
|
||||
__ cmpdi(CR0, new_val_decoded, 0);
|
||||
__ beq(CR0, *stub->continuation());
|
||||
__ cmpdi(CCR0, new_val_decoded, 0);
|
||||
__ beq(CCR0, *stub->continuation());
|
||||
}
|
||||
|
||||
Address card_addr = generate_card_young_test(masm, store_addr, tmp1, tmp2);
|
||||
assert(card_addr.base() == tmp1 && card_addr.index() == tmp2, "needed by post barrier stub");
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CR0, Assembler::equal), *stub->entry());
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), *stub->entry());
|
||||
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
@@ -504,7 +504,7 @@ void G1BarrierSetAssembler::generate_c2_post_barrier_stub(MacroAssembler* masm,
|
||||
__ bind(*stub->entry());
|
||||
|
||||
generate_card_dirty_test(masm, card_addr);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CR0, Assembler::equal), *stub->continuation());
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());
|
||||
|
||||
__ li(R0, (int)G1CardTable::dirty_card_val());
|
||||
__ stbx(R0, card_addr.base(), card_addr.index()); // *(card address) := dirty_card_val
|
||||
@@ -546,8 +546,8 @@ void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrier
|
||||
ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/);
|
||||
}
|
||||
|
||||
__ cmpdi(CR0, pre_val_reg, 0);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CR0, Assembler::equal), *stub->continuation());
|
||||
__ cmpdi(CCR0, pre_val_reg, 0);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());
|
||||
|
||||
address c_code = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
|
||||
//__ load_const_optimized(R0, c_code);
|
||||
@@ -567,8 +567,8 @@ void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarri
|
||||
Register addr_reg = stub->addr()->as_pointer_register();
|
||||
Register new_val_reg = stub->new_val()->as_register();
|
||||
|
||||
__ cmpdi(CR0, new_val_reg, 0);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CR0, Assembler::equal), *stub->continuation());
|
||||
__ cmpdi(CCR0, new_val_reg, 0);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());
|
||||
|
||||
address c_code = bs->post_barrier_c1_runtime_code_blob()->code_begin();
|
||||
//__ load_const_optimized(R0, c_code);
|
||||
@@ -604,7 +604,7 @@ void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler*
|
||||
|
||||
// Is marking still active?
|
||||
generate_marking_inactive_test(sasm);
|
||||
__ beq(CR0, marking_not_active);
|
||||
__ beq(CCR0, marking_not_active);
|
||||
|
||||
__ bind(restart);
|
||||
// Load the index into the SATB buffer. SATBMarkQueue::_index is a
|
||||
@@ -612,8 +612,8 @@ void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler*
|
||||
__ ld(tmp, satb_q_index_byte_offset, R16_thread);
|
||||
|
||||
// index == 0?
|
||||
__ cmpdi(CR0, tmp, 0);
|
||||
__ beq(CR0, refill);
|
||||
__ cmpdi(CCR0, tmp, 0);
|
||||
__ beq(CCR0, refill);
|
||||
|
||||
__ ld(tmp2, satb_q_buf_byte_offset, R16_thread);
|
||||
__ ld(pre_val, -8, R1_SP); // Load from stack.
|
||||
@@ -666,15 +666,15 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
|
||||
__ lbz(tmp, 0, addr); // tmp := [addr + cardtable]
|
||||
|
||||
// Return if young card.
|
||||
__ cmpwi(CR0, tmp, G1CardTable::g1_young_card_val());
|
||||
__ beq(CR0, ret);
|
||||
__ cmpwi(CCR0, tmp, G1CardTable::g1_young_card_val());
|
||||
__ beq(CCR0, ret);
|
||||
|
||||
// Return if sequential consistent value is already dirty.
|
||||
__ membar(Assembler::StoreLoad);
|
||||
__ lbz(tmp, 0, addr); // tmp := [addr + cardtable]
|
||||
|
||||
__ cmpwi(CR0, tmp, G1CardTable::dirty_card_val());
|
||||
__ beq(CR0, ret);
|
||||
__ cmpwi(CCR0, tmp, G1CardTable::dirty_card_val());
|
||||
__ beq(CCR0, ret);
|
||||
|
||||
// Not dirty.
|
||||
|
||||
@@ -692,8 +692,8 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
|
||||
__ ld(tmp2, dirty_card_q_index_byte_offset, R16_thread);
|
||||
|
||||
// index == 0?
|
||||
__ cmpdi(CR0, tmp2, 0);
|
||||
__ beq(CR0, refill);
|
||||
__ cmpdi(CCR0, tmp2, 0);
|
||||
__ beq(CCR0, refill);
|
||||
|
||||
__ ld(tmp, dirty_card_q_buf_byte_offset, R16_thread);
|
||||
__ addi(tmp2, tmp2, -oopSize);
|
||||
|
||||
@@ -1,6 +1,6 @@
 //
-// Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
-// Copyright (c) 2025 SAP SE. All rights reserved.
+// Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2024 SAP SE. All rights reserved.
 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 //
 // This code is free software; you can redistribute it and/or modify it
@@ -164,7 +164,7 @@ instruct g1CompareAndExchangeP(iRegPdst res, indirect mem, iRegPsrc oldval, iReg
|
||||
format %{ "cmpxchgd $newval, $mem" %}
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ cmpxchgd(CR0, $res$$Register, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
__ cmpxchgd(CCR0, $res$$Register, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
@@ -194,7 +194,7 @@ instruct g1CompareAndExchangeP_acq(iRegPdst res, indirect mem, iRegPsrc oldval,
|
||||
format %{ "cmpxchgd acq $newval, $mem" %}
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ cmpxchgd(CR0, $res$$Register, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
__ cmpxchgd(CCR0, $res$$Register, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
@@ -230,7 +230,7 @@ instruct g1CompareAndExchangeN(iRegNdst res, indirect mem, iRegNsrc oldval, iReg
|
||||
format %{ "cmpxchgw $newval, $mem" %}
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ cmpxchgw(CR0, $res$$Register, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
__ cmpxchgw(CCR0, $res$$Register, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
@@ -261,7 +261,7 @@ instruct g1CompareAndExchangeN_acq(iRegNdst res, indirect mem, iRegNsrc oldval,
|
||||
format %{ "cmpxchgw acq $newval, $mem" %}
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ cmpxchgw(CR0, $res$$Register, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
__ cmpxchgw(CCR0, $res$$Register, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
@@ -299,7 +299,7 @@ instruct g1CompareAndSwapP(iRegIdst res, indirect mem, iRegPsrc oldval, iRegPsrc
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ li($res$$Register, 0);
|
||||
__ cmpxchgd(CR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
__ cmpxchgd(CCR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
@@ -332,7 +332,7 @@ instruct g1CompareAndSwapP_acq(iRegIdst res, indirect mem, iRegPsrc oldval, iReg
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ li($res$$Register, 0);
|
||||
__ cmpxchgd(CR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
__ cmpxchgd(CCR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
@@ -371,7 +371,7 @@ instruct g1CompareAndSwapN(iRegIdst res, indirect mem, iRegNsrc oldval, iRegNsrc
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ li($res$$Register, 0);
|
||||
__ cmpxchgw(CR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
__ cmpxchgw(CCR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
@@ -405,7 +405,7 @@ instruct g1CompareAndSwapN_acq(iRegIdst res, indirect mem, iRegNsrc oldval, iReg
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ li($res$$Register, 0);
|
||||
__ cmpxchgw(CR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
__ cmpxchgw(CCR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
@@ -445,7 +445,7 @@ instruct weakG1CompareAndSwapP(iRegIdst res, indirect mem, iRegPsrc oldval, iReg
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ li($res$$Register, 0);
|
||||
__ cmpxchgd(CR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
__ cmpxchgd(CCR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
@@ -478,7 +478,7 @@ instruct weakG1CompareAndSwapP_acq(iRegIdst res, indirect mem, iRegPsrc oldval,
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ li($res$$Register, 0);
|
||||
__ cmpxchgd(CR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
__ cmpxchgd(CCR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
@@ -517,7 +517,7 @@ instruct weakG1CompareAndSwapN(iRegIdst res, indirect mem, iRegNsrc oldval, iReg
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ li($res$$Register, 0);
|
||||
__ cmpxchgw(CR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
__ cmpxchgw(CCR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
@@ -551,7 +551,7 @@ instruct weakG1CompareAndSwapN_acq(iRegIdst res, indirect mem, iRegNsrc oldval,
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ li($res$$Register, 0);
|
||||
__ cmpxchgw(CR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
__ cmpxchgw(CCR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
|
||||
@@ -1,6 +1,6 @@
 /*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
-* Copyright (c) 2018, 2025 SAP SE. All rights reserved.
+* Copyright (c) 2018, 2022 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -89,8 +89,8 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators,
|
||||
if (UseCompressedOops && in_heap) {
|
||||
if (L_handle_null != nullptr) { // Label provided.
|
||||
__ lwz(dst, ind_or_offs, base);
|
||||
__ cmpwi(CR0, dst, 0);
|
||||
__ beq(CR0, *L_handle_null);
|
||||
__ cmpwi(CCR0, dst, 0);
|
||||
__ beq(CCR0, *L_handle_null);
|
||||
__ decode_heap_oop_not_null(dst);
|
||||
} else if (not_null) { // Guaranteed to be not null.
|
||||
Register narrowOop = (tmp1 != noreg && CompressedOops::base_disjoint()) ? tmp1 : dst;
|
||||
@@ -103,8 +103,8 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators,
|
||||
} else {
|
||||
__ ld(dst, ind_or_offs, base);
|
||||
if (L_handle_null != nullptr) {
|
||||
__ cmpdi(CR0, dst, 0);
|
||||
__ beq(CR0, *L_handle_null);
|
||||
__ cmpdi(CCR0, dst, 0);
|
||||
__ beq(CCR0, *L_handle_null);
|
||||
}
|
||||
}
|
||||
break;
|
||||
@@ -118,11 +118,11 @@ void BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value,
|
||||
Register tmp1, Register tmp2,
|
||||
MacroAssembler::PreservationLevel preservation_level) {
|
||||
Label done, tagged, weak_tagged, verify;
|
||||
__ cmpdi(CR0, value, 0);
|
||||
__ beq(CR0, done); // Use null as-is.
|
||||
__ cmpdi(CCR0, value, 0);
|
||||
__ beq(CCR0, done); // Use null as-is.
|
||||
|
||||
__ andi_(tmp1, value, JNIHandles::tag_mask);
|
||||
__ bne(CR0, tagged); // Test for tag.
|
||||
__ bne(CCR0, tagged); // Test for tag.
|
||||
|
||||
__ access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, // no uncoloring
|
||||
value, (intptr_t)0, value, tmp1, tmp2, preservation_level);
|
||||
@@ -131,7 +131,7 @@ void BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value,
|
||||
__ bind(tagged);
|
||||
__ andi_(tmp1, value, JNIHandles::TypeTag::weak_global);
|
||||
__ clrrdi(value, value, JNIHandles::tag_size); // Untag.
|
||||
__ bne(CR0, weak_tagged); // Test for jweak tag.
|
||||
__ bne(CCR0, weak_tagged); // Test for jweak tag.
|
||||
|
||||
__ access_load_at(T_OBJECT, IN_NATIVE,
|
||||
value, (intptr_t)0, value, tmp1, tmp2, preservation_level);
|
||||
@@ -152,14 +152,14 @@ void BarrierSetAssembler::resolve_global_jobject(MacroAssembler* masm, Register
|
||||
MacroAssembler::PreservationLevel preservation_level) {
|
||||
Label done;
|
||||
|
||||
__ cmpdi(CR0, value, 0);
|
||||
__ beq(CR0, done); // Use null as-is.
|
||||
__ cmpdi(CCR0, value, 0);
|
||||
__ beq(CCR0, done); // Use null as-is.
|
||||
|
||||
#ifdef ASSERT
|
||||
{
|
||||
Label valid_global_tag;
|
||||
__ andi_(tmp1, value, JNIHandles::TypeTag::global);
|
||||
__ bne(CR0, valid_global_tag); // Test for global tag.
|
||||
__ bne(CCR0, valid_global_tag); // Test for global tag.
|
||||
__ stop("non global jobject using resolve_global_jobject");
|
||||
__ bind(valid_global_tag);
|
||||
}
|
||||
@@ -200,9 +200,9 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Register t
|
||||
|
||||
// Low order half of 64 bit value is currently used.
|
||||
__ ld(R0, in_bytes(bs_nm->thread_disarmed_guard_value_offset()), R16_thread);
|
||||
__ cmpw(CR0, R0, tmp);
|
||||
__ cmpw(CCR0, R0, tmp);
|
||||
|
||||
__ bnectrl(CR0);
|
||||
__ bnectrl(CCR0);
|
||||
|
||||
// Oops may have been changed. Make those updates observable.
|
||||
// "isync" can serve both, data and instruction patching.
|
||||
@@ -229,8 +229,8 @@ void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler *masm, Register tmp1,
|
||||
Label bad_call, skip_barrier;
|
||||
|
||||
// Fast path: If no method is given, the call is definitely bad.
|
||||
__ cmpdi(CR0, R19_method, 0);
|
||||
__ beq(CR0, bad_call);
|
||||
__ cmpdi(CCR0, R19_method, 0);
|
||||
__ beq(CCR0, bad_call);
|
||||
|
||||
// Load class loader data to determine whether the method's holder is concurrently unloading.
|
||||
__ load_method_holder(tmp1, R19_method);
|
||||
@@ -238,14 +238,14 @@ void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler *masm, Register tmp1,
|
||||
|
||||
// Fast path: If class loader is strong, the holder cannot be unloaded.
|
||||
__ lwz(tmp2, in_bytes(ClassLoaderData::keep_alive_ref_count_offset()), tmp1_class_loader_data);
|
||||
__ cmpdi(CR0, tmp2, 0);
|
||||
__ bne(CR0, skip_barrier);
|
||||
__ cmpdi(CCR0, tmp2, 0);
|
||||
__ bne(CCR0, skip_barrier);
|
||||
|
||||
// Class loader is weak. Determine whether the holder is still alive.
|
||||
__ ld(tmp2, in_bytes(ClassLoaderData::holder_offset()), tmp1_class_loader_data);
|
||||
__ resolve_weak_handle(tmp2, tmp1, tmp3, MacroAssembler::PreservationLevel::PRESERVATION_FRAME_LR_GP_FP_REGS);
|
||||
__ cmpdi(CR0, tmp2, 0);
|
||||
__ bne(CR0, skip_barrier);
|
||||
__ cmpdi(CCR0, tmp2, 0);
|
||||
__ bne(CCR0, skip_barrier);
|
||||
|
||||
__ bind(bad_call);
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
 /*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
-* Copyright (c) 2018, 2025 SAP SE. All rights reserved.
+* Copyright (c) 2018, 2021 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -49,7 +49,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
 Label Lskip_loop, Lstore_loop;

 __ sldi_(count, count, LogBytesPerHeapOop);
-__ beq(CR0, Lskip_loop); // zero length
+__ beq(CCR0, Lskip_loop); // zero length
 __ addi(count, count, -BytesPerHeapOop);
 __ add(count, addr, count);
 // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)

@@ -1,6 +1,6 @@
 /*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
-* Copyright (c) 2018, 2025 SAP SE. All rights reserved.
+* Copyright (c) 2018, 2021 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -80,8 +80,8 @@ void ModRefBarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register v
 Register tmp1, Register tmp2,
 MacroAssembler::PreservationLevel preservation_level) {
 Label done;
-__ cmpdi(CR0, value, 0);
-__ beq(CR0, done); // Use null as-is.
+__ cmpdi(CCR0, value, 0);
+__ beq(CCR0, done); // Use null as-is.

 __ clrrdi(tmp1, value, JNIHandles::tag_size);
 __ ld(value, 0, tmp1); // Resolve (untagged) jobject.

@@ -1,6 +1,6 @@
 /*
-* Copyright (c) 2018, 2025, Red Hat, Inc. All rights reserved.
-* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
+* Copyright (c) 2018, 2024, Red Hat, Inc. All rights reserved.
+* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -102,8 +102,8 @@ void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler *masm, Dec
|
||||
Label skip_prologue;
|
||||
|
||||
// Fast path: Array is of length zero.
|
||||
__ cmpdi(CR0, count, 0);
|
||||
__ beq(CR0, skip_prologue);
|
||||
__ cmpdi(CCR0, count, 0);
|
||||
__ beq(CCR0, skip_prologue);
|
||||
|
||||
/* ==== Check whether barrier is required (gc state) ==== */
|
||||
__ lbz(R11_tmp, in_bytes(ShenandoahThreadLocalData::gc_state_offset()),
|
||||
@@ -118,7 +118,7 @@ void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler *masm, Dec
|
||||
: ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING;
|
||||
|
||||
__ andi_(R11_tmp, R11_tmp, required_states);
|
||||
__ beq(CR0, skip_prologue);
|
||||
__ beq(CCR0, skip_prologue);
|
||||
|
||||
/* ==== Invoke runtime ==== */
|
||||
// Save to-be-preserved registers.
|
||||
@@ -216,7 +216,7 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_impl(MacroAssembler *masm
|
||||
__ lbz(tmp1, in_bytes(ShenandoahThreadLocalData::gc_state_offset()), R16_thread);
|
||||
|
||||
__ andi_(tmp1, tmp1, ShenandoahHeap::MARKING);
|
||||
__ beq(CR0, skip_barrier);
|
||||
__ beq(CCR0, skip_barrier);
|
||||
|
||||
/* ==== Determine the reference's previous value ==== */
|
||||
bool preloaded_mode = base == noreg;
|
||||
@@ -235,12 +235,12 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_impl(MacroAssembler *masm
|
||||
|
||||
if ((decorators & IS_NOT_NULL) != 0) {
|
||||
#ifdef ASSERT
|
||||
__ cmpdi(CR0, pre_val, 0);
|
||||
__ cmpdi(CCR0, pre_val, 0);
|
||||
__ asm_assert_ne("null oop is not allowed");
|
||||
#endif // ASSERT
|
||||
} else {
|
||||
__ cmpdi(CR0, pre_val, 0);
|
||||
__ beq(CR0, skip_barrier);
|
||||
__ cmpdi(CCR0, pre_val, 0);
|
||||
__ beq(CCR0, skip_barrier);
|
||||
}
|
||||
} else {
|
||||
// Load from the reference address to determine the reference's current value (before the store is being performed).
|
||||
@@ -254,8 +254,8 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_impl(MacroAssembler *masm
|
||||
__ ld(pre_val, ind_or_offs, base);
|
||||
}
|
||||
|
||||
__ cmpdi(CR0, pre_val, 0);
|
||||
__ beq(CR0, skip_barrier);
|
||||
__ cmpdi(CCR0, pre_val, 0);
|
||||
__ beq(CCR0, skip_barrier);
|
||||
|
||||
if (UseCompressedOops) {
|
||||
__ decode_heap_oop_not_null(pre_val);
|
||||
@@ -271,8 +271,8 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_impl(MacroAssembler *masm
|
||||
// If not, jump to the runtime to commit the buffer and to allocate a new one.
|
||||
// (The buffer's index corresponds to the amount of remaining free space.)
|
||||
__ ld(Rindex, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()), R16_thread);
|
||||
__ cmpdi(CR0, Rindex, 0);
|
||||
__ beq(CR0, runtime); // If index == 0 (buffer is full), goto runtime.
|
||||
__ cmpdi(CCR0, Rindex, 0);
|
||||
__ beq(CCR0, runtime); // If index == 0 (buffer is full), goto runtime.
|
||||
|
||||
// Capacity suffices. Decrement the queue's size by the size of one oop.
|
||||
// (The buffer is filled contrary to the heap's growing direction, i.e., it is filled downwards.)
|
||||
@@ -362,9 +362,9 @@ void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssemb
|
||||
"marked value must equal the value obtained when all lock bits are being set");
|
||||
if (VM_Version::has_isel()) {
|
||||
__ xori(tmp1, tmp1, markWord::lock_mask_in_place);
|
||||
__ isel(dst, CR0, Assembler::equal, false, tmp1);
|
||||
__ isel(dst, CCR0, Assembler::equal, false, tmp1);
|
||||
} else {
|
||||
__ bne(CR0, done);
|
||||
__ bne(CCR0, done);
|
||||
__ xori(dst, tmp1, markWord::lock_mask_in_place);
|
||||
}
|
||||
|
||||
@@ -402,7 +402,7 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier_impl(
|
||||
if (is_strong) {
|
||||
// For strong references, the heap is considered stable if "has forwarded" is not active.
|
||||
__ andi_(tmp1, tmp2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION);
|
||||
__ beq(CR0, skip_barrier);
|
||||
__ beq(CCR0, skip_barrier);
|
||||
#ifdef ASSERT
|
||||
// "evacuation" -> (implies) "has forwarded". If we reach this code, "has forwarded" must thus be set.
|
||||
__ andi_(tmp1, tmp1, ShenandoahHeap::HAS_FORWARDED);
|
||||
@@ -414,10 +414,10 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier_impl(
|
||||
// The additional phase conditions are in place to avoid the resurrection of weak references (see JDK-8266440).
|
||||
Label skip_fastpath;
|
||||
__ andi_(tmp1, tmp2, ShenandoahHeap::WEAK_ROOTS);
|
||||
__ bne(CR0, skip_fastpath);
|
||||
__ bne(CCR0, skip_fastpath);
|
||||
|
||||
__ andi_(tmp1, tmp2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION);
|
||||
__ beq(CR0, skip_barrier);
|
||||
__ beq(CCR0, skip_barrier);
|
||||
#ifdef ASSERT
|
||||
// "evacuation" -> (implies) "has forwarded". If we reach this code, "has forwarded" must thus be set.
|
||||
__ andi_(tmp1, tmp1, ShenandoahHeap::HAS_FORWARDED);
|
||||
@@ -453,7 +453,7 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier_impl(
|
||||
__ srdi(tmp1, dst, ShenandoahHeapRegion::region_size_bytes_shift_jint());
|
||||
__ lbzx(tmp2, tmp1, tmp2);
|
||||
__ andi_(tmp2, tmp2, 1);
|
||||
__ beq(CR0, skip_barrier);
|
||||
__ beq(CCR0, skip_barrier);
|
||||
}
|
||||
|
||||
/* ==== Invoke runtime ==== */
|
||||
@@ -639,8 +639,8 @@ void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler
|
||||
Label done;
|
||||
|
||||
// Fast path: Reference is null (JNI tags are zero for null pointers).
|
||||
__ cmpdi(CR0, obj, 0);
|
||||
__ beq(CR0, done);
|
||||
__ cmpdi(CCR0, obj, 0);
|
||||
__ beq(CCR0, done);
|
||||
|
||||
// Resolve jobject using standard implementation.
|
||||
BarrierSetAssembler::try_resolve_jobject_in_native(masm, dst, jni_env, obj, tmp, slowpath);
|
||||
@@ -651,7 +651,7 @@ void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler
|
||||
jni_env);
|
||||
|
||||
__ andi_(tmp, tmp, ShenandoahHeap::EVACUATION | ShenandoahHeap::HAS_FORWARDED);
|
||||
__ bne(CR0, slowpath);
|
||||
__ bne(CCR0, slowpath);
|
||||
|
||||
__ bind(done);
|
||||
__ block_comment("} try_resolve_jobject_in_native (shenandoahgc)");
|
||||
@@ -701,23 +701,23 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler *masm, Register b
|
||||
// Given that 'expected' must refer to the to-space object of an evacuated object (strong to-space invariant),
|
||||
// no special processing is required.
|
||||
if (UseCompressedOops) {
|
||||
__ cmpxchgw(CR0, current_value, expected, new_val, base_addr, MacroAssembler::MemBarNone,
|
||||
__ cmpxchgw(CCR0, current_value, expected, new_val, base_addr, MacroAssembler::MemBarNone,
|
||||
false, success_flag, nullptr, true);
|
||||
} else {
|
||||
__ cmpxchgd(CR0, current_value, expected, new_val, base_addr, MacroAssembler::MemBarNone,
|
||||
__ cmpxchgd(CCR0, current_value, expected, new_val, base_addr, MacroAssembler::MemBarNone,
|
||||
false, success_flag, nullptr, true);
|
||||
}
|
||||
|
||||
// Skip the rest of the barrier if the CAS operation succeeds immediately.
|
||||
// If it does not, the value stored at the address is either the from-space pointer of the
|
||||
// referenced object (success criteria s2)) or simply another object.
|
||||
__ beq(CR0, done);
|
||||
__ beq(CCR0, done);
|
||||
|
||||
/* ==== Step 2 (Null check) ==== */
|
||||
// The success criteria s2) cannot be matched with a null pointer
|
||||
// (null pointers cannot be subject to concurrent evacuation). The failure of the CAS operation is thus legitimate.
|
||||
__ cmpdi(CR0, current_value, 0);
|
||||
__ beq(CR0, done);
|
||||
__ cmpdi(CCR0, current_value, 0);
|
||||
__ beq(CCR0, done);
|
||||
|
||||
/* ==== Step 3 (reference pointer refers to from-space version; success criteria s2)) ==== */
|
||||
// To check whether the reference pointer refers to the from-space version, the forward
|
||||
@@ -737,15 +737,15 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler *masm, Register b
|
||||
// Load zero into register for the potential failure case.
|
||||
__ li(success_flag, 0);
|
||||
}
|
||||
__ cmpd(CR0, current_value, expected);
|
||||
__ bne(CR0, done);
|
||||
__ cmpd(CCR0, current_value, expected);
|
||||
__ bne(CCR0, done);
|
||||
|
||||
// Discard fetched value as it might be a reference to the from-space version of an object.
|
||||
if (UseCompressedOops) {
|
||||
__ cmpxchgw(CR0, R0, initial_value, new_val, base_addr, MacroAssembler::MemBarNone,
|
||||
__ cmpxchgw(CCR0, R0, initial_value, new_val, base_addr, MacroAssembler::MemBarNone,
|
||||
false, success_flag);
|
||||
} else {
|
||||
__ cmpxchgd(CR0, R0, initial_value, new_val, base_addr, MacroAssembler::MemBarNone,
|
||||
__ cmpxchgd(CCR0, R0, initial_value, new_val, base_addr, MacroAssembler::MemBarNone,
|
||||
false, success_flag);
|
||||
}
|
||||
|
||||
@@ -770,7 +770,7 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler *masm, Register b
|
||||
// guaranteed to be the case.
|
||||
// In case of a concurrent update, the CAS would be retried again. This is legitimate
|
||||
// in terms of program correctness (even though it is not desired).
|
||||
__ bne(CR0, step_four);
|
||||
__ bne(CCR0, step_four);
|
||||
|
||||
__ bind(done);
|
||||
__ block_comment("} cmpxchg_oop (shenandoahgc)");
|
||||
@@ -789,7 +789,7 @@ void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssemb
|
||||
__ sldi_(count, count, LogBytesPerHeapOop);
|
||||
|
||||
// Zero length? Skip.
|
||||
__ beq(CR0, L_skip_loop);
|
||||
__ beq(CCR0, L_skip_loop);
|
||||
|
||||
__ addi(count, count, -BytesPerHeapOop);
|
||||
__ add(count, addr, count);
|
||||
@@ -835,8 +835,8 @@ void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler *ce, Shen
|
||||
}
|
||||
|
||||
// Fast path: Reference is null.
|
||||
__ cmpdi(CR0, pre_val, 0);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1_bhintNoHint, __ bi0(CR0, Assembler::equal), *stub->continuation());
|
||||
__ cmpdi(CCR0, pre_val, 0);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1_bhintNoHint, __ bi0(CCR0, Assembler::equal), *stub->continuation());
|
||||
|
||||
// Argument passing via the stack.
|
||||
__ std(pre_val, -8, R1_SP);
|
||||
@@ -866,7 +866,7 @@ void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assemble
|
||||
// Ensure that 'res' is 'R3_ARG1' and contains the same value as 'obj' to reduce the number of required
|
||||
// copy instructions.
|
||||
assert(R3_RET == res, "res must be r3");
|
||||
__ cmpd(CR0, res, obj);
|
||||
__ cmpd(CCR0, res, obj);
|
||||
__ asm_assert_eq("result register must contain the reference stored in obj");
|
||||
#endif
|
||||
|
||||
@@ -888,7 +888,7 @@ void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assemble
|
||||
__ lbzx(tmp2, tmp1, tmp2);
|
||||
|
||||
__ andi_(tmp2, tmp2, 1);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1_bhintNoHint, __ bi0(CR0, Assembler::equal), *stub->continuation());
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1_bhintNoHint, __ bi0(CCR0, Assembler::equal), *stub->continuation());
|
||||
}
|
||||
|
||||
address blob_addr = nullptr;
|
||||
@@ -946,13 +946,13 @@ void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAss
|
||||
__ lbz(R12_tmp2, in_bytes(ShenandoahThreadLocalData::gc_state_offset()), R16_thread);
|
||||
|
||||
__ andi_(R12_tmp2, R12_tmp2, ShenandoahHeap::MARKING);
|
||||
__ beq(CR0, skip_barrier);
|
||||
__ beq(CCR0, skip_barrier);
|
||||
|
||||
/* ==== Add previous value directly to thread-local SATB mark queue ==== */
|
||||
// Check queue's capacity. Jump to runtime if no free slot is available.
|
||||
__ ld(R12_tmp2, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()), R16_thread);
|
||||
__ cmpdi(CR0, R12_tmp2, 0);
|
||||
__ beq(CR0, runtime);
|
||||
__ cmpdi(CCR0, R12_tmp2, 0);
|
||||
__ beq(CCR0, runtime);
|
||||
|
||||
// Capacity suffices. Decrement the queue's size by one slot (size of one oop).
|
||||
__ addi(R12_tmp2, R12_tmp2, -wordSize);
|
||||
|
||||
@@ -1,6 +1,6 @@
 /*
 * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
-* Copyright (c) 2021, 2025 SAP SE. All rights reserved.
+* Copyright (c) 2021, 2024 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -169,7 +169,7 @@ void ZBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators
|
||||
// if the pointer is not dirty.
|
||||
// Only dirty pointers must be processed by this barrier, so we can skip it in case the latter condition holds true.
|
||||
__ and_(tmp1, tmp1, dst);
|
||||
__ beq(CR0, uncolor);
|
||||
__ beq(CCR0, uncolor);
|
||||
|
||||
/* ==== Invoke barrier ==== */
|
||||
{
|
||||
@@ -193,8 +193,8 @@ void ZBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators
|
||||
|
||||
// Slow-path has already uncolored
|
||||
if (L_handle_null != nullptr) {
|
||||
__ cmpdi(CR0, dst, 0);
|
||||
__ beq(CR0, *L_handle_null);
|
||||
__ cmpdi(CCR0, dst, 0);
|
||||
__ beq(CCR0, *L_handle_null);
|
||||
}
|
||||
__ b(done);
|
||||
|
||||
@@ -203,7 +203,7 @@ void ZBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators
|
||||
__ srdi(dst, dst, ZPointerLoadShift);
|
||||
} else {
|
||||
__ srdi_(dst, dst, ZPointerLoadShift);
|
||||
__ beq(CR0, *L_handle_null);
|
||||
__ beq(CCR0, *L_handle_null);
|
||||
}
|
||||
|
||||
__ bind(done);
|
||||
@@ -234,7 +234,7 @@ static void emit_store_fast_path_check(MacroAssembler* masm, Register base, Regi
|
||||
// A not relocatable object could have spurious raw null pointers in its fields after
|
||||
// getting promoted to the old generation.
|
||||
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodBits);
|
||||
__ cmplwi(CR0, R0, barrier_Relocation::unpatched);
|
||||
__ cmplwi(CCR0, R0, barrier_Relocation::unpatched);
|
||||
} else {
|
||||
__ ld(R0, ind_or_offs, base);
|
||||
// Stores on relocatable objects never need to deal with raw null pointers in fields.
|
||||
@@ -244,7 +244,7 @@ static void emit_store_fast_path_check(MacroAssembler* masm, Register base, Regi
|
||||
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreBadMask);
|
||||
__ andi_(R0, R0, barrier_Relocation::unpatched);
|
||||
}
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CR0, Assembler::equal), medium_path);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), medium_path);
|
||||
}
|
||||
|
||||
void ZBarrierSetAssembler::store_barrier_fast(MacroAssembler* masm,
|
||||
@@ -274,7 +274,7 @@ void ZBarrierSetAssembler::store_barrier_fast(MacroAssembler* masm,
|
||||
__ ld(R0, ind_or_offset, ref_base);
|
||||
__ ld(rnew_zpointer, in_bytes(ZThreadLocalData::store_bad_mask_offset()), R16_thread);
|
||||
__ and_(R0, R0, rnew_zpointer);
|
||||
__ bne(CR0, medium_path);
|
||||
__ bne(CCR0, medium_path);
|
||||
__ bind(medium_path_continuation);
|
||||
__ ld(rnew_zpointer, in_bytes(ZThreadLocalData::store_good_mask_offset()), R16_thread);
|
||||
}
|
||||
@@ -293,7 +293,7 @@ static void store_barrier_buffer_add(MacroAssembler* masm,
|
||||
// Combined pointer bump and check if the buffer is disabled or full
|
||||
__ ld(R0, in_bytes(ZStoreBarrierBuffer::current_offset()), tmp1);
|
||||
__ addic_(R0, R0, -(int)sizeof(ZStoreBarrierEntry));
|
||||
__ blt(CR0, slow_path);
|
||||
__ blt(CCR0, slow_path);
|
||||
__ std(R0, in_bytes(ZStoreBarrierBuffer::current_offset()), tmp1);
|
||||
|
||||
// Entry is at ZStoreBarrierBuffer (tmp1) + buffer_offset + scaled index (R0)
|
||||
@@ -327,8 +327,8 @@ void ZBarrierSetAssembler::store_barrier_medium(MacroAssembler* masm,
|
||||
// Atomic accesses can get to the medium fast path because the value was a
|
||||
// raw null value. If it was not null, then there is no doubt we need to take a slow path.
|
||||
__ ld(tmp, ind_or_offs, ref_base);
|
||||
__ cmpdi(CR0, tmp, 0);
|
||||
__ bne(CR0, slow_path);
|
||||
__ cmpdi(CCR0, tmp, 0);
|
||||
__ bne(CCR0, slow_path);
|
||||
|
||||
// If we get this far, we know there is a young raw null value in the field.
|
||||
// Try to self-heal null values for atomic accesses
|
||||
@@ -338,12 +338,12 @@ void ZBarrierSetAssembler::store_barrier_medium(MacroAssembler* masm,
|
||||
need_restore = true;
|
||||
}
|
||||
__ ld(R0, in_bytes(ZThreadLocalData::store_good_mask_offset()), R16_thread);
|
||||
__ cmpxchgd(CR0, tmp, (intptr_t)0, R0, ref_base,
|
||||
__ cmpxchgd(CCR0, tmp, (intptr_t)0, R0, ref_base,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, need_restore ? nullptr : &slow_path);
|
||||
if (need_restore) {
|
||||
__ sub(ref_base, ref_base, ind_or_offs);
|
||||
__ bne(CR0, slow_path);
|
||||
__ bne(CCR0, slow_path);
|
||||
}
|
||||
} else {
|
||||
// A non-atomic relocatable object won't get to the medium fast path due to a
|
||||
@@ -447,7 +447,7 @@ void ZBarrierSetAssembler::copy_load_at_fast(MacroAssembler* masm,
|
||||
Label& continuation) const {
|
||||
__ ldx(zpointer, addr);
|
||||
__ and_(R0, zpointer, load_bad_mask);
|
||||
__ bne(CR0, slow_path);
|
||||
__ bne(CCR0, slow_path);
|
||||
__ bind(continuation);
|
||||
}
|
||||
void ZBarrierSetAssembler::copy_load_at_slow(MacroAssembler* masm,
|
||||
@@ -480,7 +480,7 @@ void ZBarrierSetAssembler::copy_store_at_fast(MacroAssembler* masm,
|
||||
if (!dest_uninitialized) {
|
||||
__ ldx(R0, addr);
|
||||
__ and_(R0, R0, store_bad_mask);
|
||||
__ bne(CR0, medium_path);
|
||||
__ bne(CCR0, medium_path);
|
||||
__ bind(continuation);
|
||||
}
|
||||
__ rldimi(zpointer, store_good_mask, 0, 64 - ZPointerLoadShift); // Replace color bits.
|
||||
@@ -515,8 +515,8 @@ void ZBarrierSetAssembler::copy_store_at_slow(MacroAssembler* masm,
|
||||
void ZBarrierSetAssembler::generate_disjoint_oop_copy(MacroAssembler* masm, bool dest_uninitialized) {
|
||||
const Register zpointer = R2, tmp = R9;
|
||||
Label done, loop, load_bad, load_good, store_bad, store_good;
|
||||
__ cmpdi(CR0, R5_ARG3, 0);
|
||||
__ beq(CR0, done);
|
||||
__ cmpdi(CCR0, R5_ARG3, 0);
|
||||
__ beq(CCR0, done);
|
||||
__ mtctr(R5_ARG3);
|
||||
|
||||
__ align(32);
|
||||
@@ -539,7 +539,7 @@ void ZBarrierSetAssembler::generate_conjoint_oop_copy(MacroAssembler* masm, bool
|
||||
const Register zpointer = R2, tmp = R9;
|
||||
Label done, loop, load_bad, load_good, store_bad, store_good;
|
||||
__ sldi_(R0, R5_ARG3, 3);
|
||||
__ beq(CR0, done);
|
||||
__ beq(CCR0, done);
|
||||
__ mtctr(R5_ARG3);
|
||||
// Point behind last elements and copy backwards.
|
||||
__ add(R3_ARG1, R3_ARG1, R0);
|
||||
@@ -570,12 +570,12 @@ void ZBarrierSetAssembler::check_oop(MacroAssembler *masm, Register obj, const c
|
||||
Label done, skip_uncolor;
|
||||
// Skip (colored) null.
|
||||
__ srdi_(R0, obj, ZPointerLoadShift);
|
||||
__ beq(CR0, done);
|
||||
__ beq(CCR0, done);
|
||||
|
||||
// Check if ZAddressHeapBase << ZPointerLoadShift is set. If so, we need to uncolor.
|
||||
__ rldicl_(R0, obj, 64 - ZAddressHeapBaseShift - ZPointerLoadShift, 63);
|
||||
__ mr(R0, obj);
|
||||
__ beq(CR0, skip_uncolor);
|
||||
__ beq(CCR0, skip_uncolor);
|
||||
__ srdi(R0, obj, ZPointerLoadShift);
|
||||
__ bind(skip_uncolor);
|
||||
|
||||
@@ -594,7 +594,7 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, R
|
||||
|
||||
// Test for tag
|
||||
__ andi_(tmp, obj, JNIHandles::tag_mask);
|
||||
__ bne(CR0, tagged);
|
||||
__ bne(CCR0, tagged);
|
||||
|
||||
// Resolve local handle
|
||||
__ ld(dst, 0, obj);
|
||||
@@ -605,7 +605,7 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, R
|
||||
// Test for weak tag
|
||||
__ andi_(tmp, obj, JNIHandles::TypeTag::weak_global);
|
||||
__ clrrdi(dst, obj, JNIHandles::tag_size); // Untag.
|
||||
__ bne(CR0, weak_tagged);
|
||||
__ bne(CCR0, weak_tagged);
|
||||
|
||||
// Resolve global handle
|
||||
__ ld(dst, 0, dst);
|
||||
@@ -620,7 +620,7 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, R
|
||||
|
||||
__ bind(check_color);
|
||||
__ and_(tmp, tmp, dst);
|
||||
__ bne(CR0, slowpath);
|
||||
__ bne(CCR0, slowpath);
|
||||
|
||||
// Uncolor
|
||||
__ srdi(dst, dst, ZPointerLoadShift);
|
||||
@@ -666,7 +666,7 @@ void ZBarrierSetAssembler::generate_c1_load_barrier(LIR_Assembler* ce,
|
||||
ZLoadBarrierStubC1* stub,
|
||||
bool on_non_strong) const {
|
||||
check_color(ce, ref, on_non_strong);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CR0, Assembler::equal), *stub->entry());
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), *stub->entry());
|
||||
z_uncolor(ce, ref);
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
//
|
||||
// Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
// Copyright (c) 2025 SAP SE. All rights reserved.
|
||||
// Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
// Copyright (c) 2021 SAP SE. All rights reserved.
|
||||
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
//
|
||||
// This code is free software; you can redistribute it and/or modify it
|
||||
@@ -70,7 +70,7 @@ static void z_load_barrier(MacroAssembler* masm, const MachNode* node, Address r
|
||||
check_color(masm, ref, on_non_strong);
|
||||
|
||||
ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref);
|
||||
__ bne_far(CR0, *stub->entry(), MacroAssembler::bc_far_optimize_on_relocate);
|
||||
__ bne_far(CCR0, *stub->entry(), MacroAssembler::bc_far_optimize_on_relocate);
|
||||
|
||||
z_uncolor(masm, ref);
|
||||
__ bind(*stub->continuation());
|
||||
@@ -97,7 +97,7 @@ static void z_compare_and_swap(MacroAssembler* masm, const MachNode* node,
|
||||
Register rold_zpointer = tmp1, rnew_zpointer = tmp2;
|
||||
z_store_barrier(masm, node, mem, 0, newval, rnew_zpointer, true /* is_atomic */);
|
||||
z_color(masm, rold_zpointer, oldval);
|
||||
__ cmpxchgd(CR0, R0, rold_zpointer, rnew_zpointer, mem,
|
||||
__ cmpxchgd(CCR0, R0, rold_zpointer, rnew_zpointer, mem,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, nullptr, true,
|
||||
false /* we could support weak, but benefit is questionable */);
|
||||
|
||||
@@ -119,7 +119,7 @@ static void z_compare_and_exchange(MacroAssembler* masm, const MachNode* node,
|
||||
Register rold_zpointer = R0, rnew_zpointer = tmp;
|
||||
z_store_barrier(masm, node, mem, 0, newval, rnew_zpointer, true /* is_atomic */);
|
||||
z_color(masm, rold_zpointer, oldval);
|
||||
__ cmpxchgd(CR0, res, rold_zpointer, rnew_zpointer, mem,
|
||||
__ cmpxchgd(CCR0, res, rold_zpointer, rnew_zpointer, mem,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, nullptr, true,
|
||||
false /* we could support weak, but benefit is questionable */);
|
||||
z_uncolor(masm, res);
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -126,10 +126,10 @@ void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg)
|
||||
// means that this code is called *during* popframe handling - we
|
||||
// don't want to reenter.
|
||||
andi_(R0, scratch_reg, JavaThread::popframe_pending_bit);
|
||||
beq(CR0, L);
|
||||
beq(CCR0, L);
|
||||
|
||||
andi_(R0, scratch_reg, JavaThread::popframe_processing_bit);
|
||||
bne(CR0, L);
|
||||
bne(CCR0, L);
|
||||
|
||||
// Call the Interpreter::remove_activation_preserving_args_entry()
|
||||
// func to get the address of the same-named entrypoint in the
|
||||
@@ -150,12 +150,12 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg)
|
||||
if (JvmtiExport::can_force_early_return()) {
|
||||
Label Lno_early_ret;
|
||||
ld(Rthr_state_addr, in_bytes(JavaThread::jvmti_thread_state_offset()), R16_thread);
|
||||
cmpdi(CR0, Rthr_state_addr, 0);
|
||||
beq(CR0, Lno_early_ret);
|
||||
cmpdi(CCR0, Rthr_state_addr, 0);
|
||||
beq(CCR0, Lno_early_ret);
|
||||
|
||||
lwz(R0, in_bytes(JvmtiThreadState::earlyret_state_offset()), Rthr_state_addr);
|
||||
cmpwi(CR0, R0, JvmtiThreadState::earlyret_pending);
|
||||
bne(CR0, Lno_early_ret);
|
||||
cmpwi(CCR0, R0, JvmtiThreadState::earlyret_pending);
|
||||
bne(CCR0, Lno_early_ret);
|
||||
|
||||
// Jump to Interpreter::_earlyret_entry.
|
||||
lwz(R3_ARG1, in_bytes(JvmtiThreadState::earlyret_tos_offset()), Rthr_state_addr);
|
||||
@@ -229,7 +229,7 @@ void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, Register byt
|
||||
ld(R0, in_bytes(JavaThread::polling_word_offset()), R16_thread);
|
||||
// Armed page has poll_bit set, if poll bit is cleared just continue.
|
||||
andi_(R0, R0, SafepointMechanism::poll_bit());
|
||||
beq(CR0, dispatch);
|
||||
beq(CCR0, dispatch);
|
||||
load_dispatch_table(R11_scratch1, sfpt_tbl);
|
||||
align(32, 16);
|
||||
bind(dispatch);
|
||||
@@ -528,8 +528,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
|
||||
Label index_ok;
|
||||
lwa(R0, arrayOopDesc::length_offset_in_bytes(), result);
|
||||
sldi(R0, R0, LogBytesPerHeapOop);
|
||||
cmpd(CR0, index, R0);
|
||||
blt(CR0, index_ok);
|
||||
cmpd(CCR0, index, R0);
|
||||
blt(CCR0, index_ok);
|
||||
stop("resolved reference index out of bounds");
|
||||
bind(index_ok);
|
||||
#endif
|
||||
@@ -592,8 +592,8 @@ void InterpreterMacroAssembler::index_check_without_pop(Register Rarray, Registe
|
||||
|
||||
// Array nullcheck
|
||||
if (!ImplicitNullChecks) {
|
||||
cmpdi(CR0, Rarray, 0);
|
||||
beq(CR0, LisNull);
|
||||
cmpdi(CCR0, Rarray, 0);
|
||||
beq(CCR0, LisNull);
|
||||
} else {
|
||||
null_check_throw(Rarray, arrayOopDesc::length_offset_in_bytes(), /*temp*/RsxtIndex);
|
||||
}
|
||||
@@ -605,9 +605,9 @@ void InterpreterMacroAssembler::index_check_without_pop(Register Rarray, Registe
|
||||
|
||||
// Index check
|
||||
lwz(Rlength, arrayOopDesc::length_offset_in_bytes(), Rarray);
|
||||
cmplw(CR0, Rindex, Rlength);
|
||||
cmplw(CCR0, Rindex, Rlength);
|
||||
sldi(RsxtIndex, RsxtIndex, index_shift);
|
||||
blt(CR0, LnotOOR);
|
||||
blt(CCR0, LnotOOR);
|
||||
// Index should be in R17_tos, array should be in R4_ARG2.
|
||||
mr_if_needed(R17_tos, Rindex);
|
||||
mr_if_needed(R4_ARG2, Rarray);
|
||||
@@ -687,11 +687,11 @@ void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
|
||||
push(state);
|
||||
|
||||
// Skip if we don't have to unlock.
|
||||
testbitdi(CR0, R0, Raccess_flags, JVM_ACC_SYNCHRONIZED_BIT);
|
||||
beq(CR0, Lunlocked);
|
||||
testbitdi(CCR0, R0, Raccess_flags, JVM_ACC_SYNCHRONIZED_BIT);
|
||||
beq(CCR0, Lunlocked);
|
||||
|
||||
cmpwi(CR0, Rdo_not_unlock_flag, 0);
|
||||
bne(CR0, Lno_unlock);
|
||||
cmpwi(CCR0, Rdo_not_unlock_flag, 0);
|
||||
bne(CCR0, Lno_unlock);
|
||||
}
|
||||
|
||||
// Unlock
|
||||
@@ -705,8 +705,8 @@ void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
|
||||
-(frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base
|
||||
|
||||
ld(R0, BasicObjectLock::obj_offset(), Rmonitor_base);
|
||||
cmpdi(CR0, R0, 0);
|
||||
bne(CR0, Lunlock);
|
||||
cmpdi(CCR0, R0, 0);
|
||||
bne(CCR0, Lunlock);
|
||||
|
||||
// If it's already unlocked, throw exception.
|
||||
if (throw_monitor_exception) {
|
||||
@@ -740,7 +740,7 @@ void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
|
||||
addi(Rmonitor_base, Rmonitor_base, - frame::ijava_state_size); // Monitor base
|
||||
|
||||
subf_(Riterations, R26_monitor, Rmonitor_base);
|
||||
ble(CR0, Lno_unlock);
|
||||
ble(CCR0, Lno_unlock);
|
||||
|
||||
addi(Rcurrent_obj_addr, Rmonitor_base,
|
||||
in_bytes(BasicObjectLock::obj_offset()) - frame::interpreter_frame_monitor_size_in_bytes());
|
||||
@@ -759,8 +759,8 @@ void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
|
||||
bind(Lloop);
|
||||
|
||||
// Check if current entry is used.
|
||||
cmpdi(CR0, Rcurrent_obj, 0);
|
||||
bne(CR0, Lexception);
|
||||
cmpdi(CCR0, Rcurrent_obj, 0);
|
||||
bne(CCR0, Lexception);
|
||||
// Preload next iteration's compare value.
|
||||
ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
|
||||
addi(Rcurrent_obj_addr, Rcurrent_obj_addr, -delta);
|
||||
@@ -816,29 +816,29 @@ void InterpreterMacroAssembler::narrow(Register result) {
|
||||
Label notBool, notByte, notChar, done;
|
||||
|
||||
// common case first
|
||||
cmpwi(CR0, ret_type, T_INT);
|
||||
beq(CR0, done);
|
||||
cmpwi(CCR0, ret_type, T_INT);
|
||||
beq(CCR0, done);
|
||||
|
||||
cmpwi(CR0, ret_type, T_BOOLEAN);
|
||||
bne(CR0, notBool);
|
||||
cmpwi(CCR0, ret_type, T_BOOLEAN);
|
||||
bne(CCR0, notBool);
|
||||
andi(result, result, 0x1);
|
||||
b(done);
|
||||
|
||||
bind(notBool);
|
||||
cmpwi(CR0, ret_type, T_BYTE);
|
||||
bne(CR0, notByte);
|
||||
cmpwi(CCR0, ret_type, T_BYTE);
|
||||
bne(CCR0, notByte);
|
||||
extsb(result, result);
|
||||
b(done);
|
||||
|
||||
bind(notByte);
|
||||
cmpwi(CR0, ret_type, T_CHAR);
|
||||
bne(CR0, notChar);
|
||||
cmpwi(CCR0, ret_type, T_CHAR);
|
||||
bne(CCR0, notChar);
|
||||
andi(result, result, 0xffff);
|
||||
b(done);
|
||||
|
||||
bind(notChar);
|
||||
// cmpwi(CR0, ret_type, T_SHORT); // all that's left
|
||||
// bne(CR0, done);
|
||||
// cmpwi(CCR0, ret_type, T_SHORT); // all that's left
|
||||
// bne(CCR0, done);
|
||||
extsh(result, result);
|
||||
|
||||
// Nothing to do for T_INT
|
||||
@@ -893,8 +893,8 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
|
||||
// check if already enabled - if so no re-enabling needed
|
||||
assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
|
||||
lwz(R0, in_bytes(JavaThread::stack_guard_state_offset()), R16_thread);
|
||||
cmpwi(CR0, R0, StackOverflow::stack_guard_enabled);
|
||||
beq_predict_taken(CR0, no_reserved_zone_enabling);
|
||||
cmpwi(CCR0, R0, StackOverflow::stack_guard_enabled);
|
||||
beq_predict_taken(CCR0, no_reserved_zone_enabling);
|
||||
|
||||
// Compare frame pointers. There is no good stack pointer, as with stack
|
||||
// frame compression we can get different SPs when we do calls. A subsequent
|
||||
@@ -902,8 +902,8 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
|
||||
// inner call of the method annotated with ReservedStack.
|
||||
ld_ptr(R0, JavaThread::reserved_stack_activation_offset(), R16_thread);
|
||||
ld_ptr(R11_scratch1, _abi0(callers_sp), R1_SP); // Load frame pointer.
|
||||
cmpld(CR0, R11_scratch1, R0);
|
||||
blt_predict_taken(CR0, no_reserved_zone_enabling);
|
||||
cmpld(CCR0, R11_scratch1, R0);
|
||||
blt_predict_taken(CCR0, no_reserved_zone_enabling);
|
||||
|
||||
// Enable reserved zone again, throw stack overflow exception.
|
||||
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), R16_thread);
|
||||
@@ -961,8 +961,8 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
|
||||
if (DiagnoseSyncOnValueBasedClasses != 0) {
|
||||
load_klass(tmp, object);
|
||||
lbz(tmp, in_bytes(Klass::misc_flags_offset()), tmp);
|
||||
testbitdi(CR0, R0, tmp, exact_log2(KlassFlags::_misc_is_value_based_class));
|
||||
bne(CR0, slow_case);
|
||||
testbitdi(CCR0, R0, tmp, exact_log2(KlassFlags::_misc_is_value_based_class));
|
||||
bne(CCR0, slow_case);
|
||||
}
|
||||
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
@@ -989,8 +989,8 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
|
||||
addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
|
||||
|
||||
// Must fence, otherwise, preceding store(s) may float below cmpxchg.
|
||||
// CmpxchgX sets CR0 to cmpX(current, displaced).
|
||||
cmpxchgd(/*flag=*/CR0,
|
||||
// CmpxchgX sets CCR0 to cmpX(current, displaced).
|
||||
cmpxchgd(/*flag=*/CCR0,
|
||||
/*current_value=*/current_header,
|
||||
/*compare_value=*/header, /*exchange_value=*/monitor,
|
||||
/*where=*/object_mark_addr,
|
||||
@@ -1021,7 +1021,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
|
||||
and_(R0/*==0?*/, current_header, tmp);
|
||||
// If condition is true we are done and hence we can store 0 in the displaced
|
||||
// header indicating it is a recursive lock.
|
||||
bne(CR0, slow_case);
|
||||
bne(CCR0, slow_case);
|
||||
std(R0/*==0!*/, mark_offset, monitor);
|
||||
b(count_locking);
|
||||
}
|
||||
@@ -1087,8 +1087,8 @@ void InterpreterMacroAssembler::unlock_object(Register monitor) {
|
||||
BasicLock::displaced_header_offset_in_bytes(), monitor);
|
||||
|
||||
// If the displaced header is zero, we have a recursive unlock.
|
||||
cmpdi(CR0, header, 0);
|
||||
beq(CR0, free_slot); // recursive unlock
|
||||
cmpdi(CCR0, header, 0);
|
||||
beq(CCR0, free_slot); // recursive unlock
|
||||
}
|
||||
|
||||
// } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
|
||||
@@ -1108,8 +1108,8 @@ void InterpreterMacroAssembler::unlock_object(Register monitor) {
|
||||
// We have the displaced header in displaced_header. If the lock is still
|
||||
// lightweight, it will contain the monitor address and we'll store the
|
||||
// displaced header back into the object's mark word.
|
||||
// CmpxchgX sets CR0 to cmpX(current, monitor).
|
||||
cmpxchgd(/*flag=*/CR0,
|
||||
// CmpxchgX sets CCR0 to cmpX(current, monitor).
|
||||
cmpxchgd(/*flag=*/CCR0,
|
||||
/*current_value=*/current_header,
|
||||
/*compare_value=*/monitor, /*exchange_value=*/header,
|
||||
/*where=*/object_mark_addr,
|
||||
@@ -1170,8 +1170,8 @@ void InterpreterMacroAssembler::call_from_interpreter(Register Rtarget_method, R
|
||||
// compiled code in threads for which the event is enabled. Check here for
|
||||
// interp_only_mode if these events CAN be enabled.
|
||||
Label done;
|
||||
cmpwi(CR0, Rinterp_only, 0);
|
||||
beq(CR0, done);
|
||||
cmpwi(CCR0, Rinterp_only, 0);
|
||||
beq(CCR0, done);
|
||||
ld(Rtarget_addr, in_bytes(Method::interpreter_entry_offset()), Rtarget_method);
|
||||
align(32, 12);
|
||||
bind(done);
|
||||
@@ -1180,8 +1180,8 @@ void InterpreterMacroAssembler::call_from_interpreter(Register Rtarget_method, R
|
||||
#ifdef ASSERT
|
||||
{
|
||||
Label Lok;
|
||||
cmpdi(CR0, Rtarget_addr, 0);
|
||||
bne(CR0, Lok);
|
||||
cmpdi(CCR0, Rtarget_addr, 0);
|
||||
bne(CCR0, Lok);
|
||||
stop("null entry point");
|
||||
bind(Lok);
|
||||
}
|
||||
@@ -1211,7 +1211,7 @@ void InterpreterMacroAssembler::call_from_interpreter(Register Rtarget_method, R
|
||||
sldi(Rscratch1, Rscratch1, Interpreter::logStackElementSize);
|
||||
add(Rscratch1, Rscratch1, Rscratch2); // Rscratch2 contains fp
|
||||
// Compare sender_sp with the derelativized top_frame_sp
|
||||
cmpd(CR0, R21_sender_SP, Rscratch1);
|
||||
cmpd(CCR0, R21_sender_SP, Rscratch1);
|
||||
asm_assert_eq("top_frame_sp incorrect");
|
||||
#endif
|
||||
|
||||
@@ -1234,8 +1234,8 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
|
||||
// Test ImethodDataPtr. If it is null, continue at the specified label.
|
||||
void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
|
||||
assert(ProfileInterpreter, "must be profiling interpreter");
|
||||
cmpdi(CR0, R28_mdx, 0);
|
||||
beq(CR0, zero_continue);
|
||||
cmpdi(CCR0, R28_mdx, 0);
|
||||
beq(CCR0, zero_continue);
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::verify_method_data_pointer() {
|
||||
@@ -1250,8 +1250,8 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
|
||||
ld(R12_scratch2, in_bytes(Method::const_offset()), R19_method);
|
||||
addi(R11_scratch1, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
|
||||
add(R11_scratch1, R12_scratch2, R12_scratch2);
|
||||
cmpd(CR0, R11_scratch1, R14_bcp);
|
||||
beq(CR0, verify_continue);
|
||||
cmpd(CCR0, R11_scratch1, R14_bcp);
|
||||
beq(CCR0, verify_continue);
|
||||
|
||||
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp ), R19_method, R14_bcp, R28_mdx);
|
||||
|
||||
@@ -1334,8 +1334,8 @@ void InterpreterMacroAssembler::test_mdp_data_at(int offset,
|
||||
assert(ProfileInterpreter, "must be profiling interpreter");
|
||||
|
||||
ld(test_out, offset, R28_mdx);
|
||||
cmpd(CR0, value, test_out);
|
||||
bne(CR0, not_equal_continue);
|
||||
cmpd(CCR0, value, test_out);
|
||||
bne(CCR0, not_equal_continue);
|
||||
}
|
||||
|
||||
// Update the method data pointer by the displacement located at some fixed
|
||||
@@ -1491,8 +1491,8 @@ void InterpreterMacroAssembler::profile_virtual_call(Register Rreceiver,
|
||||
Label skip_receiver_profile;
|
||||
if (receiver_can_be_null) {
|
||||
Label not_null;
|
||||
cmpdi(CR0, Rreceiver, 0);
|
||||
bne(CR0, not_null);
|
||||
cmpdi(CCR0, Rreceiver, 0);
|
||||
bne(CCR0, not_null);
|
||||
// We are making a call. Increment the count for null receiver.
|
||||
increment_mdp_data_at(in_bytes(CounterData::count_offset()), Rscratch1, Rscratch2);
|
||||
b(skip_receiver_profile);
|
||||
@@ -1681,8 +1681,8 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
|
||||
if (start_row == last_row) {
|
||||
// The only thing left to do is handle the null case.
|
||||
// Scratch1 contains test_out from test_mdp_data_at.
|
||||
cmpdi(CR0, scratch1, 0);
|
||||
beq(CR0, found_null);
|
||||
cmpdi(CCR0, scratch1, 0);
|
||||
beq(CCR0, found_null);
|
||||
// Receiver did not match any saved receiver and there is no empty row for it.
|
||||
// Increment total counter to indicate polymorphic case.
|
||||
increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);
|
||||
@@ -1691,8 +1691,8 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
|
||||
break;
|
||||
}
|
||||
// Since null is rare, make it be the branch-taken case.
|
||||
cmpdi(CR0, scratch1, 0);
|
||||
beq(CR0, found_null);
|
||||
cmpdi(CCR0, scratch1, 0);
|
||||
beq(CCR0, found_null);
|
||||
|
||||
// Put all the "Case 3" tests here.
|
||||
record_klass_in_profile_helper(receiver, scratch1, scratch2, start_row + 1, done);
|
||||
@@ -1734,27 +1734,27 @@ void InterpreterMacroAssembler::profile_obj_type(Register obj, Register mdo_addr
|
||||
ld(tmp, mdo_addr_offs, mdo_addr_base);
|
||||
|
||||
// Set null_seen if obj is 0.
|
||||
cmpdi(CR0, obj, 0);
|
||||
cmpdi(CCR0, obj, 0);
|
||||
ori(R0, tmp, TypeEntries::null_seen);
|
||||
beq(CR0, do_update);
|
||||
beq(CCR0, do_update);
|
||||
|
||||
load_klass(klass, obj);
|
||||
|
||||
clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
|
||||
// Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
|
||||
cmpd(CR1, R0, klass);
|
||||
cmpd(CCR1, R0, klass);
|
||||
// Klass seen before, nothing to do (regardless of unknown bit).
|
||||
//beq(CR1, do_nothing);
|
||||
//beq(CCR1, do_nothing);
|
||||
|
||||
andi_(R0, tmp, TypeEntries::type_unknown);
|
||||
// Already unknown. Nothing to do anymore.
|
||||
//bne(CR0, do_nothing);
|
||||
crorc(CR0, Assembler::equal, CR1, Assembler::equal); // cr0 eq = cr1 eq or cr0 ne
|
||||
beq(CR0, do_nothing);
|
||||
//bne(CCR0, do_nothing);
|
||||
crorc(CCR0, Assembler::equal, CCR1, Assembler::equal); // cr0 eq = cr1 eq or cr0 ne
|
||||
beq(CCR0, do_nothing);
|
||||
|
||||
clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
|
||||
orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
|
||||
beq(CR0, do_update); // First time here. Set profile type.
|
||||
beq(CCR0, do_update); // First time here. Set profile type.
|
||||
|
||||
// Different than before. Cannot keep accurate profile.
|
||||
ori(R0, tmp, TypeEntries::type_unknown);
|
||||
@@ -1785,8 +1785,8 @@ void InterpreterMacroAssembler::profile_arguments_type(Register callee,
|
||||
in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
|
||||
|
||||
lbz(tmp1, in_bytes(DataLayout::tag_offset()) - off_to_start, R28_mdx);
|
||||
cmpwi(CR0, tmp1, is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
|
||||
bne(CR0, profile_continue);
|
||||
cmpwi(CCR0, tmp1, is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
|
||||
bne(CCR0, profile_continue);
|
||||
|
||||
if (MethodData::profile_arguments()) {
|
||||
Label done;
|
||||
@@ -1797,9 +1797,9 @@ void InterpreterMacroAssembler::profile_arguments_type(Register callee,
|
||||
if (i > 0 || MethodData::profile_return()) {
|
||||
// If return value type is profiled we may have no argument to profile.
|
||||
ld(tmp1, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, R28_mdx);
|
||||
cmpdi(CR0, tmp1, (i+1)*TypeStackSlotEntries::per_arg_count());
|
||||
cmpdi(CCR0, tmp1, (i+1)*TypeStackSlotEntries::per_arg_count());
|
||||
addi(tmp1, tmp1, -i*TypeStackSlotEntries::per_arg_count());
|
||||
blt(CR0, done);
|
||||
blt(CCR0, done);
|
||||
}
|
||||
ld(tmp1, in_bytes(Method::const_offset()), callee);
|
||||
lhz(tmp1, in_bytes(ConstMethod::size_of_parameters_offset()), tmp1);
|
||||
@@ -1865,12 +1865,12 @@ void InterpreterMacroAssembler::profile_return_type(Register ret, Register tmp1,
|
||||
// length.
|
||||
lbz(tmp1, 0, R14_bcp);
|
||||
lbz(tmp2, in_bytes(Method::intrinsic_id_offset()), R19_method);
|
||||
cmpwi(CR0, tmp1, Bytecodes::_invokedynamic);
|
||||
cmpwi(CR1, tmp1, Bytecodes::_invokehandle);
|
||||
cror(CR0, Assembler::equal, CR1, Assembler::equal);
|
||||
cmpwi(CR1, tmp2, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
|
||||
cror(CR0, Assembler::equal, CR1, Assembler::equal);
|
||||
bne(CR0, profile_continue);
|
||||
cmpwi(CCR0, tmp1, Bytecodes::_invokedynamic);
|
||||
cmpwi(CCR1, tmp1, Bytecodes::_invokehandle);
|
||||
cror(CCR0, Assembler::equal, CCR1, Assembler::equal);
|
||||
cmpwi(CCR1, tmp2, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
|
||||
cror(CCR0, Assembler::equal, CCR1, Assembler::equal);
|
||||
bne(CCR0, profile_continue);
|
||||
}
|
||||
|
||||
profile_obj_type(ret, R28_mdx, -in_bytes(ReturnTypeEntry::size()), tmp1, tmp2);
|
||||
@@ -1890,8 +1890,8 @@ void InterpreterMacroAssembler::profile_parameters_type(Register tmp1, Register
|
||||
// Load the offset of the area within the MDO used for
|
||||
// parameters. If it's negative we're not profiling any parameters.
|
||||
lwz(tmp1, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset()), R28_mdx);
|
||||
cmpwi(CR0, tmp1, 0);
|
||||
blt(CR0, profile_continue);
|
||||
cmpwi(CCR0, tmp1, 0);
|
||||
blt(CCR0, profile_continue);
|
||||
|
||||
// Compute a pointer to the area for parameters from the offset
|
||||
// and move the pointer to the slot for the last
|
||||
@@ -1936,9 +1936,9 @@ void InterpreterMacroAssembler::profile_parameters_type(Register tmp1, Register
|
||||
|
||||
// Go to next parameter.
|
||||
int delta = TypeStackSlotEntries::per_arg_count() * DataLayout::cell_size + (type_base - off_base);
|
||||
cmpdi(CR0, entry_offset, off_base + delta);
|
||||
cmpdi(CCR0, entry_offset, off_base + delta);
|
||||
addi(entry_offset, entry_offset, -delta);
|
||||
bge(CR0, loop);
|
||||
bge(CCR0, loop);
|
||||
|
||||
align(32, 12);
|
||||
bind(profile_continue);
|
||||
@@ -1975,7 +1975,7 @@ void InterpreterMacroAssembler::add_monitor_to_stack(bool stack_is_empty, Regist
|
||||
subf(n_slots, esp, R26_monitor);
|
||||
srdi_(n_slots, n_slots, LogBytesPerWord); // Compute number of slots to copy.
|
||||
assert(LogBytesPerWord == 3, "conflicts assembler instructions");
|
||||
beq(CR0, copy_slot_finished); // Nothing to copy.
|
||||
beq(CCR0, copy_slot_finished); // Nothing to copy.
|
||||
|
||||
mtctr(n_slots);
|
||||
|
||||
@@ -2115,8 +2115,8 @@ void InterpreterMacroAssembler::check_and_forward_exception(Register Rscratch1,
|
||||
Label Ldone;
|
||||
// Get pending exception oop.
|
||||
ld(Rexception, thread_(pending_exception));
|
||||
cmpdi(CR0, Rexception, 0);
|
||||
beq(CR0, Ldone);
|
||||
cmpdi(CCR0, Rexception, 0);
|
||||
beq(CCR0, Ldone);
|
||||
li(Rtmp, 0);
|
||||
mr_if_needed(R3, Rexception);
|
||||
std(Rtmp, thread_(pending_exception)); // Clear exception in thread
|
||||
@@ -2168,7 +2168,7 @@ void InterpreterMacroAssembler::call_VM_preemptable(Register oop_result, address
|
||||
Label resume_pc, not_preempted;
|
||||
|
||||
DEBUG_ONLY(ld(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread));
|
||||
DEBUG_ONLY(cmpdi(CR0, R0, 0));
|
||||
DEBUG_ONLY(cmpdi(CCR0, R0, 0));
|
||||
asm_assert_eq("Should not have alternate return address set");
|
||||
|
||||
// Preserve 2 registers
|
||||
@@ -2186,8 +2186,8 @@ void InterpreterMacroAssembler::call_VM_preemptable(Register oop_result, address
|
||||
|
||||
// Jump to handler if the call was preempted
|
||||
ld(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread);
|
||||
cmpdi(CR0, R0, 0);
|
||||
beq(CR0, not_preempted);
|
||||
cmpdi(CCR0, R0, 0);
|
||||
beq(CCR0, not_preempted);
|
||||
mtlr(R0);
|
||||
li(R0, 0);
|
||||
std(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread);
|
||||
@@ -2215,8 +2215,8 @@ void InterpreterMacroAssembler::restore_after_resume(Register fp) {
|
||||
{
|
||||
Label ok;
|
||||
ld(R12_scratch2, 0, R1_SP); // load fp
|
||||
cmpd(CR0, R12_scratch2, R11_scratch1);
|
||||
beq(CR0, ok);
|
||||
cmpd(CCR0, R12_scratch2, R11_scratch1);
|
||||
beq(CCR0, ok);
|
||||
stop(FILE_AND_LINE ": FP is expected in R11_scratch1");
|
||||
bind(ok);
|
||||
}
|
||||
@@ -2298,8 +2298,8 @@ void InterpreterMacroAssembler::restore_interpreter_state(Register scratch, bool
|
||||
{
|
||||
Label Lok;
|
||||
subf(R0, R1_SP, scratch);
|
||||
cmpdi(CR0, R0, frame::top_ijava_frame_abi_size + frame::ijava_state_size);
|
||||
bge(CR0, Lok);
|
||||
cmpdi(CCR0, R0, frame::top_ijava_frame_abi_size + frame::ijava_state_size);
|
||||
bge(CCR0, Lok);
|
||||
stop("frame too small (restore istate)");
|
||||
bind(Lok);
|
||||
}
|
||||
@@ -2312,13 +2312,13 @@ void InterpreterMacroAssembler::get_method_counters(Register method,
|
||||
BLOCK_COMMENT("Load and ev. allocate counter object {");
|
||||
Label has_counters;
|
||||
ld(Rcounters, in_bytes(Method::method_counters_offset()), method);
|
||||
cmpdi(CR0, Rcounters, 0);
|
||||
bne(CR0, has_counters);
|
||||
cmpdi(CCR0, Rcounters, 0);
|
||||
bne(CCR0, has_counters);
|
||||
call_VM(noreg, CAST_FROM_FN_PTR(address,
|
||||
InterpreterRuntime::build_method_counters), method);
|
||||
ld(Rcounters, in_bytes(Method::method_counters_offset()), method);
|
||||
cmpdi(CR0, Rcounters, 0);
|
||||
beq(CR0, skip); // No MethodCounters, OutOfMemory.
|
||||
cmpdi(CCR0, Rcounters, 0);
|
||||
beq(CCR0, skip); // No MethodCounters, OutOfMemory.
|
||||
BLOCK_COMMENT("} Load and ev. allocate counter object");
|
||||
|
||||
bind(has_counters);
|
||||
@@ -2398,7 +2398,7 @@ void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Regis
|
||||
|
||||
const int log2_bytecode_size_limit = 16;
|
||||
srdi_(Rtmp, reg, log2_bytecode_size_limit);
|
||||
bne(CR0, test);
|
||||
bne(CCR0, test);
|
||||
|
||||
address fd = CAST_FROM_FN_PTR(address, verify_return_address);
|
||||
const int nbytes_save = MacroAssembler::num_volatile_regs * 8;
|
||||
@@ -2442,8 +2442,8 @@ void InterpreterMacroAssembler::notify_method_entry() {
|
||||
Label jvmti_post_done;
|
||||
|
||||
lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
|
||||
cmpwi(CR0, R0, 0);
|
||||
beq(CR0, jvmti_post_done);
|
||||
cmpwi(CCR0, R0, 0);
|
||||
beq(CCR0, jvmti_post_done);
|
||||
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
|
||||
|
||||
bind(jvmti_post_done);
|
||||
@@ -2476,8 +2476,8 @@ void InterpreterMacroAssembler::notify_method_exit(bool is_native_method, TosSta
|
||||
Label jvmti_post_done;
|
||||
|
||||
lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
|
||||
cmpwi(CR0, R0, 0);
|
||||
beq(CR0, jvmti_post_done);
|
||||
cmpwi(CCR0, R0, 0);
|
||||
beq(CCR0, jvmti_post_done);
|
||||
if (!is_native_method) { push(state); } // Expose tos to GC.
|
||||
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit), check_exceptions);
|
||||
if (!is_native_method) { pop(state); }
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -103,9 +103,9 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
|
||||
Label do_null;
|
||||
if (do_null_check) {
|
||||
__ ld(R0, locals_j_arg_at(offset()));
|
||||
__ cmpdi(CR0, R0, 0);
|
||||
__ cmpdi(CCR0, R0, 0);
|
||||
__ li(r, 0);
|
||||
__ beq(CR0, do_null);
|
||||
__ beq(CCR0, do_null);
|
||||
}
|
||||
__ addir(r, locals_j_arg_at(offset()));
|
||||
__ bind(do_null);
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2012, 2019 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -75,7 +75,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
|
||||
|
||||
__ ld(Rcounter, counter_offs, Rcounter_addr);
|
||||
__ andi_(R0, Rcounter, 1);
|
||||
__ bne(CR0, slow);
|
||||
__ bne(CCR0, slow);
|
||||
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
// Field may be volatile.
|
||||
@@ -91,8 +91,8 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
|
||||
int fac_offs = __ load_const_optimized(Rtmp, JvmtiExport::get_field_access_count_addr(),
|
||||
R0, true);
|
||||
__ lwa(Rtmp, fac_offs, Rtmp);
|
||||
__ cmpwi(CR0, Rtmp, 0);
|
||||
__ bne(CR0, slow);
|
||||
__ cmpwi(CCR0, Rtmp, 0);
|
||||
__ bne(CCR0, slow);
|
||||
}
|
||||
|
||||
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
|
||||
@@ -118,8 +118,8 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
|
||||
// Order preceding load(s) wrt. succeeding check (LoadStore for volatile field).
|
||||
if (is_fp) {
|
||||
Label next;
|
||||
__ fcmpu(CR0, F1_RET, F1_RET);
|
||||
__ bne(CR0, next);
|
||||
__ fcmpu(CCR0, F1_RET, F1_RET);
|
||||
__ bne(CCR0, next);
|
||||
__ bind(next);
|
||||
} else {
|
||||
__ twi_0(Rtmp);
|
||||
@@ -127,8 +127,8 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
|
||||
__ isync();
|
||||
|
||||
__ ld(R0, counter_offs, Rcounter_addr);
|
||||
__ cmpd(CR0, R0, Rcounter);
|
||||
__ bne(CR0, slow);
|
||||
__ cmpd(CCR0, R0, Rcounter);
|
||||
__ bne(CCR0, slow);
|
||||
|
||||
if (!is_fp) {
|
||||
__ mr(R3_RET, Rtmp);
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -179,8 +179,8 @@ class MacroAssembler: public Assembler {
|
||||
//
|
||||
// branch, jump
|
||||
//
|
||||
// set dst to -1, 0, +1 as follows: if CR0bi is "greater than", dst is set to 1,
|
||||
// if CR0bi is "equal", dst is set to 0, otherwise it's set to -1.
|
||||
// set dst to -1, 0, +1 as follows: if CCR0bi is "greater than", dst is set to 1,
|
||||
// if CCR0bi is "equal", dst is set to 0, otherwise it's set to -1.
|
||||
void inline set_cmp3(Register dst);
|
||||
// set dst to (treat_unordered_like_less ? -1 : +1)
|
||||
void inline set_cmpu3(Register dst, bool treat_unordered_like_less);
|
||||
@@ -612,20 +612,6 @@ class MacroAssembler: public Assembler {
|
||||
// The temp_reg can be noreg, if no temps are available.
|
||||
// It can also be sub_klass or super_klass, meaning it's OK to kill that one.
|
||||
// Updates the sub's secondary super cache as necessary.
|
||||
void check_klass_subtype_slow_path_linear(Register sub_klass,
|
||||
Register super_klass,
|
||||
Register temp1_reg,
|
||||
Register temp2_reg,
|
||||
Label* L_success = nullptr,
|
||||
Register result_reg = noreg);
|
||||
|
||||
void check_klass_subtype_slow_path_table(Register sub_klass,
|
||||
Register super_klass,
|
||||
Register temp1_reg,
|
||||
Register temp2_reg,
|
||||
Label* L_success = nullptr,
|
||||
Register result_reg = noreg);
|
||||
|
||||
void check_klass_subtype_slow_path(Register sub_klass,
|
||||
Register super_klass,
|
||||
Register temp1_reg,
|
||||
@@ -633,25 +619,6 @@ class MacroAssembler: public Assembler {
|
||||
Label* L_success = nullptr,
|
||||
Register result_reg = noreg);
|
||||
|
||||
void lookup_secondary_supers_table_var(Register sub_klass,
|
||||
Register r_super_klass,
|
||||
Register temp1,
|
||||
Register temp2,
|
||||
Register temp3,
|
||||
Register temp4,
|
||||
Register result);
|
||||
|
||||
// If r is valid, return r.
|
||||
// If r is invalid, remove a register r2 from available_regs, add r2
|
||||
// to regs_to_push, then return r2.
|
||||
Register allocate_if_noreg(const Register r,
|
||||
RegSetIterator<Register> &available_regs,
|
||||
RegSet ®s_to_push);
|
||||
|
||||
// Frameless register spills (negative offset from SP)
|
||||
void push_set(RegSet set);
|
||||
void pop_set(RegSet set);
|
||||
|
||||
// Simplified, combined version, good for typical uses.
|
||||
// Falls through on failure.
|
||||
void check_klass_subtype(Register sub_klass,
|
||||
@@ -664,14 +631,14 @@ class MacroAssembler: public Assembler {
|
||||
|
||||
// As above, but with a constant super_klass.
|
||||
// The result is in Register result, not the condition codes.
|
||||
void lookup_secondary_supers_table_const(Register r_sub_klass,
|
||||
Register r_super_klass,
|
||||
Register temp1,
|
||||
Register temp2,
|
||||
Register temp3,
|
||||
Register temp4,
|
||||
Register result,
|
||||
u1 super_klass_slot);
|
||||
void lookup_secondary_supers_table(Register r_sub_klass,
|
||||
Register r_super_klass,
|
||||
Register temp1,
|
||||
Register temp2,
|
||||
Register temp3,
|
||||
Register temp4,
|
||||
Register result,
|
||||
u1 super_klass_slot);
|
||||
|
||||
void verify_secondary_supers_table(Register r_sub_klass,
|
||||
Register r_super_klass,
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -248,14 +248,14 @@ inline bool MacroAssembler::is_bc_far_variant3_at(address instruction_addr) {
|
||||
is_endgroup(instruction_2);
|
||||
}
|
||||
|
||||
// set dst to -1, 0, +1 as follows: if CR0bi is "greater than", dst is set to 1,
|
||||
// if CR0bi is "equal", dst is set to 0, otherwise it's set to -1.
|
||||
// set dst to -1, 0, +1 as follows: if CCR0bi is "greater than", dst is set to 1,
|
||||
// if CCR0bi is "equal", dst is set to 0, otherwise it's set to -1.
|
||||
inline void MacroAssembler::set_cmp3(Register dst) {
|
||||
assert_different_registers(dst, R0);
|
||||
// P10, prefer using setbc instructions
|
||||
if (VM_Version::has_brw()) {
|
||||
setbc(R0, CR0, Assembler::greater); // Set 1 to R0 if CR0bi is "greater than", otherwise 0
|
||||
setnbc(dst, CR0, Assembler::less); // Set -1 to dst if CR0bi is "less than", otherwise 0
|
||||
setbc(R0, CCR0, Assembler::greater); // Set 1 to R0 if CCR0bi is "greater than", otherwise 0
|
||||
setnbc(dst, CCR0, Assembler::less); // Set -1 to dst if CCR0bi is "less than", otherwise 0
|
||||
} else {
|
||||
mfcr(R0); // copy CR register to R0
|
||||
srwi(dst, R0, 30); // copy the first two bits to dst
|
||||
@@ -267,9 +267,9 @@ inline void MacroAssembler::set_cmp3(Register dst) {
|
||||
// set dst to (treat_unordered_like_less ? -1 : +1)
|
||||
inline void MacroAssembler::set_cmpu3(Register dst, bool treat_unordered_like_less) {
|
||||
if (treat_unordered_like_less) {
|
||||
cror(CR0, Assembler::less, CR0, Assembler::summary_overflow); // treat unordered like less
|
||||
cror(CCR0, Assembler::less, CCR0, Assembler::summary_overflow); // treat unordered like less
|
||||
} else {
|
||||
cror(CR0, Assembler::greater, CR0, Assembler::summary_overflow); // treat unordered like greater
|
||||
cror(CCR0, Assembler::greater, CCR0, Assembler::summary_overflow); // treat unordered like greater
|
||||
}
|
||||
set_cmp3(dst);
|
||||
}
|
||||
@@ -280,11 +280,11 @@ inline void MacroAssembler::normalize_bool(Register dst, Register temp, bool is_
|
||||
|
||||
if (VM_Version::has_brw()) {
|
||||
if (is_64bit) {
|
||||
cmpdi(CR0, dst, 0);
|
||||
cmpdi(CCR0, dst, 0);
|
||||
} else {
|
||||
cmpwi(CR0, dst, 0);
|
||||
cmpwi(CCR0, dst, 0);
|
||||
}
|
||||
setbcr(dst, CR0, Assembler::equal);
|
||||
setbcr(dst, CCR0, Assembler::equal);
|
||||
} else {
|
||||
assert_different_registers(temp, dst);
|
||||
neg(temp, dst);
|
||||
@@ -373,8 +373,8 @@ inline void MacroAssembler::null_check_throw(Register a, int offset, Register te
|
||||
trap_null_check(a);
|
||||
} else {
|
||||
Label ok;
|
||||
cmpdi(CR0, a, 0);
|
||||
bne(CR0, ok);
|
||||
cmpdi(CCR0, a, 0);
|
||||
bne(CCR0, ok);
|
||||
load_const_optimized(temp_reg, exception_entry);
|
||||
mtctr(temp_reg);
|
||||
bctr();
|
||||
@@ -390,8 +390,8 @@ inline void MacroAssembler::null_check(Register a, int offset, Label *Lis_null)
|
||||
trap_null_check(a);
|
||||
} else if (Lis_null){
|
||||
Label ok;
|
||||
cmpdi(CR0, a, 0);
|
||||
beq(CR0, *Lis_null);
|
||||
cmpdi(CCR0, a, 0);
|
||||
beq(CCR0, *Lis_null);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -468,14 +468,14 @@ inline Register MacroAssembler::encode_heap_oop_not_null(Register d, Register sr
|
||||
inline Register MacroAssembler::encode_heap_oop(Register d, Register src) {
|
||||
if (CompressedOops::base() != nullptr) {
|
||||
if (VM_Version::has_isel()) {
|
||||
cmpdi(CR0, src, 0);
|
||||
cmpdi(CCR0, src, 0);
|
||||
Register co = encode_heap_oop_not_null(d, src);
|
||||
assert(co == d, "sanity");
|
||||
isel_0(d, CR0, Assembler::equal);
|
||||
isel_0(d, CCR0, Assembler::equal);
|
||||
} else {
|
||||
Label isNull;
|
||||
or_(d, src, src); // move and compare 0
|
||||
beq(CR0, isNull);
|
||||
beq(CCR0, isNull);
|
||||
encode_heap_oop_not_null(d, src);
|
||||
bind(isNull);
|
||||
}
|
||||
@@ -509,16 +509,16 @@ inline void MacroAssembler::decode_heap_oop(Register d) {
|
||||
Label isNull;
|
||||
bool use_isel = false;
|
||||
if (CompressedOops::base() != nullptr) {
|
||||
cmpwi(CR0, d, 0);
|
||||
cmpwi(CCR0, d, 0);
|
||||
if (VM_Version::has_isel()) {
|
||||
use_isel = true;
|
||||
} else {
|
||||
beq(CR0, isNull);
|
||||
beq(CCR0, isNull);
|
||||
}
|
||||
}
|
||||
decode_heap_oop_not_null(d);
|
||||
if (use_isel) {
|
||||
isel_0(d, CR0, Assembler::equal);
|
||||
isel_0(d, CCR0, Assembler::equal);
|
||||
}
|
||||
bind(isNull);
|
||||
}
|
||||
|
||||
@@ -93,7 +93,7 @@ void MacroAssembler::sha256_load_h_vec(const VectorRegister a,
|
||||
lvx (a, hptr);
|
||||
addi (tmp, hptr, 16);
|
||||
lvx (e, tmp);
|
||||
beq (CR0, sha256_aligned);
|
||||
beq (CCR0, sha256_aligned);
|
||||
|
||||
// handle unaligned accesses
|
||||
load_perm(vRb, hptr);
|
||||
@@ -121,7 +121,7 @@ void MacroAssembler::sha256_load_w_plus_k_vec(const Register buf_in,
|
||||
VectorRegister vRb = VR6;
|
||||
|
||||
andi_ (tmp, buf_in, 0xF);
|
||||
beq (CR0, w_aligned); // address ends with 0x0, not 0x8
|
||||
beq (CCR0, w_aligned); // address ends with 0x0, not 0x8
|
||||
|
||||
// deal with unaligned addresses
|
||||
lvx (ws[0], buf_in);
|
||||
@@ -318,7 +318,7 @@ void MacroAssembler::sha256_update_sha_state(const VectorRegister a,
|
||||
li (of16, 16);
|
||||
lvx (vt0, hptr);
|
||||
lvx (vt5, of16, hptr);
|
||||
beq (CR0, state_load_aligned);
|
||||
beq (CCR0, state_load_aligned);
|
||||
|
||||
// handle unaligned accesses
|
||||
li (of32, 32);
|
||||
@@ -538,8 +538,8 @@ void MacroAssembler::sha256(bool multi_block) {
|
||||
if (multi_block) {
|
||||
addi(buf_in, buf_in, buf_size);
|
||||
addi(ofs, ofs, buf_size);
|
||||
cmplw(CR0, ofs, limit);
|
||||
ble(CR0, sha_loop);
|
||||
cmplw(CCR0, ofs, limit);
|
||||
ble(CCR0, sha_loop);
|
||||
|
||||
// return ofs
|
||||
mr(R3_RET, ofs);
|
||||
@@ -567,7 +567,7 @@ void MacroAssembler::sha512_load_w_vec(const Register buf_in,
|
||||
Label is_aligned, after_alignment;
|
||||
|
||||
andi_ (tmp, buf_in, 0xF);
|
||||
beq (CR0, is_aligned); // address ends with 0x0, not 0x8
|
||||
beq (CCR0, is_aligned); // address ends with 0x0, not 0x8
|
||||
|
||||
// deal with unaligned addresses
|
||||
lvx (ws[0], buf_in);
|
||||
@@ -623,7 +623,7 @@ void MacroAssembler::sha512_update_sha_state(const Register state,
|
||||
VectorRegister aux = VR9;
|
||||
|
||||
andi_(tmp, state, 0xf);
|
||||
beq(CR0, state_save_aligned);
|
||||
beq(CCR0, state_save_aligned);
|
||||
// deal with unaligned addresses
|
||||
|
||||
{
|
||||
@@ -860,7 +860,7 @@ void MacroAssembler::sha512_load_h_vec(const Register state,
|
||||
Label state_aligned, after_state_aligned;
|
||||
|
||||
andi_(tmp, state, 0xf);
|
||||
beq(CR0, state_aligned);
|
||||
beq(CCR0, state_aligned);
|
||||
|
||||
// deal with unaligned addresses
|
||||
VectorRegister aux = VR9;
|
||||
@@ -1121,8 +1121,8 @@ void MacroAssembler::sha512(bool multi_block) {
|
||||
if (multi_block) {
|
||||
addi(buf_in, buf_in, buf_size);
|
||||
addi(ofs, ofs, buf_size);
|
||||
cmplw(CR0, ofs, limit);
|
||||
ble(CR0, sha_loop);
|
||||
cmplw(CCR0, ofs, limit);
|
||||
ble(CCR0, sha_loop);
|
||||
|
||||
// return ofs
|
||||
mr(R3_RET, ofs);
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -83,16 +83,16 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
|
||||
Label L_ok, L_bad;
|
||||
BLOCK_COMMENT("verify_klass {");
|
||||
__ verify_oop(obj_reg, FILE_AND_LINE);
|
||||
__ cmpdi(CR0, obj_reg, 0);
|
||||
__ beq(CR0, L_bad);
|
||||
__ cmpdi(CCR0, obj_reg, 0);
|
||||
__ beq(CCR0, L_bad);
|
||||
__ load_klass(temp_reg, obj_reg);
|
||||
__ load_const_optimized(temp2_reg, (address) klass_addr);
|
||||
__ ld(temp2_reg, 0, temp2_reg);
|
||||
__ cmpd(CR0, temp_reg, temp2_reg);
|
||||
__ beq(CR0, L_ok);
|
||||
__ cmpd(CCR0, temp_reg, temp2_reg);
|
||||
__ beq(CCR0, L_ok);
|
||||
__ ld(temp_reg, klass->super_check_offset(), temp_reg);
|
||||
__ cmpd(CR0, temp_reg, temp2_reg);
|
||||
__ beq(CR0, L_ok);
|
||||
__ cmpd(CCR0, temp_reg, temp2_reg);
|
||||
__ beq(CCR0, L_ok);
|
||||
__ BIND(L_bad);
|
||||
__ stop(error_message);
|
||||
__ BIND(L_ok);
|
||||
@@ -107,8 +107,8 @@ void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Registe
|
||||
// assert(sizeof(u4) == sizeof(java.lang.invoke.MemberName.flags), "");
|
||||
__ srwi( temp, temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT);
|
||||
__ andi(temp, temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK);
|
||||
__ cmpwi(CR1, temp, ref_kind);
|
||||
__ beq(CR1, L);
|
||||
__ cmpwi(CCR1, temp, ref_kind);
|
||||
__ beq(CCR1, L);
|
||||
{ char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal);
|
||||
jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind);
|
||||
if (ref_kind == JVM_REF_invokeVirtual ||
|
||||
@@ -135,11 +135,11 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth
|
||||
// compiled code in threads for which the event is enabled. Check here for
|
||||
// interp_only_mode if these events CAN be enabled.
|
||||
__ lwz(temp, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
|
||||
__ cmplwi(CR0, temp, 0);
|
||||
__ beq(CR0, run_compiled_code);
|
||||
__ cmplwi(CCR0, temp, 0);
|
||||
__ beq(CCR0, run_compiled_code);
|
||||
// Null method test is replicated below in compiled case.
|
||||
__ cmplwi(CR0, R19_method, 0);
|
||||
__ beq(CR0, L_no_such_method);
|
||||
__ cmplwi(CCR0, R19_method, 0);
|
||||
__ beq(CCR0, L_no_such_method);
|
||||
__ ld(target, in_bytes(Method::interpreter_entry_offset()), R19_method);
|
||||
__ mtctr(target);
|
||||
__ bctr();
|
||||
@@ -147,8 +147,8 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth
|
||||
}
|
||||
|
||||
// Compiled case, either static or fall-through from runtime conditional
|
||||
__ cmplwi(CR0, R19_method, 0);
|
||||
__ beq(CR0, L_no_such_method);
|
||||
__ cmplwi(CCR0, R19_method, 0);
|
||||
__ beq(CCR0, L_no_such_method);
|
||||
|
||||
const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
|
||||
Method::from_interpreted_offset();
|
||||
@@ -200,8 +200,8 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
|
||||
// assert(sizeof(u2) == sizeof(ConstMethod::_size_of_parameters), "");
|
||||
Label L;
|
||||
__ ld(temp2, __ argument_offset(temp2, temp2, 0), R15_esp);
|
||||
__ cmpd(CR1, temp2, recv);
|
||||
__ beq(CR1, L);
|
||||
__ cmpd(CCR1, temp2, recv);
|
||||
__ beq(CCR1, L);
|
||||
__ stop("receiver not on stack");
|
||||
__ BIND(L);
|
||||
}
|
||||
@@ -248,8 +248,8 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
|
||||
BLOCK_COMMENT("verify_intrinsic_id {");
|
||||
__ load_sized_value(R30_tmp1, in_bytes(Method::intrinsic_id_offset()), R19_method,
|
||||
sizeof(u2), /*is_signed*/ false);
|
||||
__ cmpwi(CR1, R30_tmp1, (int) iid);
|
||||
__ beq(CR1, L);
|
||||
__ cmpwi(CCR1, R30_tmp1, (int) iid);
|
||||
__ beq(CCR1, L);
|
||||
if (iid == vmIntrinsics::_linkToVirtual ||
|
||||
iid == vmIntrinsics::_linkToSpecial) {
|
||||
// could do this for all kinds, but would explode assembly code size
|
||||
@@ -425,8 +425,8 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
|
||||
|
||||
if (VerifyMethodHandles) {
|
||||
Label L_index_ok;
|
||||
__ cmpdi(CR1, temp2_index, 0);
|
||||
__ bge(CR1, L_index_ok);
|
||||
__ cmpdi(CCR1, temp2_index, 0);
|
||||
__ bge(CCR1, L_index_ok);
|
||||
__ stop("no virtual index");
|
||||
__ BIND(L_index_ok);
|
||||
}
|
||||
@@ -457,8 +457,8 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
|
||||
__ ld(vtable_index, NONZERO(java_lang_invoke_MemberName::vmindex_offset()), member_reg);
|
||||
if (VerifyMethodHandles) {
|
||||
Label L_index_ok;
|
||||
__ cmpdi(CR1, vtable_index, 0);
|
||||
__ bge(CR1, L_index_ok);
|
||||
__ cmpdi(CCR1, vtable_index, 0);
|
||||
__ bge(CCR1, L_index_ok);
|
||||
__ stop("invalid vtable index for MH.invokeInterface");
|
||||
__ BIND(L_index_ok);
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
//
|
||||
// Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
// Copyright (c) 2012, 2025 SAP SE. All rights reserved.
|
||||
// Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
// Copyright (c) 2012, 2024 SAP SE. All rights reserved.
|
||||
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
//
|
||||
// This code is free software; you can redistribute it and/or modify it
|
||||
@@ -236,14 +236,14 @@ register %{
|
||||
// in the CR register.
|
||||
|
||||
// types: v = volatile, nv = non-volatile, s = system
|
||||
reg_def CR0(SOC, SOC, Op_RegFlags, 0, CR0->as_VMReg()); // v
|
||||
reg_def CR1(SOC, SOC, Op_RegFlags, 1, CR1->as_VMReg()); // v
|
||||
reg_def CR2(SOC, SOC, Op_RegFlags, 2, CR2->as_VMReg()); // nv
|
||||
reg_def CR3(SOC, SOC, Op_RegFlags, 3, CR3->as_VMReg()); // nv
|
||||
reg_def CR4(SOC, SOC, Op_RegFlags, 4, CR4->as_VMReg()); // nv
|
||||
reg_def CR5(SOC, SOC, Op_RegFlags, 5, CR5->as_VMReg()); // v
|
||||
reg_def CR6(SOC, SOC, Op_RegFlags, 6, CR6->as_VMReg()); // v
|
||||
reg_def CR7(SOC, SOC, Op_RegFlags, 7, CR7->as_VMReg()); // v
|
||||
reg_def CCR0(SOC, SOC, Op_RegFlags, 0, CCR0->as_VMReg()); // v
|
||||
reg_def CCR1(SOC, SOC, Op_RegFlags, 1, CCR1->as_VMReg()); // v
|
||||
reg_def CCR2(SOC, SOC, Op_RegFlags, 2, CCR2->as_VMReg()); // nv
|
||||
reg_def CCR3(SOC, SOC, Op_RegFlags, 3, CCR3->as_VMReg()); // nv
|
||||
reg_def CCR4(SOC, SOC, Op_RegFlags, 4, CCR4->as_VMReg()); // nv
|
||||
reg_def CCR5(SOC, SOC, Op_RegFlags, 5, CCR5->as_VMReg()); // v
|
||||
reg_def CCR6(SOC, SOC, Op_RegFlags, 6, CCR6->as_VMReg()); // v
|
||||
reg_def CCR7(SOC, SOC, Op_RegFlags, 7, CCR7->as_VMReg()); // v
|
||||
|
||||
// Special registers of PPC64
|
||||
|
||||
@@ -443,14 +443,14 @@ alloc_class chunk1 (
alloc_class chunk2 (
// Chunk2 contains *all* 8 condition code registers.

CR0,
CR1,
CR2,
CR3,
CR4,
CR5,
CR6,
CR7
CCR0,
CCR1,
CCR2,
CCR3,
CCR4,
CCR5,
CCR6,
CCR7
);

alloc_class chunk3 (
@@ -803,30 +803,30 @@ reg_class bits64_reg_ro(
// Special Class for Condition Code Flags Register

reg_class int_flags(
/*CR0*/ // scratch
/*CR1*/ // scratch
/*CR2*/ // nv!
/*CR3*/ // nv!
/*CR4*/ // nv!
CR5,
CR6,
CR7
/*CCR0*/ // scratch
/*CCR1*/ // scratch
/*CCR2*/ // nv!
/*CCR3*/ // nv!
/*CCR4*/ // nv!
CCR5,
CCR6,
CCR7
);

reg_class int_flags_ro(
CR0,
CR1,
CR2,
CR3,
CR4,
CR5,
CR6,
CR7
CCR0,
CCR1,
CCR2,
CCR3,
CCR4,
CCR5,
CCR6,
CCR7
);

reg_class int_flags_CR0(CR0);
reg_class int_flags_CR1(CR1);
reg_class int_flags_CR6(CR6);
reg_class int_flags_CR0(CCR0);
reg_class int_flags_CR1(CCR1);
reg_class int_flags_CR6(CCR6);
reg_class ctr_reg(SR_CTR);

// ----------------------------
@@ -5568,8 +5568,8 @@ instruct loadF_ac(regF dst, memory mem, flagsRegCR0 cr0) %{
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
Label next;
__ lfs($dst$$FloatRegister, Idisp, $mem$$base$$Register);
__ fcmpu(CR0, $dst$$FloatRegister, $dst$$FloatRegister);
__ bne(CR0, next);
__ fcmpu(CCR0, $dst$$FloatRegister, $dst$$FloatRegister);
__ bne(CCR0, next);
__ bind(next);
__ isync();
%}
@@ -5604,8 +5604,8 @@ instruct loadD_ac(regD dst, memory mem, flagsRegCR0 cr0) %{
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
Label next;
__ lfd($dst$$FloatRegister, Idisp, $mem$$base$$Register);
__ fcmpu(CR0, $dst$$FloatRegister, $dst$$FloatRegister);
__ bne(CR0, next);
__ fcmpu(CCR0, $dst$$FloatRegister, $dst$$FloatRegister);
__ bne(CCR0, next);
__ bind(next);
__ isync();
%}
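Aside from the register rename, the loadF_ac/loadD_ac hunks above use the usual PPC acquiring-load sequence: load, compare the loaded value against itself, branch to the immediately following label, then isync; the compare and never-escaping branch exist only to give isync a dependency on the load, since the FP load itself carries no acquire semantics. A rough portable equivalent (a sketch for orientation, not code from this change):

#include <atomic>

// Sketch: what the lfs/fcmpu/bne/isync sequence provides, expressed with C++ atomics.
float load_float_acquire(const std::atomic<float>& cell) {
  return cell.load(std::memory_order_acquire);
}
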
@@ -7394,8 +7394,8 @@ instruct compareAndSwapB_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc
|
||||
effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "CMPXCHGB $res, $mem_ptr, $src1, $src2; as bool" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgb(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgb(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
$res$$Register, nullptr, true);
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
@@ -7413,8 +7413,8 @@ instruct compareAndSwapB4_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr, iRegIs
|
||||
effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP tmp2, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "CMPXCHGB $res, $mem_ptr, $src1, $src2; as bool" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgb(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgb(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
$res$$Register, nullptr, true);
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
@@ -7432,8 +7432,8 @@ instruct compareAndSwapS_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc
|
||||
effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "CMPXCHGH $res, $mem_ptr, $src1, $src2; as bool" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgh(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgh(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
$res$$Register, nullptr, true);
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
@@ -7451,8 +7451,8 @@ instruct compareAndSwapS4_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr, iRegIs
|
||||
effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP tmp2, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "CMPXCHGH $res, $mem_ptr, $src1, $src2; as bool" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgh(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgh(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
$res$$Register, nullptr, true);
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
@@ -7469,8 +7469,8 @@ instruct compareAndSwapI_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc
|
||||
effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "CMPXCHGW $res, $mem_ptr, $src1, $src2; as bool" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgw(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
$res$$Register, nullptr, true);
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
@@ -7488,8 +7488,8 @@ instruct compareAndSwapN_regP_regN_regN(iRegIdst res, iRegPdst mem_ptr, iRegNsrc
|
||||
effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "CMPXCHGW $res, $mem_ptr, $src1, $src2; as bool" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgw(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
$res$$Register, nullptr, true);
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
@@ -7506,8 +7506,8 @@ instruct compareAndSwapL_regP_regL_regL(iRegIdst res, iRegPdst mem_ptr, iRegLsrc
|
||||
effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "CMPXCHGD $res, $mem_ptr, $src1, $src2; as bool" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgd(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgd(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
$res$$Register, nullptr, true);
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
@@ -7525,8 +7525,8 @@ instruct compareAndSwapP_regP_regP_regP(iRegIdst res, iRegPdst mem_ptr, iRegPsrc
|
||||
predicate(n->as_LoadStore()->barrier_data() == 0);
|
||||
format %{ "CMPXCHGD $res, $mem_ptr, $src1, $src2; as bool; ptr" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgd(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgd(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
$res$$Register, nullptr, true);
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
@@ -7546,8 +7546,8 @@ instruct weakCompareAndSwapB_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iReg
|
||||
effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "weak CMPXCHGB $res, $mem_ptr, $src1, $src2; as bool" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgb(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgb(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
|
||||
MacroAssembler::MemBarNone,
|
||||
MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
|
||||
%}
|
||||
@@ -7560,8 +7560,8 @@ instruct weakCompareAndSwapB4_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr, iR
|
||||
effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP tmp2, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "weak CMPXCHGB $res, $mem_ptr, $src1, $src2; as bool" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgb(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgb(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
|
||||
MacroAssembler::MemBarNone,
|
||||
MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
|
||||
%}
|
||||
@@ -7574,8 +7574,8 @@ instruct weakCompareAndSwapB_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr,
|
||||
effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "weak CMPXCHGB acq $res, $mem_ptr, $src1, $src2; as bool" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgb(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgb(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
|
||||
support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
|
||||
MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
|
||||
%}
|
||||
@@ -7588,8 +7588,8 @@ instruct weakCompareAndSwapB4_acq_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr
|
||||
effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP tmp2, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "weak CMPXCHGB acq $res, $mem_ptr, $src1, $src2; as bool" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgb(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgb(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
|
||||
support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
|
||||
MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
|
||||
%}
|
||||
@@ -7602,8 +7602,8 @@ instruct weakCompareAndSwapS_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iReg
|
||||
effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "weak CMPXCHGH $res, $mem_ptr, $src1, $src2; as bool" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgh(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgh(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
|
||||
MacroAssembler::MemBarNone,
|
||||
MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
|
||||
%}
|
||||
@@ -7616,8 +7616,8 @@ instruct weakCompareAndSwapS4_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr, iR
|
||||
effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP tmp2, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "weak CMPXCHGH $res, $mem_ptr, $src1, $src2; as bool" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgh(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgh(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
|
||||
MacroAssembler::MemBarNone,
|
||||
MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
|
||||
%}
|
||||
@@ -7630,8 +7630,8 @@ instruct weakCompareAndSwapS_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr,
|
||||
effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "weak CMPXCHGH acq $res, $mem_ptr, $src1, $src2; as bool" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgh(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgh(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
|
||||
support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
|
||||
MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
|
||||
%}
|
||||
@@ -7644,8 +7644,8 @@ instruct weakCompareAndSwapS4_acq_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr
|
||||
effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP tmp2, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "weak CMPXCHGH acq $res, $mem_ptr, $src1, $src2; as bool" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgh(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgh(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
|
||||
support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
|
||||
MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
|
||||
%}
|
||||
@@ -7658,8 +7658,8 @@ instruct weakCompareAndSwapI_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iReg
|
||||
effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "weak CMPXCHGW $res, $mem_ptr, $src1, $src2; as bool" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgw(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
MacroAssembler::MemBarNone,
|
||||
MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
|
||||
%}
|
||||
@@ -7672,10 +7672,10 @@ instruct weakCompareAndSwapI_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr,
|
||||
effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "weak CMPXCHGW acq $res, $mem_ptr, $src1, $src2; as bool" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
// Acquire only needed in successful case. Weak node is allowed to report unsuccessful in additional rare cases and
|
||||
// value is never passed to caller.
|
||||
__ cmpxchgw(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
__ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
|
||||
MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
|
||||
%}
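The comment in the hunk above ("Acquire only needed in successful case ...") states the standard weak-CAS contract: the failure path never publishes a loaded value to the caller, so it needs no ordering, and the acquire (or trailing fence) is applied only on success. A small sketch of the same idea with portable C++ atomics (illustration only, not code from this change):

#include <atomic>

// Weak CAS with acquire on success and relaxed ordering on failure, mirroring
// the success-only barrier choice in the weakCompareAndSwap*_acq nodes above.
bool weak_cas_acquire(std::atomic<long>& cell, long expected, long desired) {
  // On failure the loaded value is discarded (the node only reports a boolean),
  // so no memory ordering is required on that path.
  return cell.compare_exchange_weak(expected, desired,
                                    std::memory_order_acquire,   // success ordering
                                    std::memory_order_relaxed);  // failure ordering
}
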
|
||||
@@ -7688,8 +7688,8 @@ instruct weakCompareAndSwapN_regP_regN_regN(iRegIdst res, iRegPdst mem_ptr, iReg
|
||||
effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "weak CMPXCHGW $res, $mem_ptr, $src1, $src2; as bool" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgw(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
MacroAssembler::MemBarNone,
|
||||
MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
|
||||
%}
|
||||
@@ -7702,10 +7702,10 @@ instruct weakCompareAndSwapN_acq_regP_regN_regN(iRegIdst res, iRegPdst mem_ptr,
|
||||
effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "weak CMPXCHGW acq $res, $mem_ptr, $src1, $src2; as bool" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
// Acquire only needed in successful case. Weak node is allowed to report unsuccessful in additional rare cases and
|
||||
// value is never passed to caller.
|
||||
__ cmpxchgw(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
__ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
|
||||
MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
|
||||
%}
|
||||
@@ -7718,9 +7718,9 @@ instruct weakCompareAndSwapL_regP_regL_regL(iRegIdst res, iRegPdst mem_ptr, iReg
|
||||
effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "weak CMPXCHGD $res, $mem_ptr, $src1, $src2; as bool" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
// value is never passed to caller.
|
||||
__ cmpxchgd(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
__ cmpxchgd(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
MacroAssembler::MemBarNone,
|
||||
MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
|
||||
%}
|
||||
@@ -7733,10 +7733,10 @@ instruct weakCompareAndSwapL_acq_regP_regL_regL(iRegIdst res, iRegPdst mem_ptr,
|
||||
effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "weak CMPXCHGD acq $res, $mem_ptr, $src1, $src2; as bool" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
// Acquire only needed in successful case. Weak node is allowed to report unsuccessful in additional rare cases and
|
||||
// value is never passed to caller.
|
||||
__ cmpxchgd(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
__ cmpxchgd(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
|
||||
MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
|
||||
%}
|
||||
@@ -7749,8 +7749,8 @@ instruct weakCompareAndSwapP_regP_regP_regP(iRegIdst res, iRegPdst mem_ptr, iReg
|
||||
effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "weak CMPXCHGD $res, $mem_ptr, $src1, $src2; as bool; ptr" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgd(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgd(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
MacroAssembler::MemBarNone,
|
||||
MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
|
||||
%}
|
||||
@@ -7763,10 +7763,10 @@ instruct weakCompareAndSwapP_acq_regP_regP_regP(iRegIdst res, iRegPdst mem_ptr,
|
||||
effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
|
||||
format %{ "weak CMPXCHGD acq $res, $mem_ptr, $src1, $src2; as bool; ptr" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
// Acquire only needed in successful case. Weak node is allowed to report unsuccessful in additional rare cases and
|
||||
// value is never passed to caller.
|
||||
__ cmpxchgd(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
__ cmpxchgd(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
|
||||
MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
|
||||
%}
|
||||
@@ -7781,8 +7781,8 @@ instruct compareAndExchangeB_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iReg
|
||||
effect(TEMP_DEF res, TEMP cr0);
|
||||
format %{ "CMPXCHGB $res, $mem_ptr, $src1, $src2; as int" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgb(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgb(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, nullptr, true);
|
||||
%}
|
||||
@@ -7795,8 +7795,8 @@ instruct compareAndExchangeB4_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr, iR
|
||||
effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP cr0);
|
||||
format %{ "CMPXCHGB $res, $mem_ptr, $src1, $src2; as int" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgb(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, R0,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgb(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, R0,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, nullptr, true);
|
||||
%}
|
||||
@@ -7809,8 +7809,8 @@ instruct compareAndExchangeB_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr,
|
||||
effect(TEMP_DEF res, TEMP cr0);
|
||||
format %{ "CMPXCHGB acq $res, $mem_ptr, $src1, $src2; as int" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgb(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgb(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, nullptr, true);
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
@@ -7829,8 +7829,8 @@ instruct compareAndExchangeB4_acq_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr
|
||||
effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP cr0);
|
||||
format %{ "CMPXCHGB acq $res, $mem_ptr, $src1, $src2; as int" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgb(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, R0,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgb(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, R0,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, nullptr, true);
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
@@ -7849,8 +7849,8 @@ instruct compareAndExchangeS_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iReg
|
||||
effect(TEMP_DEF res, TEMP cr0);
|
||||
format %{ "CMPXCHGH $res, $mem_ptr, $src1, $src2; as int" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgh(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgh(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, nullptr, true);
|
||||
%}
|
||||
@@ -7863,8 +7863,8 @@ instruct compareAndExchangeS4_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr, iR
|
||||
effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP cr0);
|
||||
format %{ "CMPXCHGH $res, $mem_ptr, $src1, $src2; as int" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgh(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, R0,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgh(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, R0,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, nullptr, true);
|
||||
%}
|
||||
@@ -7877,8 +7877,8 @@ instruct compareAndExchangeS_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr,
|
||||
effect(TEMP_DEF res, TEMP cr0);
|
||||
format %{ "CMPXCHGH acq $res, $mem_ptr, $src1, $src2; as int" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgh(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgh(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, nullptr, true);
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
@@ -7897,8 +7897,8 @@ instruct compareAndExchangeS4_acq_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr
|
||||
effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP cr0);
|
||||
format %{ "CMPXCHGH acq $res, $mem_ptr, $src1, $src2; as int" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgh(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, R0,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgh(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, R0,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, nullptr, true);
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
@@ -7917,8 +7917,8 @@ instruct compareAndExchangeI_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iReg
|
||||
effect(TEMP_DEF res, TEMP cr0);
|
||||
format %{ "CMPXCHGW $res, $mem_ptr, $src1, $src2; as int" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgw(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgw(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, nullptr, true);
|
||||
%}
|
||||
@@ -7931,8 +7931,8 @@ instruct compareAndExchangeI_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr,
|
||||
effect(TEMP_DEF res, TEMP cr0);
|
||||
format %{ "CMPXCHGW acq $res, $mem_ptr, $src1, $src2; as int" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgw(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgw(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, nullptr, true);
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
@@ -7951,8 +7951,8 @@ instruct compareAndExchangeN_regP_regN_regN(iRegNdst res, iRegPdst mem_ptr, iReg
|
||||
effect(TEMP_DEF res, TEMP cr0);
|
||||
format %{ "CMPXCHGW $res, $mem_ptr, $src1, $src2; as narrow oop" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgw(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgw(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, nullptr, true);
|
||||
%}
|
||||
@@ -7965,8 +7965,8 @@ instruct compareAndExchangeN_acq_regP_regN_regN(iRegNdst res, iRegPdst mem_ptr,
|
||||
effect(TEMP_DEF res, TEMP cr0);
|
||||
format %{ "CMPXCHGW acq $res, $mem_ptr, $src1, $src2; as narrow oop" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgw(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgw(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, nullptr, true);
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
@@ -7985,8 +7985,8 @@ instruct compareAndExchangeL_regP_regL_regL(iRegLdst res, iRegPdst mem_ptr, iReg
|
||||
effect(TEMP_DEF res, TEMP cr0);
|
||||
format %{ "CMPXCHGD $res, $mem_ptr, $src1, $src2; as long" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgd(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgd(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, nullptr, true);
|
||||
%}
|
||||
@@ -7999,8 +7999,8 @@ instruct compareAndExchangeL_acq_regP_regL_regL(iRegLdst res, iRegPdst mem_ptr,
|
||||
effect(TEMP_DEF res, TEMP cr0);
|
||||
format %{ "CMPXCHGD acq $res, $mem_ptr, $src1, $src2; as long" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgd(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgd(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, nullptr, true);
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
@@ -8020,8 +8020,8 @@ instruct compareAndExchangeP_regP_regP_regP(iRegPdst res, iRegPdst mem_ptr, iReg
|
||||
effect(TEMP_DEF res, TEMP cr0);
|
||||
format %{ "CMPXCHGD $res, $mem_ptr, $src1, $src2; as ptr; ptr" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgd(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgd(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, nullptr, true);
|
||||
%}
|
||||
@@ -8035,8 +8035,8 @@ instruct compareAndExchangeP_acq_regP_regP_regP(iRegPdst res, iRegPdst mem_ptr,
|
||||
effect(TEMP_DEF res, TEMP cr0);
|
||||
format %{ "CMPXCHGD acq $res, $mem_ptr, $src1, $src2; as ptr; ptr" %}
|
||||
ins_encode %{
|
||||
// CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgd(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
// CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
|
||||
__ cmpxchgd(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, nullptr, true);
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
@@ -11389,7 +11389,7 @@ instruct cmpL3_reg_reg(iRegIdst dst, iRegLsrc src1, iRegLsrc src2, flagsRegCR0 c
format %{ "cmpL3_reg_reg $dst, $src1, $src2" %}

ins_encode %{
__ cmpd(CR0, $src1$$Register, $src2$$Register);
__ cmpd(CCR0, $src1$$Register, $src2$$Register);
__ set_cmp3($dst$$Register);
%}
ins_pipe(pipe_class_default);
@@ -11661,11 +11661,11 @@ instruct cmpF_reg_reg_Ex(flagsReg crx, regF src1, regF src2) %{
//
// block BXX:
// 0: instruct cmpFUnordered_reg_reg (cmpF_reg_reg-0):
// cmpFUrd CR6, F11, F9
// cmpFUrd CCR6, F11, F9
// 4: instruct cmov_bns_less (cmpF_reg_reg-1):
// cmov CR6
// cmov CCR6
// 8: instruct branchConSched:
// B_FARle CR6, B56 P=0.500000 C=-1.000000
// B_FARle CCR6, B56 P=0.500000 C=-1.000000
match(Set crx (CmpF src1 src2));
ins_cost(DEFAULT_COST+BRANCH_COST);

@@ -11724,7 +11724,7 @@ instruct cmpF3_reg_reg(iRegIdst dst, regF src1, regF src2, flagsRegCR0 cr0) %{
format %{ "cmpF3_reg_reg $dst, $src1, $src2" %}

ins_encode %{
__ fcmpu(CR0, $src1$$FloatRegister, $src2$$FloatRegister);
__ fcmpu(CCR0, $src1$$FloatRegister, $src2$$FloatRegister);
__ set_cmpu3($dst$$Register, true); // C2 requires unordered to get treated like less
%}
ins_pipe(pipe_class_default);
@@ -11808,7 +11808,7 @@ instruct cmpD3_reg_reg(iRegIdst dst, regD src1, regD src2, flagsRegCR0 cr0) %{
format %{ "cmpD3_reg_reg $dst, $src1, $src2" %}

ins_encode %{
__ fcmpu(CR0, $src1$$FloatRegister, $src2$$FloatRegister);
__ fcmpu(CCR0, $src1$$FloatRegister, $src2$$FloatRegister);
__ set_cmpu3($dst$$Register, true); // C2 requires unordered to get treated like less
%}
ins_pipe(pipe_class_default);
@@ -12069,7 +12069,6 @@ instruct branchLoopEndFar(cmpOp cmp, flagsRegSrc crx, label labl) %{
instruct partialSubtypeCheck(iRegPdst result, iRegP_N2P subklass, iRegP_N2P superklass,
iRegPdst tmp_klass, iRegPdst tmp_arrayptr) %{
match(Set result (PartialSubtypeCheck subklass superklass));
predicate(!UseSecondarySupersTable);
effect(TEMP_DEF result, TEMP tmp_klass, TEMP tmp_arrayptr);
ins_cost(DEFAULT_COST*10);

@@ -12081,30 +12080,6 @@ instruct partialSubtypeCheck(iRegPdst result, iRegP_N2P subklass, iRegP_N2P supe
ins_pipe(pipe_class_default);
%}

// Two versions of partialSubtypeCheck, both used when we need to
// search for a super class in the secondary supers array. The first
// is used when we don't know _a priori_ the class being searched
// for. The second, far more common, is used when we do know: this is
// used for instanceof, checkcast, and any case where C2 can determine
// it by constant propagation.
instruct partialSubtypeCheckVarSuper(iRegPsrc sub, iRegPsrc super, iRegPdst result,
iRegPdst tempR1, iRegPdst tempR2, iRegPdst tempR3, iRegPdst tempR4,
flagsRegCR0 cr0, regCTR ctr)
%{
match(Set result (PartialSubtypeCheck sub super));
predicate(UseSecondarySupersTable);
effect(KILL cr0, KILL ctr, TEMP_DEF result, TEMP tempR1, TEMP tempR2, TEMP tempR3, TEMP tempR4);

ins_cost(DEFAULT_COST * 10); // slightly larger than the next version
format %{ "partialSubtypeCheck $result, $sub, $super" %}
ins_encode %{
__ lookup_secondary_supers_table_var($sub$$Register, $super$$Register,
$tempR1$$Register, $tempR2$$Register, $tempR3$$Register, $tempR4$$Register,
$result$$Register);
%}
ins_pipe(pipe_class_memory);
%}

instruct partialSubtypeCheckConstSuper(rarg3RegP sub, rarg2RegP super_reg, immP super_con, rarg6RegP result,
rarg1RegP tempR1, rarg5RegP tempR2, rarg4RegP tempR3, rscratch1RegP tempR4,
flagsRegCR0 cr0, regCTR ctr)
@@ -12119,9 +12094,9 @@ instruct partialSubtypeCheckConstSuper(rarg3RegP sub, rarg2RegP super_reg, immP
ins_encode %{
u1 super_klass_slot = ((Klass*)$super_con$$constant)->hash_slot();
if (InlineSecondarySupersTest) {
__ lookup_secondary_supers_table_const($sub$$Register, $super_reg$$Register,
$tempR1$$Register, $tempR2$$Register, $tempR3$$Register, $tempR4$$Register,
$result$$Register, super_klass_slot);
__ lookup_secondary_supers_table($sub$$Register, $super_reg$$Register,
$tempR1$$Register, $tempR2$$Register, $tempR3$$Register, $tempR4$$Register,
$result$$Register, super_klass_slot);
} else {
address stub = StubRoutines::lookup_secondary_supers_table_stub(super_klass_slot);
Register r_stub_addr = $tempR1$$Register;
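The comment block above explains when the variable-super and constant-super flavors of the check are selected. For orientation, here is a rough sketch of the hashed secondary-supers membership test that the lookup_secondary_supers_table_* calls implement; the field and helper names below are illustrative assumptions, not the exact HotSpot ones, and a hit or collision may still fall back to a slower linear search in the real code.

#include <cstdint>

struct KlassModel {
  uint64_t secondary_bitmap;      // one bit per 6-bit hash slot (assumed layout)
  const KlassModel* table[64];    // packed array of secondary supers (assumed layout)
};

// Fast membership test: check the bit for the super's hash slot, then compare
// the packed table entry located by counting the set bits below that slot.
inline bool secondary_supers_hit(const KlassModel* sub, const KlassModel* super,
                                 unsigned super_hash_slot) {
  uint64_t bit = uint64_t(1) << super_hash_slot;
  if ((sub->secondary_bitmap & bit) == 0) return false;        // definitely not a super
  unsigned idx = __builtin_popcountll(sub->secondary_bitmap & (bit - 1));
  return sub->table[idx] == super;                              // hit at the hashed slot
}
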
@@ -12770,7 +12745,7 @@ instruct string_inflate(Universe dummy, rarg1RegP src, rarg2RegP dst, iRegIsrc l
__ string_inflate_16($src$$Register, $dst$$Register, $len$$Register, $tmp1$$Register,
$tmp2$$Register, $tmp3$$Register, $tmp4$$Register, $tmp5$$Register);
__ rldicl_($tmp1$$Register, $len$$Register, 0, 64-3); // Remaining characters.
__ beq(CR0, Ldone);
__ beq(CCR0, Ldone);
__ string_inflate($src$$Register, $dst$$Register, $tmp1$$Register, $tmp2$$Register);
__ bind(Ldone);
%}
@@ -12854,8 +12829,8 @@ instruct minI_reg_reg_isel(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, flagsRegC
ins_cost(DEFAULT_COST*2);

ins_encode %{
__ cmpw(CR0, $src1$$Register, $src2$$Register);
__ isel($dst$$Register, CR0, Assembler::less, /*invert*/false, $src1$$Register, $src2$$Register);
__ cmpw(CCR0, $src1$$Register, $src2$$Register);
__ isel($dst$$Register, CCR0, Assembler::less, /*invert*/false, $src1$$Register, $src2$$Register);
%}
ins_pipe(pipe_class_default);
%}
@@ -12887,8 +12862,8 @@ instruct maxI_reg_reg_isel(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, flagsRegC
ins_cost(DEFAULT_COST*2);

ins_encode %{
__ cmpw(CR0, $src1$$Register, $src2$$Register);
__ isel($dst$$Register, CR0, Assembler::greater, /*invert*/false, $src1$$Register, $src2$$Register);
__ cmpw(CCR0, $src1$$Register, $src2$$Register);
__ isel($dst$$Register, CCR0, Assembler::greater, /*invert*/false, $src1$$Register, $src2$$Register);
%}
ins_pipe(pipe_class_default);
%}

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -179,14 +179,14 @@ inline constexpr ConditionRegister as_ConditionRegister(int encoding) {
return ConditionRegister(encoding);
}

constexpr ConditionRegister CR0 = as_ConditionRegister(0);
constexpr ConditionRegister CR1 = as_ConditionRegister(1);
constexpr ConditionRegister CR2 = as_ConditionRegister(2);
constexpr ConditionRegister CR3 = as_ConditionRegister(3);
constexpr ConditionRegister CR4 = as_ConditionRegister(4);
constexpr ConditionRegister CR5 = as_ConditionRegister(5);
constexpr ConditionRegister CR6 = as_ConditionRegister(6);
constexpr ConditionRegister CR7 = as_ConditionRegister(7);
constexpr ConditionRegister CCR0 = as_ConditionRegister(0);
constexpr ConditionRegister CCR1 = as_ConditionRegister(1);
constexpr ConditionRegister CCR2 = as_ConditionRegister(2);
constexpr ConditionRegister CCR3 = as_ConditionRegister(3);
constexpr ConditionRegister CCR4 = as_ConditionRegister(4);
constexpr ConditionRegister CCR5 = as_ConditionRegister(5);
constexpr ConditionRegister CCR6 = as_ConditionRegister(6);
constexpr ConditionRegister CCR7 = as_ConditionRegister(7);


class VectorSRegister;

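The hunk above is a pure renaming of the condition-register constants: the new CRn names and the old CCRn names wrap the same encodings 0 through 7. A minimal standalone sketch (not taken from the patch; the model type is invented for illustration) that checks this equivalence:

// Simplified stand-in for HotSpot's ConditionRegister; only the encoding matters here.
struct ConditionRegisterModel {
  int _encoding;
  constexpr explicit ConditionRegisterModel(int e) : _encoding(e) {}
  constexpr int encoding() const { return _encoding; }
};

inline constexpr ConditionRegisterModel as_ConditionRegister(int encoding) {
  return ConditionRegisterModel(encoding);
}

// New names (CRn) and old names (CCRn) side by side, as on the two sides of the diff.
constexpr ConditionRegisterModel CR0  = as_ConditionRegister(0);
constexpr ConditionRegisterModel CCR0 = as_ConditionRegister(0);
constexpr ConditionRegisterModel CR7  = as_ConditionRegister(7);
constexpr ConditionRegisterModel CCR7 = as_ConditionRegister(7);

static_assert(CR0.encoding() == CCR0.encoding(), "rename keeps encoding 0");
static_assert(CR7.encoding() == CCR7.encoding(), "rename keeps encoding 7");
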
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* Copyright (c) 2012, 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -101,7 +101,7 @@ void OptoRuntime::generate_exception_blob() {
__ call_c((address) OptoRuntime::handle_exception_C);
address calls_return_pc = __ last_calls_return_pc();
# ifdef ASSERT
__ cmpdi(CR0, R3_RET, 0);
__ cmpdi(CCR0, R3_RET, 0);
__ asm_assert_ne("handle_exception_C must not return null");
# endif


@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -908,9 +908,9 @@ static address gen_c2i_adapter(MacroAssembler *masm,
|
||||
|
||||
// Does compiled code exists? If yes, patch the caller's callsite.
|
||||
__ ld(code, method_(code));
|
||||
__ cmpdi(CR0, code, 0);
|
||||
__ cmpdi(CCR0, code, 0);
|
||||
__ ld(ientry, method_(interpreter_entry)); // preloaded
|
||||
__ beq(CR0, call_interpreter);
|
||||
__ beq(CCR0, call_interpreter);
|
||||
|
||||
|
||||
// Patch caller's callsite, method_(code) was not null which means that
|
||||
@@ -1184,9 +1184,9 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
|
||||
// Argument is valid and klass is as expected, continue.
|
||||
|
||||
__ ld(code, method_(code));
|
||||
__ cmpdi(CR0, code, 0);
|
||||
__ cmpdi(CCR0, code, 0);
|
||||
__ ld(ientry, method_(interpreter_entry)); // preloaded
|
||||
__ beq_predict_taken(CR0, call_interpreter);
|
||||
__ beq_predict_taken(CCR0, call_interpreter);
|
||||
|
||||
// Branch to ic_miss_stub.
|
||||
__ b64_patchable((address)SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
|
||||
@@ -1203,7 +1203,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
|
||||
{ // Bypass the barrier for non-static methods
|
||||
__ lhz(R0, in_bytes(Method::access_flags_offset()), R19_method);
|
||||
__ andi_(R0, R0, JVM_ACC_STATIC);
|
||||
__ beq(CR0, L_skip_barrier); // non-static
|
||||
__ beq(CCR0, L_skip_barrier); // non-static
|
||||
}
|
||||
|
||||
Register klass = R11_scratch1;
|
||||
@@ -1251,8 +1251,8 @@ static void object_move(MacroAssembler* masm,
|
||||
|
||||
__ addi(r_handle, r_caller_sp, reg2offset(src.first()));
|
||||
__ ld( r_temp_2, reg2offset(src.first()), r_caller_sp);
|
||||
__ cmpdi(CR0, r_temp_2, 0);
|
||||
__ bne(CR0, skip);
|
||||
__ cmpdi(CCR0, r_temp_2, 0);
|
||||
__ bne(CCR0, skip);
|
||||
// Use a null handle if oop is null.
|
||||
__ li(r_handle, 0);
|
||||
__ bind(skip);
|
||||
@@ -1281,8 +1281,8 @@ static void object_move(MacroAssembler* masm,
|
||||
__ std( r_oop, oop_offset, R1_SP);
|
||||
__ addi(r_handle, R1_SP, oop_offset);
|
||||
|
||||
__ cmpdi(CR0, r_oop, 0);
|
||||
__ bne(CR0, skip);
|
||||
__ cmpdi(CCR0, r_oop, 0);
|
||||
__ bne(CCR0, skip);
|
||||
// Use a null handle if oop is null.
|
||||
__ li(r_handle, 0);
|
||||
__ bind(skip);
|
||||
@@ -1642,7 +1642,7 @@ static void continuation_enter_cleanup(MacroAssembler* masm) {
|
||||
#ifdef ASSERT
|
||||
__ block_comment("clean {");
|
||||
__ ld_ptr(tmp1, JavaThread::cont_entry_offset(), R16_thread);
|
||||
__ cmpd(CR0, R1_SP, tmp1);
|
||||
__ cmpd(CCR0, R1_SP, tmp1);
|
||||
__ asm_assert_eq(FILE_AND_LINE ": incorrect R1_SP");
|
||||
#endif
|
||||
|
||||
@@ -1653,15 +1653,15 @@ static void continuation_enter_cleanup(MacroAssembler* masm) {
|
||||
// Check if this is a virtual thread continuation
|
||||
Label L_skip_vthread_code;
|
||||
__ lwz(R0, in_bytes(ContinuationEntry::flags_offset()), R1_SP);
|
||||
__ cmpwi(CR0, R0, 0);
|
||||
__ beq(CR0, L_skip_vthread_code);
|
||||
__ cmpwi(CCR0, R0, 0);
|
||||
__ beq(CCR0, L_skip_vthread_code);
|
||||
|
||||
// If the held monitor count is > 0 and this vthread is terminating then
|
||||
// it failed to release a JNI monitor. So we issue the same log message
|
||||
// that JavaThread::exit does.
|
||||
__ ld(R0, in_bytes(JavaThread::jni_monitor_count_offset()), R16_thread);
|
||||
__ cmpdi(CR0, R0, 0);
|
||||
__ beq(CR0, L_skip_vthread_code);
|
||||
__ cmpdi(CCR0, R0, 0);
|
||||
__ beq(CCR0, L_skip_vthread_code);
|
||||
|
||||
// Save return value potentially containing the exception oop
|
||||
Register ex_oop = R15_esp; // nonvolatile register
|
||||
@@ -1683,8 +1683,8 @@ static void continuation_enter_cleanup(MacroAssembler* masm) {
|
||||
// Check if this is a virtual thread continuation
|
||||
Label L_skip_vthread_code;
|
||||
__ lwz(R0, in_bytes(ContinuationEntry::flags_offset()), R1_SP);
|
||||
__ cmpwi(CR0, R0, 0);
|
||||
__ beq(CR0, L_skip_vthread_code);
|
||||
__ cmpwi(CCR0, R0, 0);
|
||||
__ beq(CCR0, L_skip_vthread_code);
|
||||
|
||||
// See comment just above. If not checking JNI calls the JNI count is only
|
||||
// needed for assertion checking.
|
||||
@@ -1749,8 +1749,8 @@ static void gen_continuation_enter(MacroAssembler* masm,
|
||||
#ifdef ASSERT
|
||||
Label is_interp_only;
|
||||
__ lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
|
||||
__ cmpwi(CR0, R0, 0);
|
||||
__ bne(CR0, is_interp_only);
|
||||
__ cmpwi(CCR0, R0, 0);
|
||||
__ bne(CCR0, is_interp_only);
|
||||
__ stop("enterSpecial interpreter entry called when not in interp_only_mode");
|
||||
__ bind(is_interp_only);
|
||||
#endif
|
||||
@@ -1770,8 +1770,8 @@ static void gen_continuation_enter(MacroAssembler* masm,
|
||||
fill_continuation_entry(masm, reg_cont_obj, reg_is_virtual);
|
||||
|
||||
// If isContinue, call to thaw. Otherwise, call Continuation.enter(Continuation c, boolean isContinue)
|
||||
__ cmpwi(CR0, reg_is_cont, 0);
|
||||
__ bne(CR0, L_thaw);
|
||||
__ cmpwi(CCR0, reg_is_cont, 0);
|
||||
__ bne(CCR0, L_thaw);
|
||||
|
||||
// --- call Continuation.enter(Continuation c, boolean isContinue)
|
||||
|
||||
@@ -1818,8 +1818,8 @@ static void gen_continuation_enter(MacroAssembler* masm,
|
||||
fill_continuation_entry(masm, reg_cont_obj, reg_is_virtual);
|
||||
|
||||
// If isContinue, call to thaw. Otherwise, call Continuation.enter(Continuation c, boolean isContinue)
|
||||
__ cmpwi(CR0, reg_is_cont, 0);
|
||||
__ bne(CR0, L_thaw);
|
||||
__ cmpwi(CCR0, reg_is_cont, 0);
|
||||
__ bne(CCR0, L_thaw);
|
||||
|
||||
// --- call Continuation.enter(Continuation c, boolean isContinue)
|
||||
|
||||
@@ -1869,7 +1869,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
|
||||
// Pop frame and return
|
||||
DEBUG_ONLY(__ ld_ptr(R0, 0, R1_SP));
|
||||
__ addi(R1_SP, R1_SP, framesize_words*wordSize);
|
||||
DEBUG_ONLY(__ cmpd(CR0, R0, R1_SP));
|
||||
DEBUG_ONLY(__ cmpd(CCR0, R0, R1_SP));
|
||||
__ asm_assert_eq(FILE_AND_LINE ": inconsistent frame size");
|
||||
__ ld(R0, _abi0(lr), R1_SP); // Return pc
|
||||
__ mtlr(R0);
|
||||
@@ -1937,8 +1937,8 @@ static void gen_continuation_yield(MacroAssembler* masm,
|
||||
|
||||
Label L_pinned;
|
||||
|
||||
__ cmpwi(CR0, R3_RET, 0);
|
||||
__ bne(CR0, L_pinned);
|
||||
__ cmpwi(CCR0, R3_RET, 0);
|
||||
__ bne(CCR0, L_pinned);
|
||||
|
||||
// yield succeeded
|
||||
|
||||
@@ -1961,8 +1961,8 @@ static void gen_continuation_yield(MacroAssembler* masm,
|
||||
|
||||
// handle pending exception thrown by freeze
|
||||
__ ld(tmp, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
|
||||
__ cmpdi(CR0, tmp, 0);
|
||||
__ beq(CR0, L_return); // return if no exception is pending
|
||||
__ cmpdi(CCR0, tmp, 0);
|
||||
__ beq(CCR0, L_return); // return if no exception is pending
|
||||
__ pop_frame();
|
||||
__ ld(R0, _abi0(lr), R1_SP); // Return pc
|
||||
__ mtlr(R0);
|
||||
@@ -2398,12 +2398,12 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
// fast_lock kills r_temp_1, r_temp_2, r_temp_3.
|
||||
Register r_temp_3_or_noreg = UseObjectMonitorTable ? r_temp_3 : noreg;
|
||||
__ compiler_fast_lock_lightweight_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3_or_noreg);
|
||||
__ compiler_fast_lock_lightweight_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3_or_noreg);
|
||||
} else {
|
||||
// fast_lock kills r_temp_1, r_temp_2, r_temp_3.
|
||||
__ compiler_fast_lock_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
|
||||
__ compiler_fast_lock_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
|
||||
}
|
||||
__ beq(CR0, locked);
|
||||
__ beq(CCR0, locked);
|
||||
|
||||
// None of the above fast optimizations worked so we have to get into the
|
||||
// slow case of monitor enter. Inline a special case of call_VM that
|
||||
@@ -2538,8 +2538,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
||||
// Not suspended.
|
||||
// TODO: PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
|
||||
__ lwz(suspend_flags, thread_(suspend_flags));
|
||||
__ cmpwi(CR1, suspend_flags, 0);
|
||||
__ beq(CR1, no_block);
|
||||
__ cmpwi(CCR1, suspend_flags, 0);
|
||||
__ beq(CCR1, no_block);
|
||||
|
||||
// Block. Save any potential method result value before the operation and
|
||||
// use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
|
||||
@@ -2572,8 +2572,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
||||
if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
|
||||
Label not_preempted;
|
||||
__ ld(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread);
|
||||
__ cmpdi(CR0, R0, 0);
|
||||
__ beq(CR0, not_preempted);
|
||||
__ cmpdi(CCR0, R0, 0);
|
||||
__ beq(CCR0, not_preempted);
|
||||
__ mtlr(R0);
|
||||
__ li(R0, 0);
|
||||
__ std(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread);
|
||||
@@ -2591,8 +2591,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
||||
|
||||
Label no_reguard;
|
||||
__ lwz(r_temp_1, thread_(stack_guard_state));
|
||||
__ cmpwi(CR0, r_temp_1, StackOverflow::stack_guard_yellow_reserved_disabled);
|
||||
__ bne(CR0, no_reguard);
|
||||
__ cmpwi(CCR0, r_temp_1, StackOverflow::stack_guard_yellow_reserved_disabled);
|
||||
__ bne(CCR0, no_reguard);
|
||||
|
||||
save_native_result(masm, ret_type, workspace_slot_offset);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
|
||||
@@ -2622,11 +2622,11 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
||||
|
||||
// Try fastpath for unlocking.
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
__ compiler_fast_unlock_lightweight_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
|
||||
__ compiler_fast_unlock_lightweight_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
|
||||
} else {
|
||||
__ compiler_fast_unlock_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
|
||||
__ compiler_fast_unlock_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
|
||||
}
|
||||
__ beq(CR0, done);
|
||||
__ beq(CCR0, done);
|
||||
|
||||
// Save and restore any potential method result value around the unlocking operation.
|
||||
save_native_result(masm, ret_type, workspace_slot_offset);
|
||||
@@ -2693,8 +2693,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
||||
// Check for pending exceptions.
|
||||
// --------------------------------------------------------------------------
|
||||
__ ld(r_temp_2, thread_(pending_exception));
|
||||
__ cmpdi(CR0, r_temp_2, 0);
|
||||
__ bne(CR0, handle_pending_exception);
|
||||
__ cmpdi(CCR0, r_temp_2, 0);
|
||||
__ bne(CCR0, handle_pending_exception);
|
||||
|
||||
// Return
|
||||
// --------------------------------------------------------------------------
|
||||
@@ -2851,7 +2851,7 @@ static void push_skeleton_frames(MacroAssembler* masm, bool deopt,
|
||||
|
||||
#ifdef ASSERT
|
||||
// Make sure that there is at least one entry in the array.
|
||||
__ cmpdi(CR0, number_of_frames_reg, 0);
|
||||
__ cmpdi(CCR0, number_of_frames_reg, 0);
|
||||
__ asm_assert_ne("array_size must be > 0");
|
||||
#endif
|
||||
|
||||
@@ -2866,8 +2866,8 @@ static void push_skeleton_frames(MacroAssembler* masm, bool deopt,
|
||||
pcs_reg,
|
||||
frame_size_reg,
|
||||
pc_reg);
|
||||
__ cmpdi(CR0, number_of_frames_reg, 0);
|
||||
__ bne(CR0, loop);
|
||||
__ cmpdi(CCR0, number_of_frames_reg, 0);
|
||||
__ bne(CCR0, loop);
|
||||
|
||||
// Get the return address pointing into the frame manager.
|
||||
__ ld(R0, 0, pcs_reg);
|
||||
@@ -3014,8 +3014,8 @@ void SharedRuntime::generate_deopt_blob() {
|
||||
// stored in the thread during exception entry above. The exception
|
||||
// oop will be the return value of this stub.
|
||||
Label skip_restore_excp;
|
||||
__ cmpdi(CR0, exec_mode_reg, Deoptimization::Unpack_exception);
|
||||
__ bne(CR0, skip_restore_excp);
|
||||
__ cmpdi(CCR0, exec_mode_reg, Deoptimization::Unpack_exception);
|
||||
__ bne(CCR0, skip_restore_excp);
|
||||
__ ld(R3_RET, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
|
||||
__ ld(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
|
||||
__ li(R0, 0);
|
||||
@@ -3165,7 +3165,7 @@ void OptoRuntime::generate_uncommon_trap_blob() {
|
||||
|
||||
#ifdef ASSERT
|
||||
__ lwz(R22_tmp2, in_bytes(Deoptimization::UnrollBlock::unpack_kind_offset()), unroll_block_reg);
|
||||
__ cmpdi(CR0, R22_tmp2, (unsigned)Deoptimization::Unpack_uncommon_trap);
|
||||
__ cmpdi(CCR0, R22_tmp2, (unsigned)Deoptimization::Unpack_uncommon_trap);
|
||||
__ asm_assert_eq("OptoRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
|
||||
#endif
|
||||
|
||||
@@ -3295,8 +3295,8 @@ SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address cal
BLOCK_COMMENT(" Check pending exception.");
const Register pending_exception = R0;
__ ld(pending_exception, thread_(pending_exception));
__ cmpdi(CR0, pending_exception, 0);
__ beq(CR0, noException);
__ cmpdi(CCR0, pending_exception, 0);
__ beq(CCR0, noException);

// Exception pending
RegisterSaver::restore_live_registers_and_pop_frame(masm,
@@ -3315,8 +3315,8 @@ SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address cal
Label no_adjust;
// If our stashed return pc was modified by the runtime we avoid touching it
__ ld(R0, frame_size_in_bytes + _abi0(lr), R1_SP);
__ cmpd(CR0, R0, R31);
__ bne(CR0, no_adjust);
__ cmpd(CCR0, R0, R31);
__ bne(CCR0, no_adjust);

// Adjust return pc forward to step over the safepoint poll instruction
__ addi(R31, R31, 4);
@@ -3395,8 +3395,8 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address desti
BLOCK_COMMENT("Check for pending exceptions.");
Label pending;
__ ld(R11_scratch1, thread_(pending_exception));
__ cmpdi(CR0, R11_scratch1, 0);
__ bne(CR0, pending);
__ cmpdi(CCR0, R11_scratch1, 0);
__ bne(CCR0, pending);

__ mtctr(R3_RET); // Ctr will not be touched by restore_live_registers_and_pop_frame.

@@ -3499,8 +3499,8 @@ RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address ru
__ ld(R0,
in_bytes(Thread::pending_exception_offset()),
R16_thread);
__ cmpdi(CR0, R0, 0);
__ bne(CR0, L);
__ cmpdi(CCR0, R0, 0);
__ bne(CCR0, L);
__ stop("SharedRuntime::throw_exception: no pending exception");
__ bind(L);
}

@@ -1,57 +0,0 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef CPU_PPC_STUBDECLARATIONS_HPP
#define CPU_PPC_STUBDECLARATIONS_HPP

#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_blob(initial, 20000) \


#define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_blob(continuation, 2000) \


#define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_blob(compiler, 24000) \


#define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \
do_arch_blob, \
do_arch_entry, \
do_arch_entry_init) \
do_arch_blob(final, 24000) \


#endif // CPU_PPC_STUBDECLARATIONS_HPP

File diff suppressed because it is too large
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -32,17 +32,14 @@

static bool returns_to_call_stub(address return_pc) { return return_pc == _call_stub_return_address; }

// emit enum used to size per-blob code buffers

#define DEFINE_BLOB_SIZE(blob_name, size) \
_ ## blob_name ## _code_size = size,

enum platform_dependent_constants {
STUBGEN_ARCH_BLOBS_DO(DEFINE_BLOB_SIZE)
// simply increase sizes if too small (assembler will crash if too small)
_initial_stubs_code_size = 20000,
_continuation_stubs_code_size = 2000,
_compiler_stubs_code_size = 24000,
_final_stubs_code_size = 24000
};

#undef DEFINE_BLOB_SIZE

// CRC32 Intrinsics.
#define CRC32_TABLE_SIZE (4 * 256)
#define REVERSE_CRC32_POLY 0xEDB88320

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2025 SAP SE. All rights reserved.
* Copyright (c) 2015, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -147,8 +147,8 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
|
||||
assert(sizeof(AccessFlags) == 2, "wrong size");
|
||||
__ lhz(R11_scratch1/*access_flags*/, method_(access_flags));
|
||||
// testbit with condition register.
|
||||
__ testbitdi(CR0, R0, R11_scratch1/*access_flags*/, JVM_ACC_STATIC_BIT);
|
||||
__ btrue(CR0, L);
|
||||
__ testbitdi(CCR0, R0, R11_scratch1/*access_flags*/, JVM_ACC_STATIC_BIT);
|
||||
__ btrue(CCR0, L);
|
||||
// For non-static functions, pass "this" in R4_ARG2 and copy it
|
||||
// to 2nd C-arg slot.
|
||||
// We need to box the Java object here, so we use arg_java
|
||||
@@ -175,8 +175,8 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
|
||||
// signature points to '(' at entry
|
||||
#ifdef ASSERT
|
||||
__ lbz(sig_byte, 0, signature);
|
||||
__ cmplwi(CR0, sig_byte, '(');
|
||||
__ bne(CR0, do_dontreachhere);
|
||||
__ cmplwi(CCR0, sig_byte, '(');
|
||||
__ bne(CCR0, do_dontreachhere);
|
||||
#endif
|
||||
|
||||
__ bind(loop_start);
|
||||
@@ -184,41 +184,41 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
|
||||
__ addi(argcnt, argcnt, 1);
|
||||
__ lbzu(sig_byte, 1, signature);
|
||||
|
||||
__ cmplwi(CR0, sig_byte, ')'); // end of signature
|
||||
__ beq(CR0, loop_end);
|
||||
__ cmplwi(CCR0, sig_byte, ')'); // end of signature
|
||||
__ beq(CCR0, loop_end);
|
||||
|
||||
__ cmplwi(CR0, sig_byte, 'B'); // byte
|
||||
__ beq(CR0, do_int);
|
||||
__ cmplwi(CCR0, sig_byte, 'B'); // byte
|
||||
__ beq(CCR0, do_int);
|
||||
|
||||
__ cmplwi(CR0, sig_byte, 'C'); // char
|
||||
__ beq(CR0, do_int);
|
||||
__ cmplwi(CCR0, sig_byte, 'C'); // char
|
||||
__ beq(CCR0, do_int);
|
||||
|
||||
__ cmplwi(CR0, sig_byte, 'D'); // double
|
||||
__ beq(CR0, do_double);
|
||||
__ cmplwi(CCR0, sig_byte, 'D'); // double
|
||||
__ beq(CCR0, do_double);
|
||||
|
||||
__ cmplwi(CR0, sig_byte, 'F'); // float
|
||||
__ beq(CR0, do_float);
|
||||
__ cmplwi(CCR0, sig_byte, 'F'); // float
|
||||
__ beq(CCR0, do_float);
|
||||
|
||||
__ cmplwi(CR0, sig_byte, 'I'); // int
|
||||
__ beq(CR0, do_int);
|
||||
__ cmplwi(CCR0, sig_byte, 'I'); // int
|
||||
__ beq(CCR0, do_int);
|
||||
|
||||
__ cmplwi(CR0, sig_byte, 'J'); // long
|
||||
__ beq(CR0, do_long);
|
||||
__ cmplwi(CCR0, sig_byte, 'J'); // long
|
||||
__ beq(CCR0, do_long);
|
||||
|
||||
__ cmplwi(CR0, sig_byte, 'S'); // short
|
||||
__ beq(CR0, do_int);
|
||||
__ cmplwi(CCR0, sig_byte, 'S'); // short
|
||||
__ beq(CCR0, do_int);
|
||||
|
||||
__ cmplwi(CR0, sig_byte, 'Z'); // boolean
|
||||
__ beq(CR0, do_int);
|
||||
__ cmplwi(CCR0, sig_byte, 'Z'); // boolean
|
||||
__ beq(CCR0, do_int);
|
||||
|
||||
__ cmplwi(CR0, sig_byte, 'L'); // object
|
||||
__ beq(CR0, do_object);
|
||||
__ cmplwi(CCR0, sig_byte, 'L'); // object
|
||||
__ beq(CCR0, do_object);
|
||||
|
||||
__ cmplwi(CR0, sig_byte, '['); // array
|
||||
__ beq(CR0, do_array);
|
||||
__ cmplwi(CCR0, sig_byte, '['); // array
|
||||
__ beq(CCR0, do_array);
|
||||
|
||||
// __ cmplwi(CR0, sig_byte, 'V'); // void cannot appear since we do not parse the return type
|
||||
// __ beq(CR0, do_void);
|
||||
// __ cmplwi(CCR0, sig_byte, 'V'); // void cannot appear since we do not parse the return type
|
||||
// __ beq(CCR0, do_void);
|
||||
|
||||
__ bind(do_dontreachhere);
|
||||
|
||||
@@ -231,16 +231,16 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
|
||||
|
||||
__ bind(start_skip);
|
||||
__ lbzu(sig_byte, 1, signature);
|
||||
__ cmplwi(CR0, sig_byte, '[');
|
||||
__ beq(CR0, start_skip); // skip further brackets
|
||||
__ cmplwi(CR0, sig_byte, '9');
|
||||
__ bgt(CR0, end_skip); // no optional size
|
||||
__ cmplwi(CR0, sig_byte, '0');
|
||||
__ bge(CR0, start_skip); // skip optional size
|
||||
__ cmplwi(CCR0, sig_byte, '[');
|
||||
__ beq(CCR0, start_skip); // skip further brackets
|
||||
__ cmplwi(CCR0, sig_byte, '9');
|
||||
__ bgt(CCR0, end_skip); // no optional size
|
||||
__ cmplwi(CCR0, sig_byte, '0');
|
||||
__ bge(CCR0, start_skip); // skip optional size
|
||||
__ bind(end_skip);
|
||||
|
||||
__ cmplwi(CR0, sig_byte, 'L');
|
||||
__ beq(CR0, do_object); // for arrays of objects, the name of the object must be skipped
|
||||
__ cmplwi(CCR0, sig_byte, 'L');
|
||||
__ beq(CCR0, do_object); // for arrays of objects, the name of the object must be skipped
|
||||
__ b(do_boxed); // otherwise, go directly to do_boxed
|
||||
}
|
||||
|
||||
@@ -249,8 +249,8 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
|
||||
Label L;
|
||||
__ bind(L);
|
||||
__ lbzu(sig_byte, 1, signature);
|
||||
__ cmplwi(CR0, sig_byte, ';');
|
||||
__ bne(CR0, L);
|
||||
__ cmplwi(CCR0, sig_byte, ';');
|
||||
__ bne(CCR0, L);
|
||||
}
|
||||
// Need to box the Java object here, so we use arg_java (address of
|
||||
// current Java stack slot) as argument and don't dereference it as
|
||||
@@ -258,16 +258,16 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
|
||||
Label do_null;
|
||||
__ bind(do_boxed);
|
||||
__ ld(R0,0, arg_java);
|
||||
__ cmpdi(CR0, R0, 0);
|
||||
__ cmpdi(CCR0, R0, 0);
|
||||
__ li(intSlot,0);
|
||||
__ beq(CR0, do_null);
|
||||
__ beq(CCR0, do_null);
|
||||
__ mr(intSlot, arg_java);
|
||||
__ bind(do_null);
|
||||
__ std(intSlot, 0, arg_c);
|
||||
__ addi(arg_java, arg_java, -BytesPerWord);
|
||||
__ addi(arg_c, arg_c, BytesPerWord);
|
||||
__ cmplwi(CR0, argcnt, max_int_register_arguments);
|
||||
__ blt(CR0, move_intSlot_to_ARG);
|
||||
__ cmplwi(CCR0, argcnt, max_int_register_arguments);
|
||||
__ blt(CCR0, move_intSlot_to_ARG);
|
||||
__ b(loop_start);
|
||||
|
||||
__ bind(do_int);
|
||||
@@ -275,8 +275,8 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
|
||||
__ std(intSlot, 0, arg_c);
|
||||
__ addi(arg_java, arg_java, -BytesPerWord);
|
||||
__ addi(arg_c, arg_c, BytesPerWord);
|
||||
__ cmplwi(CR0, argcnt, max_int_register_arguments);
|
||||
__ blt(CR0, move_intSlot_to_ARG);
|
||||
__ cmplwi(CCR0, argcnt, max_int_register_arguments);
|
||||
__ blt(CCR0, move_intSlot_to_ARG);
|
||||
__ b(loop_start);
|
||||
|
||||
__ bind(do_long);
|
||||
@@ -284,8 +284,8 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
|
||||
__ std(intSlot, 0, arg_c);
|
||||
__ addi(arg_java, arg_java, - 2 * BytesPerWord);
|
||||
__ addi(arg_c, arg_c, BytesPerWord);
|
||||
__ cmplwi(CR0, argcnt, max_int_register_arguments);
|
||||
__ blt(CR0, move_intSlot_to_ARG);
|
||||
__ cmplwi(CCR0, argcnt, max_int_register_arguments);
|
||||
__ blt(CCR0, move_intSlot_to_ARG);
|
||||
__ b(loop_start);
|
||||
|
||||
__ bind(do_float);
|
||||
@@ -293,8 +293,8 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
|
||||
__ stfs(floatSlot, Argument::float_on_stack_offset_in_bytes_c, arg_c);
|
||||
__ addi(arg_java, arg_java, -BytesPerWord);
|
||||
__ addi(arg_c, arg_c, BytesPerWord);
|
||||
__ cmplwi(CR0, fpcnt, max_fp_register_arguments);
|
||||
__ blt(CR0, move_floatSlot_to_FARG);
|
||||
__ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
|
||||
__ blt(CCR0, move_floatSlot_to_FARG);
|
||||
__ b(loop_start);
|
||||
|
||||
__ bind(do_double);
|
||||
@@ -302,8 +302,8 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
|
||||
__ stfd(floatSlot, 0, arg_c);
|
||||
__ addi(arg_java, arg_java, - 2 * BytesPerWord);
|
||||
__ addi(arg_c, arg_c, BytesPerWord);
|
||||
__ cmplwi(CR0, fpcnt, max_fp_register_arguments);
|
||||
__ blt(CR0, move_floatSlot_to_FARG);
|
||||
__ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
|
||||
__ blt(CCR0, move_floatSlot_to_FARG);
|
||||
__ b(loop_start);
|
||||
|
||||
__ bind(loop_end);
|
||||
@@ -510,8 +510,8 @@ address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
|
||||
__ ld(R3_RET, Interpreter::stackElementSize, R15_esp); // get receiver
|
||||
|
||||
// Check if receiver == nullptr and go the slow path.
|
||||
__ cmpdi(CR0, R3_RET, 0);
|
||||
__ beq(CR0, slow_path);
|
||||
__ cmpdi(CCR0, R3_RET, 0);
|
||||
__ beq(CCR0, slow_path);
|
||||
|
||||
__ load_heap_oop(R3_RET, referent_offset, R3_RET,
|
||||
/* non-volatile temp */ R31, R11_scratch1,
|
||||
@@ -725,8 +725,8 @@ void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
|
||||
if (ProfileInterpreter) {
|
||||
const Register Rmdo = R3_counters;
|
||||
__ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
|
||||
__ cmpdi(CR0, Rmdo, 0);
|
||||
__ beq(CR0, no_mdo);
|
||||
__ cmpdi(CCR0, Rmdo, 0);
|
||||
__ beq(CCR0, no_mdo);
|
||||
|
||||
// Increment invocation counter in the MDO.
|
||||
const int mdo_ic_offs = in_bytes(MethodData::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
|
||||
@@ -735,7 +735,7 @@ void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
|
||||
__ addi(Rscratch2, Rscratch2, increment);
|
||||
__ stw(Rscratch2, mdo_ic_offs, Rmdo);
|
||||
__ and_(Rscratch1, Rscratch2, Rscratch1);
|
||||
__ bne(CR0, done);
|
||||
__ bne(CCR0, done);
|
||||
__ b(*overflow);
|
||||
}
|
||||
|
||||
@@ -748,7 +748,7 @@ void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
|
||||
__ addi(Rscratch2, Rscratch2, increment);
|
||||
__ stw(Rscratch2, mo_ic_offs, R3_counters);
|
||||
__ and_(Rscratch1, Rscratch2, Rscratch1);
|
||||
__ beq(CR0, *overflow);
|
||||
__ beq(CCR0, *overflow);
|
||||
|
||||
__ bind(done);
|
||||
}
|
||||
@@ -789,8 +789,8 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_f
|
||||
BLOCK_COMMENT("stack_overflow_check_with_compare {");
|
||||
__ sub(Rmem_frame_size, R1_SP, Rmem_frame_size);
|
||||
__ ld(Rscratch1, thread_(stack_overflow_limit));
|
||||
__ cmpld(CR0/*is_stack_overflow*/, Rmem_frame_size, Rscratch1);
|
||||
__ bgt(CR0/*is_stack_overflow*/, done);
|
||||
__ cmpld(CCR0/*is_stack_overflow*/, Rmem_frame_size, Rscratch1);
|
||||
__ bgt(CCR0/*is_stack_overflow*/, done);
|
||||
|
||||
// The stack overflows. Load target address of the runtime stub and call it.
|
||||
assert(SharedRuntime::throw_StackOverflowError_entry() != nullptr, "generated in wrong order");
|
||||
@@ -799,13 +799,13 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_f
|
||||
// Restore caller_sp (c2i adapter may exist, but no shrinking of interpreted caller frame).
|
||||
#ifdef ASSERT
|
||||
Label frame_not_shrunk;
|
||||
__ cmpld(CR0, R1_SP, R21_sender_SP);
|
||||
__ ble(CR0, frame_not_shrunk);
|
||||
__ cmpld(CCR0, R1_SP, R21_sender_SP);
|
||||
__ ble(CCR0, frame_not_shrunk);
|
||||
__ stop("frame shrunk");
|
||||
__ bind(frame_not_shrunk);
|
||||
__ ld(Rscratch1, 0, R1_SP);
|
||||
__ ld(R0, 0, R21_sender_SP);
|
||||
__ cmpd(CR0, R0, Rscratch1);
|
||||
__ cmpd(CCR0, R0, Rscratch1);
|
||||
__ asm_assert_eq("backlink");
|
||||
#endif // ASSERT
|
||||
__ mr(R1_SP, R21_sender_SP);
|
||||
@@ -829,8 +829,8 @@ void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratc
|
||||
// Check if methods needs synchronization.
|
||||
{
|
||||
Label Lok;
|
||||
__ testbitdi(CR0, R0, Rflags, JVM_ACC_SYNCHRONIZED_BIT);
|
||||
__ btrue(CR0,Lok);
|
||||
__ testbitdi(CCR0, R0, Rflags, JVM_ACC_SYNCHRONIZED_BIT);
|
||||
__ btrue(CCR0,Lok);
|
||||
__ stop("method doesn't need synchronization");
|
||||
__ bind(Lok);
|
||||
}
|
||||
@@ -842,8 +842,8 @@ void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratc
|
||||
Label Lstatic;
|
||||
Label Ldone;
|
||||
|
||||
__ testbitdi(CR0, R0, Rflags, JVM_ACC_STATIC_BIT);
|
||||
__ btrue(CR0, Lstatic);
|
||||
__ testbitdi(CCR0, R0, Rflags, JVM_ACC_STATIC_BIT);
|
||||
__ btrue(CCR0, Lstatic);
|
||||
|
||||
// Non-static case: load receiver obj from stack and we're done.
|
||||
__ ld(Robj_to_lock, R18_locals);
|
||||
@@ -950,8 +950,8 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Regist
|
||||
// environment and one for a possible native mirror.
|
||||
Label skip_native_calculate_max_stack;
|
||||
__ addi(Rtop_frame_size, Rsize_of_parameters, 2);
|
||||
__ cmpwi(CR0, Rtop_frame_size, Argument::n_int_register_parameters_c);
|
||||
__ bge(CR0, skip_native_calculate_max_stack);
|
||||
__ cmpwi(CCR0, Rtop_frame_size, Argument::n_int_register_parameters_c);
|
||||
__ bge(CCR0, skip_native_calculate_max_stack);
|
||||
__ li(Rtop_frame_size, Argument::n_int_register_parameters_c);
|
||||
__ bind(skip_native_calculate_max_stack);
|
||||
__ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
|
||||
@@ -999,8 +999,8 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Regist
|
||||
if (ProfileInterpreter) {
|
||||
Label zero_continue;
|
||||
__ ld(R28_mdx, method_(method_data));
|
||||
__ cmpdi(CR0, R28_mdx, 0);
|
||||
__ beq(CR0, zero_continue);
|
||||
__ cmpdi(CCR0, R28_mdx, 0);
|
||||
__ beq(CCR0, zero_continue);
|
||||
__ addi(R28_mdx, R28_mdx, in_bytes(MethodData::data_offset()));
|
||||
__ bind(zero_continue);
|
||||
}
|
||||
@@ -1330,8 +1330,8 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
__ ld(signature_handler_fd, method_(signature_handler));
|
||||
Label call_signature_handler;
|
||||
|
||||
__ cmpdi(CR0, signature_handler_fd, 0);
|
||||
__ bne(CR0, call_signature_handler);
|
||||
__ cmpdi(CCR0, signature_handler_fd, 0);
|
||||
__ bne(CCR0, call_signature_handler);
|
||||
|
||||
// Method has never been called. Either generate a specialized
|
||||
// handler or point to the slow one.
|
||||
@@ -1342,8 +1342,8 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
// Check for an exception while looking up the target method. If we
|
||||
// incurred one, bail.
|
||||
__ ld(pending_exception, thread_(pending_exception));
|
||||
__ cmpdi(CR0, pending_exception, 0);
|
||||
__ bne(CR0, exception_return_sync_check); // Has pending exception.
|
||||
__ cmpdi(CCR0, pending_exception, 0);
|
||||
__ bne(CCR0, exception_return_sync_check); // Has pending exception.
|
||||
|
||||
// Reload signature handler, it may have been created/assigned in the meanwhile.
|
||||
__ ld(signature_handler_fd, method_(signature_handler));
|
||||
@@ -1398,8 +1398,8 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
// Access_flags is non-volatile and still, no need to restore it.
|
||||
|
||||
// Restore access flags.
|
||||
__ testbitdi(CR0, R0, access_flags, JVM_ACC_STATIC_BIT);
|
||||
__ bfalse(CR0, method_is_not_static);
|
||||
__ testbitdi(CCR0, R0, access_flags, JVM_ACC_STATIC_BIT);
|
||||
__ bfalse(CCR0, method_is_not_static);
|
||||
|
||||
// Load mirror from interpreter frame (FP in R11_scratch1)
|
||||
__ ld(R21_tmp1, _ijava_state_neg(mirror), R11_scratch1);
|
||||
@@ -1508,8 +1508,8 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
// Not suspended.
|
||||
// TODO PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
|
||||
__ lwz(suspend_flags, thread_(suspend_flags));
|
||||
__ cmpwi(CR1, suspend_flags, 0);
|
||||
__ beq(CR1, sync_check_done);
|
||||
__ cmpwi(CCR1, suspend_flags, 0);
|
||||
__ beq(CCR1, sync_check_done);
|
||||
|
||||
__ bind(do_safepoint);
|
||||
__ isync();
|
||||
@@ -1552,8 +1552,8 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
// Check preemption for Object.wait()
|
||||
Label not_preempted;
|
||||
__ ld(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread);
|
||||
__ cmpdi(CR0, R0, 0);
|
||||
__ beq(CR0, not_preempted);
|
||||
__ cmpdi(CCR0, R0, 0);
|
||||
__ beq(CCR0, not_preempted);
|
||||
__ mtlr(R0);
|
||||
__ li(R0, 0);
|
||||
__ std(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread);
|
||||
@@ -1611,8 +1611,8 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
|
||||
Label exception_return_sync_check_already_unlocked;
|
||||
__ ld(R0/*pending_exception*/, thread_(pending_exception));
|
||||
__ cmpdi(CR0, R0/*pending_exception*/, 0);
|
||||
__ bne(CR0, exception_return_sync_check_already_unlocked);
|
||||
__ cmpdi(CCR0, R0/*pending_exception*/, 0);
|
||||
__ bne(CCR0, exception_return_sync_check_already_unlocked);
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// No exception pending.
|
||||
@@ -1706,7 +1706,7 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
|
||||
__ subf(Rnum, Rsize_of_parameters, Rsize_of_locals);
|
||||
__ subf(Rslot_addr, Rsize_of_parameters, R18_locals);
|
||||
__ srdi_(Rnum, Rnum, Interpreter::logStackElementSize);
|
||||
__ beq(CR0, Lno_locals);
|
||||
__ beq(CCR0, Lno_locals);
|
||||
__ li(R0, 0);
|
||||
__ mtctr(Rnum);
|
||||
|
||||
@@ -2080,8 +2080,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
|
||||
__ ld(return_pc, 0, R1_SP);
|
||||
__ ld(return_pc, _abi0(lr), return_pc);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), return_pc);
|
||||
__ cmpdi(CR0, R3_RET, 0);
|
||||
__ bne(CR0, Lcaller_not_deoptimized);
|
||||
__ cmpdi(CCR0, R3_RET, 0);
|
||||
__ bne(CCR0, Lcaller_not_deoptimized);
|
||||
|
||||
// The deoptimized case.
|
||||
// In this case, we can't call dispatch_next() after the frame is
|
||||
@@ -2127,16 +2127,16 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
|
||||
Label L_done;
|
||||
|
||||
__ lbz(R11_scratch1, 0, R14_bcp);
|
||||
__ cmpwi(CR0, R11_scratch1, Bytecodes::_invokestatic);
|
||||
__ bne(CR0, L_done);
|
||||
__ cmpwi(CCR0, R11_scratch1, Bytecodes::_invokestatic);
|
||||
__ bne(CCR0, L_done);
|
||||
|
||||
// The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
|
||||
// Detect such a case in the InterpreterRuntime function and return the member name argument, or null.
|
||||
__ ld(R4_ARG2, 0, R18_locals);
|
||||
__ call_VM(R4_ARG2, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp);
|
||||
|
||||
__ cmpdi(CR0, R4_ARG2, 0);
|
||||
__ beq(CR0, L_done);
|
||||
__ cmpdi(CCR0, R4_ARG2, 0);
|
||||
__ beq(CCR0, L_done);
|
||||
__ std(R4_ARG2, wordSize, R15_esp);
|
||||
__ bind(L_done);
|
||||
#endif // INCLUDE_JVMTI
|
||||
@@ -2321,8 +2321,8 @@ address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
|
||||
int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
|
||||
__ ld(R11_scratch1, offs1, R11_scratch1);
|
||||
__ lwa(R12_scratch2, offs2, R12_scratch2);
|
||||
__ cmpd(CR0, R12_scratch2, R11_scratch1);
|
||||
__ blt(CR0, Lskip_vm_call);
|
||||
__ cmpd(CCR0, R12_scratch2, R11_scratch1);
|
||||
__ blt(CCR0, Lskip_vm_call);
|
||||
}
|
||||
|
||||
__ push(state);
|
||||
@@ -2396,8 +2396,8 @@ void TemplateInterpreterGenerator::stop_interpreter_at() {
|
||||
int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
|
||||
__ ld(R11_scratch1, offs1, R11_scratch1);
|
||||
__ lwa(R12_scratch2, offs2, R12_scratch2);
|
||||
__ cmpd(CR0, R12_scratch2, R11_scratch1);
|
||||
__ bne(CR0, L);
|
||||
__ cmpd(CCR0, R12_scratch2, R11_scratch1);
|
||||
__ bne(CCR0, L);
|
||||
__ illtrap();
|
||||
__ bind(L);
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2025 SAP SE. All rights reserved.
* Copyright (c) 2013, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -123,9 +123,9 @@ void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Reg
|
||||
int code_offset = (byte_no == f1_byte) ? in_bytes(ResolvedFieldEntry::get_code_offset())
|
||||
: in_bytes(ResolvedFieldEntry::put_code_offset());
|
||||
__ lbz(Rnew_bc, code_offset, Rtemp);
|
||||
__ cmpwi(CR0, Rnew_bc, 0);
|
||||
__ cmpwi(CCR0, Rnew_bc, 0);
|
||||
__ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
|
||||
__ beq(CR0, L_patch_done);
|
||||
__ beq(CCR0, L_patch_done);
|
||||
// __ isync(); // acquire not needed
|
||||
break;
|
||||
}
|
||||
@@ -140,8 +140,8 @@ void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Reg
|
||||
if (JvmtiExport::can_post_breakpoint()) {
|
||||
Label L_fast_patch;
|
||||
__ lbz(Rtemp, 0, R14_bcp);
|
||||
__ cmpwi(CR0, Rtemp, (unsigned int)(unsigned char)Bytecodes::_breakpoint);
|
||||
__ bne(CR0, L_fast_patch);
|
||||
__ cmpwi(CCR0, Rtemp, (unsigned int)(unsigned char)Bytecodes::_breakpoint);
|
||||
__ bne(CCR0, L_fast_patch);
|
||||
// Perform the quickening, slowly, in the bowels of the breakpoint table.
|
||||
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R19_method, R14_bcp, Rnew_bc);
|
||||
__ b(L_patch_done);
|
||||
@@ -261,14 +261,14 @@ void TemplateTable::ldc(LdcType type) {
|
||||
__ addi(Rscratch2, Rscratch2, tags_offset);
|
||||
__ lbzx(Rscratch2, Rscratch2, Rscratch1);
|
||||
|
||||
__ cmpwi(CR0, Rscratch2, JVM_CONSTANT_UnresolvedClass); // Unresolved class?
|
||||
__ cmpwi(CR1, Rscratch2, JVM_CONSTANT_UnresolvedClassInError); // Unresolved class in error state?
|
||||
__ cror(CR0, Assembler::equal, CR1, Assembler::equal);
|
||||
__ cmpwi(CCR0, Rscratch2, JVM_CONSTANT_UnresolvedClass); // Unresolved class?
|
||||
__ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_UnresolvedClassInError); // Unresolved class in error state?
|
||||
__ cror(CCR0, Assembler::equal, CCR1, Assembler::equal);
|
||||
|
||||
// Resolved class - need to call vm to get java mirror of the class.
|
||||
__ cmpwi(CR1, Rscratch2, JVM_CONSTANT_Class);
|
||||
__ crnor(CR0, Assembler::equal, CR1, Assembler::equal); // Neither resolved class nor unresolved case from above?
|
||||
__ beq(CR0, notClass);
|
||||
__ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_Class);
|
||||
__ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal); // Neither resolved class nor unresolved case from above?
|
||||
__ beq(CCR0, notClass);
|
||||
|
||||
__ li(R4, is_ldc_wide(type) ? 1 : 0);
|
||||
call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R4);
|
||||
@@ -279,16 +279,16 @@ void TemplateTable::ldc(LdcType type) {
|
||||
__ bind(notClass);
|
||||
__ addi(Rcpool, Rcpool, base_offset);
|
||||
__ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
|
||||
__ cmpdi(CR0, Rscratch2, JVM_CONSTANT_Integer);
|
||||
__ bne(CR0, notInt);
|
||||
__ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
|
||||
__ bne(CCR0, notInt);
|
||||
__ lwax(R17_tos, Rcpool, Rscratch1);
|
||||
__ push(itos);
|
||||
__ b(exit);
|
||||
|
||||
__ align(32, 12);
|
||||
__ bind(notInt);
|
||||
__ cmpdi(CR0, Rscratch2, JVM_CONSTANT_Float);
|
||||
__ bne(CR0, notFloat);
|
||||
__ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
|
||||
__ bne(CCR0, notFloat);
|
||||
__ lfsx(F15_ftos, Rcpool, Rscratch1);
|
||||
__ push(ftos);
|
||||
__ b(exit);
|
||||
@@ -318,12 +318,12 @@ void TemplateTable::fast_aldc(LdcType type) {
|
||||
int simm16_rest = __ load_const_optimized(R11_scratch1, Universe::the_null_sentinel_addr(), R0, true);
|
||||
__ ld(R31, simm16_rest, R11_scratch1);
|
||||
__ resolve_oop_handle(R31, R11_scratch1, R12_scratch2, MacroAssembler::PRESERVATION_NONE);
|
||||
__ cmpld(CR0, R17_tos, R31);
|
||||
__ cmpld(CCR0, R17_tos, R31);
|
||||
if (VM_Version::has_isel()) {
|
||||
__ isel_0(R17_tos, CR0, Assembler::equal);
|
||||
__ isel_0(R17_tos, CCR0, Assembler::equal);
|
||||
} else {
|
||||
Label not_sentinel;
|
||||
__ bne(CR0, not_sentinel);
|
||||
__ bne(CCR0, not_sentinel);
|
||||
__ li(R17_tos, 0);
|
||||
__ bind(not_sentinel);
|
||||
}
|
||||
@@ -359,15 +359,15 @@ void TemplateTable::ldc2_w() {
|
||||
__ lbzx(Rtag, Rtag, Rindex);
|
||||
__ sldi(Rindex, Rindex, LogBytesPerWord);
|
||||
|
||||
__ cmpdi(CR0, Rtag, JVM_CONSTANT_Double);
|
||||
__ bne(CR0, not_double);
|
||||
__ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double);
|
||||
__ bne(CCR0, not_double);
|
||||
__ lfdx(F15_ftos, Rcpool, Rindex);
|
||||
__ push(dtos);
|
||||
__ b(exit);
|
||||
|
||||
__ bind(not_double);
|
||||
__ cmpdi(CR0, Rtag, JVM_CONSTANT_Long);
|
||||
__ bne(CR0, not_long);
|
||||
__ cmpdi(CCR0, Rtag, JVM_CONSTANT_Long);
|
||||
__ bne(CCR0, not_long);
|
||||
__ ldx(R17_tos, Rcpool, Rindex);
|
||||
__ push(ltos);
|
||||
__ b(exit);
|
||||
@@ -401,32 +401,32 @@ void TemplateTable::condy_helper(Label& Done) {
|
||||
{
|
||||
// tos in (itos, ftos, stos, btos, ctos, ztos)
|
||||
Label notInt, notFloat, notShort, notByte, notChar, notBool;
|
||||
__ cmplwi(CR0, flags, itos);
|
||||
__ bne(CR0, notInt);
|
||||
__ cmplwi(CCR0, flags, itos);
|
||||
__ bne(CCR0, notInt);
|
||||
// itos
|
||||
__ lwax(R17_tos, obj, off);
|
||||
__ push(itos);
|
||||
__ b(Done);
|
||||
|
||||
__ bind(notInt);
|
||||
__ cmplwi(CR0, flags, ftos);
|
||||
__ bne(CR0, notFloat);
|
||||
__ cmplwi(CCR0, flags, ftos);
|
||||
__ bne(CCR0, notFloat);
|
||||
// ftos
|
||||
__ lfsx(F15_ftos, obj, off);
|
||||
__ push(ftos);
|
||||
__ b(Done);
|
||||
|
||||
__ bind(notFloat);
|
||||
__ cmplwi(CR0, flags, stos);
|
||||
__ bne(CR0, notShort);
|
||||
__ cmplwi(CCR0, flags, stos);
|
||||
__ bne(CCR0, notShort);
|
||||
// stos
|
||||
__ lhax(R17_tos, obj, off);
|
||||
__ push(stos);
|
||||
__ b(Done);
|
||||
|
||||
__ bind(notShort);
|
||||
__ cmplwi(CR0, flags, btos);
|
||||
__ bne(CR0, notByte);
|
||||
__ cmplwi(CCR0, flags, btos);
|
||||
__ bne(CCR0, notByte);
|
||||
// btos
|
||||
__ lbzx(R17_tos, obj, off);
|
||||
__ extsb(R17_tos, R17_tos);
|
||||
@@ -434,16 +434,16 @@ void TemplateTable::condy_helper(Label& Done) {
|
||||
__ b(Done);
|
||||
|
||||
__ bind(notByte);
|
||||
__ cmplwi(CR0, flags, ctos);
|
||||
__ bne(CR0, notChar);
|
||||
__ cmplwi(CCR0, flags, ctos);
|
||||
__ bne(CCR0, notChar);
|
||||
// ctos
|
||||
__ lhzx(R17_tos, obj, off);
|
||||
__ push(ctos);
|
||||
__ b(Done);
|
||||
|
||||
__ bind(notChar);
|
||||
__ cmplwi(CR0, flags, ztos);
|
||||
__ bne(CR0, notBool);
|
||||
__ cmplwi(CCR0, flags, ztos);
|
||||
__ bne(CCR0, notBool);
|
||||
// ztos
|
||||
__ lbzx(R17_tos, obj, off);
|
||||
__ push(ztos);
|
||||
@@ -456,16 +456,16 @@ void TemplateTable::condy_helper(Label& Done) {
|
||||
case Bytecodes::_ldc2_w:
|
||||
{
|
||||
Label notLong, notDouble;
|
||||
__ cmplwi(CR0, flags, ltos);
|
||||
__ bne(CR0, notLong);
|
||||
__ cmplwi(CCR0, flags, ltos);
|
||||
__ bne(CCR0, notLong);
|
||||
// ltos
|
||||
__ ldx(R17_tos, obj, off);
|
||||
__ push(ltos);
|
||||
__ b(Done);
|
||||
|
||||
__ bind(notLong);
|
||||
__ cmplwi(CR0, flags, dtos);
|
||||
__ bne(CR0, notDouble);
|
||||
__ cmplwi(CCR0, flags, dtos);
|
||||
__ bne(CCR0, notDouble);
|
||||
// dtos
|
||||
__ lfdx(F15_ftos, obj, off);
|
||||
__ push(dtos);
|
||||
@@ -517,16 +517,16 @@ void TemplateTable::iload_internal(RewriteControl rc) {
|
||||
// last two iloads in a pair. Comparing against fast_iload means that
|
||||
// the next bytecode is neither an iload or a caload, and therefore
|
||||
// an iload pair.
|
||||
__ cmpwi(CR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
|
||||
__ beq(CR0, Ldone);
|
||||
__ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
|
||||
__ beq(CCR0, Ldone);
|
||||
|
||||
__ cmpwi(CR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
|
||||
__ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
|
||||
__ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2);
|
||||
__ beq(CR1, Lrewrite);
|
||||
__ beq(CCR1, Lrewrite);
|
||||
|
||||
__ cmpwi(CR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
|
||||
__ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
|
||||
__ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_icaload);
|
||||
__ beq(CR0, Lrewrite);
|
||||
__ beq(CCR0, Lrewrite);
|
||||
|
||||
__ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
|
||||
|
||||
@@ -812,20 +812,20 @@ void TemplateTable::aload_0_internal(RewriteControl rc) {
|
||||
__ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp);
|
||||
|
||||
// If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair.
|
||||
__ cmpwi(CR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
|
||||
__ beq(CR0, Ldont_rewrite);
|
||||
__ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
|
||||
__ beq(CCR0, Ldont_rewrite);
|
||||
|
||||
__ cmpwi(CR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
|
||||
__ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
|
||||
__ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0);
|
||||
__ beq(CR1, Lrewrite);
|
||||
__ beq(CCR1, Lrewrite);
|
||||
|
||||
__ cmpwi(CR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
|
||||
__ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
|
||||
__ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0);
|
||||
__ beq(CR0, Lrewrite);
|
||||
__ beq(CCR0, Lrewrite);
|
||||
|
||||
__ cmpwi(CR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_fgetfield);
|
||||
__ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_fgetfield);
|
||||
__ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_faccess_0);
|
||||
__ beq(CR1, Lrewrite);
|
||||
__ beq(CCR1, Lrewrite);
|
||||
|
||||
__ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aload_0);
|
||||
|
||||
@@ -997,8 +997,8 @@ void TemplateTable::aastore() {
|
||||
Register Rscratch3 = Rindex;
|
||||
|
||||
// Do array store check - check for null value first.
|
||||
__ cmpdi(CR0, R17_tos, 0);
|
||||
__ beq(CR0, Lis_null);
|
||||
__ cmpdi(CCR0, R17_tos, 0);
|
||||
__ beq(CCR0, Lis_null);
|
||||
|
||||
__ load_klass(Rarray_klass, Rarray);
|
||||
__ load_klass(Rvalue_klass, R17_tos);
|
||||
@@ -1045,9 +1045,9 @@ void TemplateTable::bastore() {
|
||||
__ load_klass(Rscratch, Rarray);
|
||||
__ lwz(Rscratch, in_bytes(Klass::layout_helper_offset()), Rscratch);
|
||||
int diffbit = exact_log2(Klass::layout_helper_boolean_diffbit());
|
||||
__ testbitdi(CR0, R0, Rscratch, diffbit);
|
||||
__ testbitdi(CCR0, R0, Rscratch, diffbit);
|
||||
Label L_skip;
|
||||
__ bfalse(CR0, L_skip);
|
||||
__ bfalse(CCR0, L_skip);
|
||||
__ andi(R17_tos, R17_tos, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
|
||||
__ bind(L_skip);
|
||||
|
||||
@@ -1262,11 +1262,11 @@ void TemplateTable::idiv() {
|
||||
Register Rdividend = R11_scratch1; // Used by irem.
|
||||
|
||||
__ addi(R0, R17_tos, 1);
|
||||
__ cmplwi(CR0, R0, 2);
|
||||
__ bgt(CR0, Lnormal); // divisor <-1 or >1
|
||||
__ cmplwi(CCR0, R0, 2);
|
||||
__ bgt(CCR0, Lnormal); // divisor <-1 or >1
|
||||
|
||||
__ cmpwi(CR1, R17_tos, 0);
|
||||
__ beq(CR1, Lexception); // divisor == 0
|
||||
__ cmpwi(CCR1, R17_tos, 0);
|
||||
__ beq(CCR1, Lexception); // divisor == 0
|
||||
|
||||
__ pop_i(Rdividend);
|
||||
__ mullw(R17_tos, Rdividend, R17_tos); // div by +/-1
|
||||
@@ -1307,11 +1307,11 @@ void TemplateTable::ldiv() {
|
||||
Register Rdividend = R11_scratch1; // Used by lrem.
|
||||
|
||||
__ addi(R0, R17_tos, 1);
|
||||
__ cmpldi(CR0, R0, 2);
|
||||
__ bgt(CR0, Lnormal); // divisor <-1 or >1
|
||||
__ cmpldi(CCR0, R0, 2);
|
||||
__ bgt(CCR0, Lnormal); // divisor <-1 or >1
|
||||
|
||||
__ cmpdi(CR1, R17_tos, 0);
|
||||
__ beq(CR1, Lexception); // divisor == 0
|
||||
__ cmpdi(CCR1, R17_tos, 0);
|
||||
__ beq(CCR1, Lexception); // divisor == 0
|
||||
|
||||
__ pop_l(Rdividend);
|
||||
__ mulld(R17_tos, Rdividend, R17_tos); // div by +/-1
|
||||
@@ -1565,18 +1565,18 @@ void TemplateTable::convert() {
|
||||
|
||||
case Bytecodes::_d2i:
|
||||
case Bytecodes::_f2i:
|
||||
__ fcmpu(CR0, F15_ftos, F15_ftos);
|
||||
__ fcmpu(CCR0, F15_ftos, F15_ftos);
|
||||
__ li(R17_tos, 0); // 0 in case of NAN
|
||||
__ bso(CR0, done);
|
||||
__ bso(CCR0, done);
|
||||
__ fctiwz(F15_ftos, F15_ftos);
|
||||
__ move_d_to_l();
|
||||
break;
|
||||
|
||||
case Bytecodes::_d2l:
|
||||
case Bytecodes::_f2l:
|
||||
__ fcmpu(CR0, F15_ftos, F15_ftos);
|
||||
__ fcmpu(CCR0, F15_ftos, F15_ftos);
|
||||
__ li(R17_tos, 0); // 0 in case of NAN
|
||||
__ bso(CR0, done);
|
||||
__ bso(CCR0, done);
|
||||
__ fctidz(F15_ftos, F15_ftos);
|
||||
__ move_d_to_l();
|
||||
break;
|
||||
@@ -1593,7 +1593,7 @@ void TemplateTable::lcmp() {
|
||||
const Register Rscratch = R11_scratch1;
|
||||
__ pop_l(Rscratch); // first operand, deeper in stack
|
||||
|
||||
__ cmpd(CR0, Rscratch, R17_tos); // compare
|
||||
__ cmpd(CCR0, Rscratch, R17_tos); // compare
|
||||
__ set_cmp3(R17_tos); // set result as follows: <: -1, =: 0, >: 1
|
||||
}
|
||||
|
||||
@@ -1611,7 +1611,7 @@ void TemplateTable::float_cmp(bool is_float, int unordered_result) {
|
||||
__ pop_d(Rfirst);
|
||||
}
|
||||
|
||||
__ fcmpu(CR0, Rfirst, Rsecond); // compare
|
||||
__ fcmpu(CCR0, Rfirst, Rsecond); // compare
|
||||
// if unordered_result is 1, treat unordered_result like 'greater than'
|
||||
assert(unordered_result == 1 || unordered_result == -1, "unordered_result can be either 1 or -1");
|
||||
__ set_cmpu3(R17_tos, unordered_result != 1);
|
||||
@@ -1683,8 +1683,8 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
|
||||
Label Lforward;
|
||||
|
||||
// Check branch direction.
|
||||
__ cmpdi(CR0, Rdisp, 0);
|
||||
__ bgt(CR0, Lforward);
|
||||
__ cmpdi(CCR0, Rdisp, 0);
|
||||
__ bgt(CCR0, Lforward);
|
||||
|
||||
__ get_method_counters(R19_method, R4_counters, Lforward);
|
||||
|
||||
@@ -1695,8 +1695,8 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
|
||||
|
||||
// If no method data exists, go to profile_continue.
|
||||
__ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
|
||||
__ cmpdi(CR0, Rmdo, 0);
|
||||
__ beq(CR0, Lno_mdo);
|
||||
__ cmpdi(CCR0, Rmdo, 0);
|
||||
__ beq(CCR0, Lno_mdo);
|
||||
|
||||
// Increment backedge counter in the MDO.
|
||||
const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
|
||||
@@ -1706,7 +1706,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
|
||||
__ stw(Rscratch2, mdo_bc_offs, Rmdo);
|
||||
if (UseOnStackReplacement) {
|
||||
__ and_(Rscratch3, Rscratch2, Rscratch3);
|
||||
__ bne(CR0, Lforward);
|
||||
__ bne(CCR0, Lforward);
|
||||
__ b(Loverflow);
|
||||
} else {
|
||||
__ b(Lforward);
|
||||
@@ -1722,7 +1722,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
|
||||
__ stw(Rscratch2, mo_bc_offs, R4_counters);
|
||||
if (UseOnStackReplacement) {
|
||||
__ and_(Rscratch3, Rscratch2, Rscratch3);
|
||||
__ bne(CR0, Lforward);
|
||||
__ bne(CCR0, Lforward);
|
||||
} else {
|
||||
__ b(Lforward);
|
||||
}
|
||||
@@ -1733,13 +1733,13 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
|
||||
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);
|
||||
|
||||
// Was an OSR adapter generated?
|
||||
__ cmpdi(CR0, R3_RET, 0);
|
||||
__ beq(CR0, Lforward);
|
||||
__ cmpdi(CCR0, R3_RET, 0);
|
||||
__ beq(CCR0, Lforward);
|
||||
|
||||
// Has the nmethod been invalidated already?
|
||||
__ lbz(R0, in_bytes(nmethod::state_offset()), R3_RET);
|
||||
__ cmpwi(CR0, R0, nmethod::in_use);
|
||||
__ bne(CR0, Lforward);
|
||||
__ cmpwi(CCR0, R0, nmethod::in_use);
|
||||
__ bne(CCR0, Lforward);
|
||||
|
||||
// Migrate the interpreter frame off of the stack.
|
||||
// We can use all registers because we will not return to interpreter from this point.
|
||||
@@ -1775,18 +1775,18 @@ void TemplateTable::if_cmp_common(Register Rfirst, Register Rsecond, Register Rs
|
||||
|
||||
if (is_jint) {
|
||||
if (cmp0) {
|
||||
__ cmpwi(CR0, Rfirst, 0);
|
||||
__ cmpwi(CCR0, Rfirst, 0);
|
||||
} else {
|
||||
__ cmpw(CR0, Rfirst, Rsecond);
|
||||
__ cmpw(CCR0, Rfirst, Rsecond);
|
||||
}
|
||||
} else {
|
||||
if (cmp0) {
|
||||
__ cmpdi(CR0, Rfirst, 0);
|
||||
__ cmpdi(CCR0, Rfirst, 0);
|
||||
} else {
|
||||
__ cmpd(CR0, Rfirst, Rsecond);
|
||||
__ cmpd(CCR0, Rfirst, Rsecond);
|
||||
}
|
||||
}
|
||||
branch_conditional(CR0, cc, Lnot_taken, /*invert*/ true);
|
||||
branch_conditional(CCR0, cc, Lnot_taken, /*invert*/ true);
|
||||
|
||||
// Conition is false => Jump!
|
||||
branch(false, false);
|
||||
@@ -1885,10 +1885,10 @@ void TemplateTable::tableswitch() {
|
||||
__ get_u4(Rhigh_byte, Rdef_offset_addr, 2 *BytesPerInt, InterpreterMacroAssembler::Unsigned);
|
||||
|
||||
// Check for default case (=index outside [low,high]).
|
||||
__ cmpw(CR0, R17_tos, Rlow_byte);
|
||||
__ cmpw(CR1, R17_tos, Rhigh_byte);
|
||||
__ blt(CR0, Ldefault_case);
|
||||
__ bgt(CR1, Ldefault_case);
|
||||
__ cmpw(CCR0, R17_tos, Rlow_byte);
|
||||
__ cmpw(CCR1, R17_tos, Rhigh_byte);
|
||||
__ blt(CCR0, Ldefault_case);
|
||||
__ bgt(CCR1, Ldefault_case);
|
||||
|
||||
// Lookup dispatch offset.
|
||||
__ sub(Rindex, R17_tos, Rlow_byte);
|
||||
@@ -1944,8 +1944,8 @@ void TemplateTable::fast_linearswitch() {
|
||||
__ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair.
|
||||
|
||||
__ mtctr(Rcount);
|
||||
__ cmpwi(CR0, Rcount, 0);
|
||||
__ bne(CR0, Lloop_entry);
|
||||
__ cmpwi(CCR0, Rcount, 0);
|
||||
__ bne(CCR0, Lloop_entry);
|
||||
|
||||
// Default case
|
||||
__ bind(Ldefault_case);
|
||||
@@ -1961,8 +1961,8 @@ void TemplateTable::fast_linearswitch() {
|
||||
__ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt);
|
||||
__ bind(Lloop_entry);
|
||||
__ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned);
|
||||
__ cmpw(CR0, Rvalue, Rcmp_value);
|
||||
__ bne(CR0, Lsearch_loop);
|
||||
__ cmpw(CCR0, Rvalue, Rcmp_value);
|
||||
__ bne(CCR0, Lsearch_loop);
|
||||
|
||||
// Found, load offset.
|
||||
__ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed);
|
||||
@@ -2057,8 +2057,8 @@ void TemplateTable::fast_binaryswitch() {
|
||||
// else
|
||||
// Rh = Ri
|
||||
Label Lgreater;
|
||||
__ cmpw(CR0, Rkey, Rscratch);
|
||||
__ bge(CR0, Lgreater);
|
||||
__ cmpw(CCR0, Rkey, Rscratch);
|
||||
__ bge(CCR0, Lgreater);
|
||||
__ mr(Rj, Rh);
|
||||
__ b(entry);
|
||||
__ bind(Lgreater);
|
||||
@@ -2067,10 +2067,10 @@ void TemplateTable::fast_binaryswitch() {
|
||||
// while (i+1 < j)
|
||||
__ bind(entry);
|
||||
__ addi(Rscratch, Ri, 1);
|
||||
__ cmpw(CR0, Rscratch, Rj);
|
||||
__ cmpw(CCR0, Rscratch, Rj);
|
||||
__ add(Rh, Ri, Rj); // start h = i + j >> 1;
|
||||
|
||||
__ blt(CR0, loop);
|
||||
__ blt(CCR0, loop);
|
||||
}
|
||||
|
||||
// End of binary search, result index is i (must check again!).
|
||||
@@ -2086,8 +2086,8 @@ void TemplateTable::fast_binaryswitch() {
|
||||
|
||||
Label not_found;
|
||||
// Ri = offset offset
|
||||
__ cmpw(CR0, Rkey, Rscratch);
|
||||
__ beq(CR0, not_found);
|
||||
__ cmpw(CCR0, Rkey, Rscratch);
|
||||
__ beq(CCR0, not_found);
|
||||
// entry not found -> j = default offset
|
||||
__ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
|
||||
__ b(default_case);
|
||||
@@ -2130,8 +2130,8 @@ void TemplateTable::_return(TosState state) {
|
||||
// Load klass of this obj.
|
||||
__ load_klass(Rklass, R17_tos);
|
||||
__ lbz(Rklass_flags, in_bytes(Klass::misc_flags_offset()), Rklass);
|
||||
__ testbitdi(CR0, R0, Rklass_flags, exact_log2(KlassFlags::_misc_has_finalizer));
|
||||
__ bfalse(CR0, Lskip_register_finalizer);
|
||||
__ testbitdi(CCR0, R0, Rklass_flags, exact_log2(KlassFlags::_misc_has_finalizer));
|
||||
__ bfalse(CCR0, Lskip_register_finalizer);
|
||||
|
||||
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R17_tos /* obj */);
|
||||
|
||||
@@ -2143,7 +2143,7 @@ void TemplateTable::_return(TosState state) {
|
||||
Label no_safepoint;
|
||||
__ ld(R11_scratch1, in_bytes(JavaThread::polling_word_offset()), R16_thread);
|
||||
__ andi_(R11_scratch1, R11_scratch1, SafepointMechanism::poll_bit());
|
||||
__ beq(CR0, no_safepoint);
|
||||
__ beq(CCR0, no_safepoint);
|
||||
__ push(state);
|
||||
__ push_cont_fastpath();
|
||||
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint));
|
||||
@@ -2214,8 +2214,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no, Register Rca
|
||||
// Load-acquire the bytecode to match store-release in InterpreterRuntime
|
||||
__ lbz(Rscratch, bytecode_offset, Rcache);
|
||||
// Acquire by cmp-br-isync (see below).
|
||||
__ cmpdi(CR0, Rscratch, (int)code);
|
||||
__ beq(CR0, Lresolved);
|
||||
__ cmpdi(CCR0, Rscratch, (int)code);
|
||||
__ beq(CCR0, Lresolved);
|
||||
|
||||
// Class initialization barrier slow path lands here as well.
|
||||
__ bind(L_clinit_barrier_slow);
|
||||
@@ -2263,8 +2263,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
|
||||
int code_offset = (byte_no == f1_byte) ? in_bytes(ResolvedFieldEntry::get_code_offset())
|
||||
: in_bytes(ResolvedFieldEntry::put_code_offset());
|
||||
__ lbz(R0, code_offset, Rcache);
|
||||
__ cmpwi(CR0, R0, (int)code); // have we resolved this bytecode?
|
||||
__ beq(CR0, resolved);
|
||||
__ cmpwi(CCR0, R0, (int)code); // have we resolved this bytecode?
|
||||
__ beq(CCR0, resolved);
|
||||
|
||||
// resolve first time through
|
||||
address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
|
||||
@@ -2332,8 +2332,8 @@ void TemplateTable::load_resolved_method_entry_handle(Register cache,
|
||||
|
||||
// maybe push appendix to arguments (just before return address)
|
||||
Label L_no_push;
|
||||
__ testbitdi(CR0, R0, flags, ResolvedMethodEntry::has_appendix_shift);
|
||||
__ bfalse(CR0, L_no_push);
|
||||
__ testbitdi(CCR0, R0, flags, ResolvedMethodEntry::has_appendix_shift);
|
||||
__ bfalse(CCR0, L_no_push);
|
||||
// invokehandle uses an index into the resolved references array
|
||||
__ lhz(ref_index, in_bytes(ResolvedMethodEntry::resolved_references_index_offset()), cache);
|
||||
// Push the appendix as a trailing parameter.
|
||||
@@ -2395,8 +2395,8 @@ void TemplateTable::load_invokedynamic_entry(Register method) {
|
||||
__ ld_ptr(method, in_bytes(ResolvedIndyEntry::method_offset()), cache);
|
||||
|
||||
// The invokedynamic is unresolved iff method is null
|
||||
__ cmpdi(CR0, method, 0);
|
||||
__ bne(CR0, resolved);
|
||||
__ cmpdi(CCR0, method, 0);
|
||||
__ bne(CCR0, resolved);
|
||||
|
||||
Bytecodes::Code code = bytecode();
|
||||
|
||||
@@ -2408,7 +2408,7 @@ void TemplateTable::load_invokedynamic_entry(Register method) {
|
||||
__ load_resolved_indy_entry(cache, index);
|
||||
__ ld_ptr(method, in_bytes(ResolvedIndyEntry::method_offset()), cache);
|
||||
|
||||
DEBUG_ONLY(__ cmpdi(CR0, method, 0));
|
||||
DEBUG_ONLY(__ cmpdi(CCR0, method, 0));
|
||||
__ asm_assert_ne("Should be resolved by now");
|
||||
__ bind(resolved);
|
||||
__ isync(); // Order load wrt. succeeding loads.
|
||||
@@ -2417,7 +2417,7 @@ void TemplateTable::load_invokedynamic_entry(Register method) {
|
||||
// Check if there is an appendix
|
||||
__ lbz(index, in_bytes(ResolvedIndyEntry::flags_offset()), cache);
|
||||
__ rldicl_(R0, index, 64-ResolvedIndyEntry::has_appendix_shift, 63);
|
||||
__ beq(CR0, L_no_push);
|
||||
__ beq(CCR0, L_no_push);
|
||||
|
||||
// Get appendix
|
||||
__ lhz(index, in_bytes(ResolvedIndyEntry::resolved_references_index_offset()), cache);
|
||||
@@ -2489,8 +2489,8 @@ void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch,
|
||||
int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_access_count_addr(), R0, true);
|
||||
__ lwz(Rscratch, offs, Rscratch);
|
||||
|
||||
__ cmpwi(CR0, Rscratch, 0);
|
||||
__ beq(CR0, Lno_field_access_post);
|
||||
__ cmpwi(CCR0, Rscratch, 0);
|
||||
__ beq(CCR0, Lno_field_access_post);
|
||||
|
||||
// Post access enabled - do it!
|
||||
if (is_static) {
|
||||
@@ -2574,13 +2574,13 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
|
||||
|
||||
#ifdef ASSERT
|
||||
Label LFlagInvalid;
|
||||
__ cmpldi(CR0, Rtos_state, number_of_states);
|
||||
__ bge(CR0, LFlagInvalid);
|
||||
__ cmpldi(CCR0, Rtos_state, number_of_states);
|
||||
__ bge(CCR0, LFlagInvalid);
|
||||
#endif
|
||||
|
||||
// Load from branch table and dispatch (volatile case: one instruction ahead).
|
||||
__ sldi(Rtos_state, Rtos_state, LogBytesPerWord);
|
||||
__ cmpwi(CR2, Rscratch, 1); // Volatile?
|
||||
__ cmpwi(CCR2, Rscratch, 1); // Volatile?
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
__ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
|
||||
}
|
||||
@@ -2631,12 +2631,12 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
|
||||
}
|
||||
{
|
||||
Label acquire_double;
|
||||
__ beq(CR2, acquire_double); // Volatile?
|
||||
__ beq(CCR2, acquire_double); // Volatile?
|
||||
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
|
||||
|
||||
__ bind(acquire_double);
|
||||
__ fcmpu(CR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
|
||||
__ beq_predict_taken(CR0, Lisync);
|
||||
__ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
|
||||
__ beq_predict_taken(CCR0, Lisync);
|
||||
__ b(Lisync); // In case of NAN.
|
||||
}
|
||||
|
||||
@@ -2652,12 +2652,12 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
|
||||
}
|
||||
{
|
||||
Label acquire_float;
|
||||
__ beq(CR2, acquire_float); // Volatile?
|
||||
__ beq(CCR2, acquire_float); // Volatile?
|
||||
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
|
||||
|
||||
__ bind(acquire_float);
|
||||
__ fcmpu(CR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
|
||||
__ beq_predict_taken(CR0, Lisync);
|
||||
__ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
|
||||
__ beq_predict_taken(CCR0, Lisync);
|
||||
__ b(Lisync); // In case of NAN.
|
||||
}
|
||||
|
||||
@@ -2671,7 +2671,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
|
||||
if (!is_static && rc == may_rewrite) {
|
||||
patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch);
|
||||
}
|
||||
__ beq(CR2, Lacquire); // Volatile?
|
||||
__ beq(CCR2, Lacquire); // Volatile?
|
||||
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
|
||||
|
||||
__ align(32, 28, 28); // Align load.
|
||||
@@ -2684,7 +2684,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
|
||||
if (!is_static && rc == may_rewrite) {
|
||||
patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch);
|
||||
}
|
||||
__ beq(CR2, Lacquire); // Volatile?
|
||||
__ beq(CCR2, Lacquire); // Volatile?
|
||||
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
|
||||
|
||||
__ align(32, 28, 28); // Align load.
|
||||
@@ -2698,7 +2698,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
|
||||
if (!is_static && rc == may_rewrite) {
|
||||
patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
|
||||
}
|
||||
__ beq(CR2, Lacquire); // Volatile?
|
||||
__ beq(CCR2, Lacquire); // Volatile?
|
||||
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
|
||||
|
||||
__ align(32, 28, 28); // Align load.
|
||||
@@ -2712,7 +2712,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
|
||||
// use btos rewriting, no truncating to t/f bit is needed for getfield.
|
||||
patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
|
||||
}
|
||||
__ beq(CR2, Lacquire); // Volatile?
|
||||
__ beq(CCR2, Lacquire); // Volatile?
|
||||
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
|
||||
|
||||
__ align(32, 28, 28); // Align load.
|
||||
@@ -2725,7 +2725,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
|
||||
if (!is_static && rc == may_rewrite) {
|
||||
patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch);
|
||||
}
|
||||
__ beq(CR2, Lacquire); // Volatile?
|
||||
__ beq(CCR2, Lacquire); // Volatile?
|
||||
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
|
||||
|
||||
__ align(32, 28, 28); // Align load.
|
||||
@@ -2738,7 +2738,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
|
||||
if (!is_static && rc == may_rewrite) {
|
||||
patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch);
|
||||
}
|
||||
__ beq(CR2, Lacquire); // Volatile?
|
||||
__ beq(CCR2, Lacquire); // Volatile?
|
||||
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
|
||||
|
||||
__ align(32, 28, 28); // Align load.
|
||||
@@ -2753,7 +2753,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
|
||||
if (!is_static && rc == may_rewrite) {
|
||||
patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
|
||||
}
|
||||
__ beq(CR2, Lacquire); // Volatile?
|
||||
__ beq(CCR2, Lacquire); // Volatile?
|
||||
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
|
||||
|
||||
__ align(32, 12);
|
||||
@@ -2796,8 +2796,8 @@ void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, boo
|
||||
int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true);
|
||||
__ lwz(Rscratch, offs, Rscratch);
|
||||
|
||||
__ cmpwi(CR0, Rscratch, 0);
|
||||
__ beq(CR0, Lno_field_mod_post);
|
||||
__ cmpwi(CCR0, Rscratch, 0);
|
||||
__ beq(CCR0, Lno_field_mod_post);
|
||||
|
||||
// Do the post
|
||||
const Register Robj = Rscratch;
|
||||
@@ -2830,11 +2830,11 @@ void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, boo
|
||||
// the type to determine where the object is.
|
||||
__ lbz(Rtos_state, in_bytes(ResolvedFieldEntry::type_offset()), Rcache);
|
||||
|
||||
__ cmpwi(CR0, Rtos_state, ltos);
|
||||
__ cmpwi(CR1, Rtos_state, dtos);
|
||||
__ cmpwi(CCR0, Rtos_state, ltos);
|
||||
__ cmpwi(CCR1, Rtos_state, dtos);
|
||||
__ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(1));
|
||||
__ crnor(CR0, Assembler::equal, CR1, Assembler::equal);
|
||||
__ beq(CR0, is_one_slot);
|
||||
__ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal);
|
||||
__ beq(CCR0, is_one_slot);
|
||||
__ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(2));
|
||||
__ bind(is_one_slot);
|
||||
break;
|
||||
@@ -2881,7 +2881,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
Rscratch2 = R12_scratch2, // used by load_field_cp_cache_entry
Rscratch3 = R6_ARG4,
Rbc = Rscratch3;
const ConditionRegister CR_is_vol = CR2; // Non-volatile condition register (survives runtime call in do_oop_store).
const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).

static address field_rw_branch_table[number_of_states],
field_norw_branch_table[number_of_states],
@@ -2907,8 +2907,8 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
|
||||
|
||||
#ifdef ASSERT
|
||||
Label LFlagInvalid;
|
||||
__ cmpldi(CR0, Rtos_state, number_of_states);
|
||||
__ bge(CR0, LFlagInvalid);
|
||||
__ cmpldi(CCR0, Rtos_state, number_of_states);
|
||||
__ bge(CCR0, LFlagInvalid);
|
||||
#endif
|
||||
|
||||
// Load from branch table and dispatch (volatile case: one instruction ahead).
|
||||
@@ -3124,7 +3124,7 @@ void TemplateTable::fast_storefield(TosState state) {
|
||||
Rscratch = R11_scratch1, // used by load_field_cp_cache_entry
|
||||
Rscratch2 = R12_scratch2, // used by load_field_cp_cache_entry
|
||||
Rscratch3 = R4_ARG2;
|
||||
const ConditionRegister CR_is_vol = CR2; // Non-volatile condition register (survives runtime call in do_oop_store).
|
||||
const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).
|
||||
|
||||
// Constant pool already resolved => Load flags and offset of field.
|
||||
__ load_field_entry(Rcache, Rscratch);
|
||||
@@ -3139,7 +3139,7 @@ void TemplateTable::fast_storefield(TosState state) {
|
||||
if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpdi(CR_is_vol, Rscratch, 1); }
|
||||
{
|
||||
Label LnotVolatile;
|
||||
__ beq(CR0, LnotVolatile);
|
||||
__ beq(CCR0, LnotVolatile);
|
||||
__ release();
|
||||
__ align(32, 12);
|
||||
__ bind(LnotVolatile);
|
||||
@@ -3219,7 +3219,7 @@ void TemplateTable::fast_accessfield(TosState state) {
|
||||
|
||||
// Get volatile flag.
|
||||
__ rldicl_(Rscratch, Rflags, 64-ResolvedFieldEntry::is_volatile_shift, 63); // Extract volatile bit.
|
||||
__ bne(CR0, LisVolatile);
|
||||
__ bne(CCR0, LisVolatile);
|
||||
|
||||
switch(bytecode()) {
|
||||
case Bytecodes::_fast_agetfield:
|
||||
@@ -3307,8 +3307,8 @@ void TemplateTable::fast_accessfield(TosState state) {
|
||||
Label Ldummy;
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
|
||||
__ lfsx(F15_ftos, Rclass_or_obj, Roffset);
|
||||
__ fcmpu(CR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
|
||||
__ bne_predict_not_taken(CR0, Ldummy);
|
||||
__ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
|
||||
__ bne_predict_not_taken(CCR0, Ldummy);
|
||||
__ bind(Ldummy);
|
||||
__ isync();
|
||||
break;
|
||||
@@ -3322,8 +3322,8 @@ void TemplateTable::fast_accessfield(TosState state) {
|
||||
Label Ldummy;
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
|
||||
__ lfdx(F15_ftos, Rclass_or_obj, Roffset);
|
||||
__ fcmpu(CR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
|
||||
__ bne_predict_not_taken(CR0, Ldummy);
|
||||
__ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
|
||||
__ bne_predict_not_taken(CCR0, Ldummy);
|
||||
__ bind(Ldummy);
|
||||
__ isync();
|
||||
break;
|
||||
@@ -3360,7 +3360,7 @@ void TemplateTable::fast_xaccess(TosState state) {
|
||||
|
||||
// Get volatile flag.
|
||||
__ rldicl_(Rscratch, Rflags, 64-ResolvedFieldEntry::is_volatile_shift, 63); // Extract volatile bit.
|
||||
__ bne(CR0, LisVolatile);
|
||||
__ bne(CCR0, LisVolatile);
|
||||
|
||||
switch(state) {
|
||||
case atos:
|
||||
@@ -3398,8 +3398,8 @@ void TemplateTable::fast_xaccess(TosState state) {
|
||||
Label Ldummy;
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
|
||||
__ lfsx(F15_ftos, Rclass_or_obj, Roffset);
|
||||
__ fcmpu(CR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
|
||||
__ bne_predict_not_taken(CR0, Ldummy);
|
||||
__ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
|
||||
__ bne_predict_not_taken(CCR0, Ldummy);
|
||||
__ bind(Ldummy);
|
||||
__ isync();
|
||||
break;
|
||||
@@ -3480,8 +3480,8 @@ void TemplateTable::invokevirtual(int byte_no) {
|
||||
load_resolved_method_entry_virtual(Rcache, noreg, Rflags);
|
||||
|
||||
// Handle final method separately.
|
||||
__ testbitdi(CR0, R0, Rflags, ResolvedMethodEntry::is_vfinal_shift);
|
||||
__ bfalse(CR0, LnotFinal);
|
||||
__ testbitdi(CCR0, R0, Rflags, ResolvedMethodEntry::is_vfinal_shift);
|
||||
__ bfalse(CCR0, LnotFinal);
|
||||
|
||||
if (RewriteBytecodes && !CDSConfig::is_using_archive() && !CDSConfig::is_dumping_static_archive()) {
|
||||
patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
|
||||
@@ -3587,8 +3587,8 @@ void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
|
||||
Label LnotFinal;
|
||||
|
||||
// Check for vfinal.
|
||||
__ testbitdi(CR0, R0, Rflags, ResolvedMethodEntry::is_vfinal_shift);
|
||||
__ bfalse(CR0, LnotFinal);
|
||||
__ testbitdi(CCR0, R0, Rflags, ResolvedMethodEntry::is_vfinal_shift);
|
||||
__ bfalse(CCR0, LnotFinal);
|
||||
|
||||
Register Rscratch = Rflags, // Rflags is dead now.
|
||||
Rmethod = Rtemp2,
|
||||
@@ -3641,8 +3641,8 @@ void TemplateTable::invokeinterface(int byte_no) {
|
||||
// to handle this corner case.
|
||||
|
||||
Label LnotObjectMethod, Lthrow_ame;
|
||||
__ testbitdi(CR0, R0, Rflags, ResolvedMethodEntry::is_forced_virtual_shift);
|
||||
__ bfalse(CR0, LnotObjectMethod);
|
||||
__ testbitdi(CCR0, R0, Rflags, ResolvedMethodEntry::is_forced_virtual_shift);
|
||||
__ bfalse(CCR0, LnotObjectMethod);
|
||||
invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rcache, Rscratch1, Rscratch2);
|
||||
__ bind(LnotObjectMethod);
|
||||
|
||||
@@ -3652,8 +3652,8 @@ void TemplateTable::invokeinterface(int byte_no) {
|
||||
// Check for private method invocation - indicated by vfinal
|
||||
Label LnotVFinal, L_no_such_interface, L_subtype;
|
||||
|
||||
__ testbitdi(CR0, R0, Rflags, ResolvedMethodEntry::is_vfinal_shift);
|
||||
__ bfalse(CR0, LnotVFinal);
|
||||
__ testbitdi(CCR0, R0, Rflags, ResolvedMethodEntry::is_vfinal_shift);
|
||||
__ bfalse(CCR0, LnotVFinal);
|
||||
|
||||
__ check_klass_subtype(Rrecv_klass, Rinterface_klass, Rscratch1, Rscratch2, L_subtype);
|
||||
// If we get here the typecheck failed
|
||||
@@ -3687,8 +3687,8 @@ void TemplateTable::invokeinterface(int byte_no) {
|
||||
__ lookup_interface_method(Rrecv_klass, Rinterface_klass, Rindex, Rmethod2, Rscratch1, Rscratch2,
|
||||
L_no_such_interface);
|
||||
|
||||
__ cmpdi(CR0, Rmethod2, 0);
|
||||
__ beq(CR0, Lthrow_ame);
|
||||
__ cmpdi(CCR0, Rmethod2, 0);
|
||||
__ beq(CCR0, Lthrow_ame);
|
||||
// Found entry. Jump off!
|
||||
// Argument and return type profiling.
|
||||
__ profile_arguments_type(Rmethod2, Rscratch1, Rscratch2, true);
|
||||
@@ -3795,8 +3795,8 @@ void TemplateTable::_new() {
|
||||
__ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
|
||||
__ lbzx(Rtags, Rindex, Rtags);
|
||||
|
||||
__ cmpdi(CR0, Rtags, JVM_CONSTANT_Class);
|
||||
__ bne(CR0, Lslow_case);
|
||||
__ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
|
||||
__ bne(CCR0, Lslow_case);
|
||||
|
||||
// Get instanceKlass
|
||||
__ sldi(Roffset, Rindex, LogBytesPerWord);
|
||||
@@ -3810,7 +3810,7 @@ void TemplateTable::_new() {
|
||||
|
||||
// Make sure klass is not abstract, or interface or java/lang/Class.
|
||||
__ andi_(R0, Rinstance_size, Klass::_lh_instance_slow_path_bit); // slow path bit equals 0?
|
||||
__ bne(CR0, Lslow_case);
|
||||
__ bne(CCR0, Lslow_case);
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Fast case:
|
||||
@@ -3829,8 +3829,8 @@ void TemplateTable::_new() {
|
||||
__ add(RnewTopValue, Rinstance_size, RoldTopValue);
|
||||
|
||||
// If there is enough space, we do not CAS and do not clear.
|
||||
__ cmpld(CR0, RnewTopValue, RendValue);
|
||||
__ bgt(CR0, Lslow_case);
|
||||
__ cmpld(CCR0, RnewTopValue, RendValue);
|
||||
__ bgt(CCR0, Lslow_case);
|
||||
|
||||
__ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
|
||||
|
||||
@@ -3947,8 +3947,8 @@ void TemplateTable::checkcast() {
|
||||
Rtags = R12_scratch2;
|
||||
|
||||
// Null does not pass.
|
||||
__ cmpdi(CR0, R17_tos, 0);
|
||||
__ beq(CR0, Lis_null);
|
||||
__ cmpdi(CCR0, R17_tos, 0);
|
||||
__ beq(CCR0, Lis_null);
|
||||
|
||||
// Get constant pool tag to find out if the bytecode has already been "quickened".
|
||||
__ get_cpool_and_tags(Rcpool, Rtags);
|
||||
@@ -3958,8 +3958,8 @@ void TemplateTable::checkcast() {
|
||||
__ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
|
||||
__ lbzx(Rtags, Rtags, Roffset);
|
||||
|
||||
__ cmpdi(CR0, Rtags, JVM_CONSTANT_Class);
|
||||
__ beq(CR0, Lquicked);
|
||||
__ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
|
||||
__ beq(CCR0, Lquicked);
|
||||
|
||||
// Call into the VM to "quicken" instanceof.
|
||||
__ push_ptr(); // for GC
|
||||
@@ -4009,8 +4009,8 @@ void TemplateTable::instanceof() {
|
||||
Rtags = R12_scratch2;
|
||||
|
||||
// Null does not pass.
|
||||
__ cmpdi(CR0, R17_tos, 0);
|
||||
__ beq(CR0, Lis_null);
|
||||
__ cmpdi(CCR0, R17_tos, 0);
|
||||
__ beq(CCR0, Lis_null);
|
||||
|
||||
// Get constant pool tag to find out if the bytecode has already been "quickened".
|
||||
__ get_cpool_and_tags(Rcpool, Rtags);
|
||||
@@ -4020,8 +4020,8 @@ void TemplateTable::instanceof() {
|
||||
__ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
|
||||
__ lbzx(Rtags, Rtags, Roffset);
|
||||
|
||||
__ cmpdi(CR0, Rtags, JVM_CONSTANT_Class);
|
||||
__ beq(CR0, Lquicked);
|
||||
__ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
|
||||
__ beq(CCR0, Lquicked);
|
||||
|
||||
// Call into the VM to "quicken" instanceof.
|
||||
__ push_ptr(); // for GC
|
||||
@@ -4127,8 +4127,8 @@ void TemplateTable::monitorenter() {
|
||||
__ null_check_throw(Robj_to_lock, -1, Rscratch1);
|
||||
|
||||
// Check if any slot is present => short cut to allocation if not.
|
||||
__ cmpld(CR0, Rcurrent_monitor, Rbot);
|
||||
__ beq(CR0, Lallocate_new);
|
||||
__ cmpld(CCR0, Rcurrent_monitor, Rbot);
|
||||
__ beq(CCR0, Lallocate_new);
|
||||
|
||||
// ------------------------------------------------------------------------------
|
||||
// Find a free slot in the monitor block.
|
||||
@@ -4141,24 +4141,24 @@ void TemplateTable::monitorenter() {
|
||||
__ ld(Rcurrent_obj, in_bytes(BasicObjectLock::obj_offset()), Rcurrent_monitor);
|
||||
// Exit if current entry is for same object; this guarantees, that new monitor
|
||||
// used for recursive lock is above the older one.
|
||||
__ cmpd(CR0, Rcurrent_obj, Robj_to_lock);
|
||||
__ beq(CR0, Lexit); // recursive locking
|
||||
__ cmpd(CCR0, Rcurrent_obj, Robj_to_lock);
|
||||
__ beq(CCR0, Lexit); // recursive locking
|
||||
|
||||
__ cmpdi(CR0, Rcurrent_obj, 0);
|
||||
__ bne(CR0, LnotFree);
|
||||
__ cmpdi(CCR0, Rcurrent_obj, 0);
|
||||
__ bne(CCR0, LnotFree);
|
||||
__ mr(Rfree_slot, Rcurrent_monitor); // remember free slot closest to the bottom
|
||||
__ bind(LnotFree);
|
||||
|
||||
__ addi(Rcurrent_monitor, Rcurrent_monitor, frame::interpreter_frame_monitor_size_in_bytes());
|
||||
__ cmpld(CR0, Rcurrent_monitor, Rbot);
|
||||
__ bne(CR0, Lloop);
|
||||
__ cmpld(CCR0, Rcurrent_monitor, Rbot);
|
||||
__ bne(CCR0, Lloop);
|
||||
__ bind(Lexit);
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------
|
||||
// Check if we found a free slot.
|
||||
__ cmpdi(CR0, Rfree_slot, 0);
|
||||
__ bne(CR0, Lfound);
|
||||
__ cmpdi(CCR0, Rfree_slot, 0);
|
||||
__ bne(CCR0, Lfound);
|
||||
|
||||
// We didn't find a free BasicObjLock => allocate one.
|
||||
__ bind(Lallocate_new);
|
||||
@@ -4206,8 +4206,8 @@ void TemplateTable::monitorexit() {
|
||||
__ null_check_throw(Robj_to_lock, -1, Rscratch);
|
||||
|
||||
// Check corner case: unbalanced monitorEnter / Exit.
|
||||
__ cmpld(CR0, Rcurrent_monitor, Rbot);
|
||||
__ beq(CR0, Lillegal_monitor_state);
|
||||
__ cmpld(CCR0, Rcurrent_monitor, Rbot);
|
||||
__ beq(CCR0, Lillegal_monitor_state);
|
||||
|
||||
// Find the corresponding slot in the monitors stack section.
|
||||
{
|
||||
@@ -4216,12 +4216,12 @@ void TemplateTable::monitorexit() {
|
||||
__ bind(Lloop);
|
||||
__ ld(Rcurrent_obj, in_bytes(BasicObjectLock::obj_offset()), Rcurrent_monitor);
|
||||
// Is this entry for same obj?
|
||||
__ cmpd(CR0, Rcurrent_obj, Robj_to_lock);
|
||||
__ beq(CR0, Lfound);
|
||||
__ cmpd(CCR0, Rcurrent_obj, Robj_to_lock);
|
||||
__ beq(CCR0, Lfound);
|
||||
|
||||
__ addi(Rcurrent_monitor, Rcurrent_monitor, frame::interpreter_frame_monitor_size_in_bytes());
|
||||
__ cmpld(CR0, Rcurrent_monitor, Rbot);
|
||||
__ bne(CR0, Lloop);
|
||||
__ cmpld(CCR0, Rcurrent_monitor, Rbot);
|
||||
__ bne(CCR0, Lloop);
|
||||
}
|
||||
|
||||
// Fell through without finding the basic obj lock => throw up!
|
||||
|
||||
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -91,8 +91,8 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// Check offset vs vtable length.
const Register vtable_len = R12_scratch2;
__ lwz(vtable_len, in_bytes(Klass::vtable_length_offset()), rcvr_klass);
__ cmpwi(CR0, vtable_len, vtable_index*vtableEntry::size());
__ bge(CR0, L);
__ cmpwi(CCR0, vtable_len, vtable_index*vtableEntry::size());
__ bge(CCR0, L);
__ li(R12_scratch2, vtable_index);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), R3_ARG1, R12_scratch2, false);
__ bind(L);
@@ -108,8 +108,8 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
#ifndef PRODUCT
if (DebugVtables) {
Label L;
__ cmpdi(CR0, R19_method, 0);
__ bne(CR0, L);
__ cmpdi(CCR0, R19_method, 0);
__ bne(CCR0, L);
__ stop("Vtable entry is ZERO");
__ bind(L);
}
@@ -194,8 +194,8 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
#ifndef PRODUCT
if (DebugVtables) {
Label ok;
__ cmpdi(CR0, R19_method, 0);
__ bne(CR0, ok);
__ cmpdi(CCR0, R19_method, 0);
__ bne(CCR0, ok);
__ stop("method is null");
__ bind(ok);
}

@@ -330,104 +330,7 @@ class InternalAddress: public Address {
|
||||
};
|
||||
|
||||
class Assembler : public AbstractAssembler {
|
||||
protected:
|
||||
|
||||
static int zfa_zli_lookup_double(uint64_t value) {
|
||||
switch(value) {
|
||||
case 0xbff0000000000000 : return 0;
|
||||
case 0x0010000000000000 : return 1;
|
||||
case 0x3ef0000000000000 : return 2;
|
||||
case 0x3f00000000000000 : return 3;
|
||||
case 0x3f70000000000000 : return 4;
|
||||
case 0x3f80000000000000 : return 5;
|
||||
case 0x3fb0000000000000 : return 6;
|
||||
case 0x3fc0000000000000 : return 7;
|
||||
case 0x3fd0000000000000 : return 8;
|
||||
case 0x3fd4000000000000 : return 9;
|
||||
case 0x3fd8000000000000 : return 10;
|
||||
case 0x3fdc000000000000 : return 11;
|
||||
case 0x3fe0000000000000 : return 12;
|
||||
case 0x3fe4000000000000 : return 13;
|
||||
case 0x3fe8000000000000 : return 14;
|
||||
case 0x3fec000000000000 : return 15;
|
||||
case 0x3ff0000000000000 : return 16;
|
||||
case 0x3ff4000000000000 : return 17;
|
||||
case 0x3ff8000000000000 : return 18;
|
||||
case 0x3ffc000000000000 : return 19;
|
||||
case 0x4000000000000000 : return 20;
|
||||
case 0x4004000000000000 : return 21;
|
||||
case 0x4008000000000000 : return 22;
|
||||
case 0x4010000000000000 : return 23;
|
||||
case 0x4020000000000000 : return 24;
|
||||
case 0x4030000000000000 : return 25;
|
||||
case 0x4060000000000000 : return 26;
|
||||
case 0x4070000000000000 : return 27;
|
||||
case 0x40e0000000000000 : return 28;
|
||||
case 0x40f0000000000000 : return 29;
|
||||
case 0x7ff0000000000000 : return 30;
|
||||
case 0x7ff8000000000000 : return 31;
|
||||
default: break;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
static int zfa_zli_lookup_float(uint32_t value) {
|
||||
switch(value) {
|
||||
case 0xbf800000 : return 0;
|
||||
case 0x00800000 : return 1;
|
||||
case 0x37800000 : return 2;
|
||||
case 0x38000000 : return 3;
|
||||
case 0x3b800000 : return 4;
|
||||
case 0x3c000000 : return 5;
|
||||
case 0x3d800000 : return 6;
|
||||
case 0x3e000000 : return 7;
|
||||
case 0x3e800000 : return 8;
|
||||
case 0x3ea00000 : return 9;
|
||||
case 0x3ec00000 : return 10;
|
||||
case 0x3ee00000 : return 11;
|
||||
case 0x3f000000 : return 12;
|
||||
case 0x3f200000 : return 13;
|
||||
case 0x3f400000 : return 14;
|
||||
case 0x3f600000 : return 15;
|
||||
case 0x3f800000 : return 16;
|
||||
case 0x3fa00000 : return 17;
|
||||
case 0x3fc00000 : return 18;
|
||||
case 0x3fe00000 : return 19;
|
||||
case 0x40000000 : return 20;
|
||||
case 0x40200000 : return 21;
|
||||
case 0x40400000 : return 22;
|
||||
case 0x40800000 : return 23;
|
||||
case 0x41000000 : return 24;
|
||||
case 0x41800000 : return 25;
|
||||
case 0x43000000 : return 26;
|
||||
case 0x43800000 : return 27;
|
||||
case 0x47000000 : return 28;
|
||||
case 0x47800000 : return 29;
|
||||
case 0x7f800000 : return 30;
|
||||
case 0x7fc00000 : return 31;
|
||||
default: break;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
public:
|
||||
|
||||
static bool can_zfa_zli_float(jfloat f) {
|
||||
if (!UseZfa) {
|
||||
return false;
|
||||
}
|
||||
uint32_t f_bits = (uint32_t)jint_cast(f);
|
||||
return zfa_zli_lookup_float(f_bits) != -1;
|
||||
}
|
||||
|
||||
static bool can_zfa_zli_double(jdouble d) {
|
||||
if (!UseZfa) {
|
||||
return false;
|
||||
}
|
||||
uint64_t d_bits = (uint64_t)julong_cast(d);
|
||||
return zfa_zli_lookup_double(d_bits) != -1;
|
||||
}
|
||||
public:
|
||||
|
||||
enum {
|
||||
instruction_size = 4,
|
||||
@@ -1069,13 +972,6 @@ enum operand_size { int8, int16, int32, uint32, int64 };
|
||||
fp_base<Fmt, funct5>(Rd->raw_encoding(), Rs1->raw_encoding(), Rs2, (RoundingMode)rm);
|
||||
}
|
||||
|
||||
template <FmtPrecision Fmt, uint8_t funct5>
|
||||
void fp_base(FloatRegister Rd, uint8_t Rs1, uint8_t Rs2, int8_t rm) {
|
||||
guarantee(is_uimm5(Rs1), "Rs1 is out of validity");
|
||||
guarantee(is_uimm5(Rs2), "Rs2 is out of validity");
|
||||
fp_base<Fmt, funct5>(Rd->raw_encoding(), Rs1, Rs2, (RoundingMode)rm);
|
||||
}
|
||||
|
||||
public:
|
||||
|
||||
enum FClassBits {
|
||||
@@ -1397,18 +1293,6 @@ enum operand_size { int8, int16, int32, uint32, int64 };
|
||||
fp_base<H_16_hp, 0b11100>(Rd, Rs1, 0b00000, 0b000);
|
||||
}
|
||||
|
||||
// -------------- ZFA Instruction Definitions --------------
|
||||
// Zfa Extension for Additional Floating-Point Instructions
|
||||
void _fli_s(FloatRegister Rd, uint8_t Rs1) {
|
||||
assert_cond(UseZfa);
|
||||
fp_base<S_32_sp, 0b11110>(Rd, Rs1, 0b00001, 0b000);
|
||||
}
|
||||
|
||||
void _fli_d(FloatRegister Rd, uint8_t Rs1) {
|
||||
assert_cond(UseZfa);
|
||||
fp_base<D_64_dp, 0b11110>(Rd, Rs1, 0b00001, 0b000);
|
||||
}
|
||||
|
||||
// ==========================
|
||||
// RISC-V Vector Extension
|
||||
// ==========================
|
||||
|
||||
@@ -425,8 +425,6 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
|
||||
assert(dest->is_register(), "should not call otherwise");
|
||||
LIR_Const* c = src->as_constant_ptr();
|
||||
address const_addr = nullptr;
|
||||
jfloat fconst;
|
||||
jdouble dconst;
|
||||
|
||||
switch (c->type()) {
|
||||
case T_INT:
|
||||
@@ -462,25 +460,15 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
|
||||
break;
|
||||
|
||||
case T_FLOAT:
|
||||
fconst = c->as_jfloat();
|
||||
if (MacroAssembler::can_fp_imm_load(fconst)) {
|
||||
__ fli_s(dest->as_float_reg(), fconst);
|
||||
} else {
|
||||
const_addr = float_constant(fconst);
|
||||
assert(const_addr != nullptr, "must create float constant in the constant table");
|
||||
__ flw(dest->as_float_reg(), InternalAddress(const_addr));
|
||||
}
|
||||
const_addr = float_constant(c->as_jfloat());
|
||||
assert(const_addr != nullptr, "must create float constant in the constant table");
|
||||
__ flw(dest->as_float_reg(), InternalAddress(const_addr));
|
||||
break;
|
||||
|
||||
case T_DOUBLE:
|
||||
dconst = c->as_jdouble();
|
||||
if (MacroAssembler::can_dp_imm_load(dconst)) {
|
||||
__ fli_d(dest->as_double_reg(), dconst);
|
||||
} else {
|
||||
const_addr = double_constant(c->as_jdouble());
|
||||
assert(const_addr != nullptr, "must create double constant in the constant table");
|
||||
__ fld(dest->as_double_reg(), InternalAddress(const_addr));
|
||||
}
|
||||
const_addr = double_constant(c->as_jdouble());
|
||||
assert(const_addr != nullptr, "must create double constant in the constant table");
|
||||
__ fld(dest->as_double_reg(), InternalAddress(const_addr));
|
||||
break;
|
||||
|
||||
default:
|
||||
|
||||
@@ -1409,14 +1409,6 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
load_chr_insn str1_load_chr = str1_isL ? (load_chr_insn)&MacroAssembler::lbu : (load_chr_insn)&MacroAssembler::lhu;
|
||||
load_chr_insn str2_load_chr = str2_isL ? (load_chr_insn)&MacroAssembler::lbu : (load_chr_insn)&MacroAssembler::lhu;
|
||||
|
||||
int base_offset1 = arrayOopDesc::base_offset_in_bytes(T_BYTE);
|
||||
int base_offset2 = arrayOopDesc::base_offset_in_bytes(T_CHAR);
|
||||
|
||||
assert((base_offset1 % (UseCompactObjectHeaders ? 4 :
|
||||
(UseCompressedClassPointers ? 8 : 4))) == 0, "Must be");
|
||||
assert((base_offset2 % (UseCompactObjectHeaders ? 4 :
|
||||
(UseCompressedClassPointers ? 8 : 4))) == 0, "Must be");
|
||||
|
||||
BLOCK_COMMENT("string_compare {");
|
||||
|
||||
// Bizarrely, the counts are passed in bytes, regardless of whether they
|
||||
@@ -1434,24 +1426,6 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
mv(cnt2, cnt1);
|
||||
bind(L);
|
||||
|
||||
// Load 4 bytes once to compare for alignment before main loop. Note that this
|
||||
// is only possible for LL/UU case. We need to resort to load_long_misaligned
|
||||
// for both LU and UL cases.
|
||||
if (str1_isL == str2_isL) { // LL or UU
|
||||
beq(str1, str2, DONE);
|
||||
int base_offset = isLL ? base_offset1 : base_offset2;
|
||||
if (AvoidUnalignedAccesses && (base_offset % 8) != 0) {
|
||||
mv(t0, minCharsInWord / 2);
|
||||
ble(cnt2, t0, SHORT_STRING);
|
||||
lwu(tmp1, Address(str1));
|
||||
lwu(tmp2, Address(str2));
|
||||
bne(tmp1, tmp2, DIFFERENCE);
|
||||
addi(str1, str1, 4);
|
||||
addi(str2, str2, 4);
|
||||
subi(cnt2, cnt2, minCharsInWord / 2);
|
||||
}
|
||||
}
|
||||
|
||||
// A very short string
|
||||
mv(t0, minCharsInWord);
|
||||
ble(cnt2, t0, SHORT_STRING);
|
||||
@@ -1460,14 +1434,8 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
// load first parts of strings and finish initialization while loading
|
||||
{
|
||||
if (str1_isL == str2_isL) { // LL or UU
|
||||
#ifdef ASSERT
|
||||
Label align_ok;
|
||||
orr(t0, str1, str2);
|
||||
andi(t0, t0, 0x7);
|
||||
beqz(t0, align_ok);
|
||||
stop("bad alignment");
|
||||
bind(align_ok);
|
||||
#endif
|
||||
// check if str1 and str2 is same pointer
|
||||
beq(str1, str2, DONE);
|
||||
// load 8 bytes once to compare
|
||||
ld(tmp1, Address(str1));
|
||||
ld(tmp2, Address(str2));
|
||||
@@ -1484,7 +1452,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
sub(cnt2, zr, cnt2);
|
||||
} else if (isLU) { // LU case
|
||||
lwu(tmp1, Address(str1));
|
||||
load_long_misaligned(tmp2, Address(str2), tmp3, (base_offset2 % 8) != 0 ? 4 : 8);
|
||||
ld(tmp2, Address(str2));
|
||||
mv(t0, STUB_THRESHOLD);
|
||||
bge(cnt2, t0, STUB);
|
||||
subi(cnt2, cnt2, 4);
|
||||
@@ -1497,11 +1465,11 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
sub(cnt2, zr, cnt2);
|
||||
addi(cnt1, cnt1, 4);
|
||||
} else { // UL case
|
||||
load_long_misaligned(tmp1, Address(str1), tmp3, (base_offset2 % 8) != 0 ? 4 : 8);
|
||||
ld(tmp1, Address(str1));
|
||||
lwu(tmp2, Address(str2));
|
||||
mv(t0, STUB_THRESHOLD);
|
||||
bge(cnt2, t0, STUB);
|
||||
subi(cnt2, cnt2, 4);
|
||||
addi(cnt2, cnt2, -4);
|
||||
slli(t0, cnt2, 1);
|
||||
sub(cnt1, zr, t0);
|
||||
add(str1, str1, t0);
|
||||
@@ -1518,7 +1486,6 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
// main loop
|
||||
bind(NEXT_WORD);
|
||||
if (str1_isL == str2_isL) { // LL or UU
|
||||
// both of the two loads are 8-byte aligned
|
||||
add(t0, str1, cnt2);
|
||||
ld(tmp1, Address(t0));
|
||||
add(t0, str2, cnt2);
|
||||
@@ -1528,7 +1495,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
add(t0, str1, cnt1);
|
||||
lwu(tmp1, Address(t0));
|
||||
add(t0, str2, cnt2);
|
||||
load_long_misaligned(tmp2, Address(t0), tmp3, (base_offset2 % 8) != 0 ? 4 : 8);
|
||||
ld(tmp2, Address(t0));
|
||||
addi(cnt1, cnt1, 4);
|
||||
inflate_lo32(tmp3, tmp1);
|
||||
mv(tmp1, tmp3);
|
||||
@@ -1537,7 +1504,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
add(t0, str2, cnt2);
|
||||
lwu(tmp2, Address(t0));
|
||||
add(t0, str1, cnt1);
|
||||
load_long_misaligned(tmp1, Address(t0), tmp3, (base_offset2 % 8) != 0 ? 4 : 8);
|
||||
ld(tmp1, Address(t0));
|
||||
inflate_lo32(tmp3, tmp2);
|
||||
mv(tmp2, tmp3);
|
||||
addi(cnt1, cnt1, 8);
|
||||
@@ -1670,9 +1637,6 @@ void C2_MacroAssembler::arrays_equals(Register a1, Register a2,
|
||||
int length_offset = arrayOopDesc::length_offset_in_bytes();
|
||||
int base_offset = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE);
|
||||
|
||||
assert((base_offset % (UseCompactObjectHeaders ? 4 :
|
||||
(UseCompressedClassPointers ? 8 : 4))) == 0, "Must be");
|
||||
|
||||
Register cnt1 = tmp3;
|
||||
Register cnt2 = tmp1; // cnt2 only used in array length compare
|
||||
Label DONE, SAME, NEXT_WORD, SHORT, TAIL03, TAIL01;
|
||||
@@ -1696,31 +1660,10 @@ void C2_MacroAssembler::arrays_equals(Register a1, Register a2,
|
||||
|
||||
la(a1, Address(a1, base_offset));
|
||||
la(a2, Address(a2, base_offset));
|
||||
|
||||
// Load 4 bytes once to compare for alignment before main loop.
|
||||
if (AvoidUnalignedAccesses && (base_offset % 8) != 0) {
|
||||
subi(cnt1, cnt1, elem_per_word / 2);
|
||||
bltz(cnt1, TAIL03);
|
||||
lwu(tmp1, Address(a1));
|
||||
lwu(tmp2, Address(a2));
|
||||
addi(a1, a1, 4);
|
||||
addi(a2, a2, 4);
|
||||
bne(tmp1, tmp2, DONE);
|
||||
}
|
||||
|
||||
// Check for short strings, i.e. smaller than wordSize.
|
||||
subi(cnt1, cnt1, elem_per_word);
|
||||
bltz(cnt1, SHORT);
|
||||
|
||||
#ifdef ASSERT
|
||||
Label align_ok;
|
||||
orr(t0, a1, a2);
|
||||
andi(t0, t0, 0x7);
|
||||
beqz(t0, align_ok);
|
||||
stop("bad alignment");
|
||||
bind(align_ok);
|
||||
#endif
|
||||
|
||||
// Main 8 byte comparison loop.
|
||||
bind(NEXT_WORD); {
|
||||
ld(tmp1, Address(a1));
|
||||
@@ -1786,45 +1729,20 @@ void C2_MacroAssembler::arrays_equals(Register a1, Register a2,
|
||||
void C2_MacroAssembler::string_equals(Register a1, Register a2,
|
||||
Register result, Register cnt1)
|
||||
{
|
||||
Label SAME, DONE, SHORT, NEXT_WORD, TAIL03, TAIL01;
|
||||
Label SAME, DONE, SHORT, NEXT_WORD;
|
||||
Register tmp1 = t0;
|
||||
Register tmp2 = t1;
|
||||
|
||||
assert_different_registers(a1, a2, result, cnt1, tmp1, tmp2);
|
||||
|
||||
int base_offset = arrayOopDesc::base_offset_in_bytes(T_BYTE);
|
||||
|
||||
assert((base_offset % (UseCompactObjectHeaders ? 4 :
|
||||
(UseCompressedClassPointers ? 8 : 4))) == 0, "Must be");
|
||||
|
||||
BLOCK_COMMENT("string_equals {");
|
||||
|
||||
mv(result, false);
|
||||
|
||||
// Load 4 bytes once to compare for alignment before main loop.
|
||||
if (AvoidUnalignedAccesses && (base_offset % 8) != 0) {
|
||||
subi(cnt1, cnt1, 4);
|
||||
bltz(cnt1, TAIL03);
|
||||
lwu(tmp1, Address(a1));
|
||||
lwu(tmp2, Address(a2));
|
||||
addi(a1, a1, 4);
|
||||
addi(a2, a2, 4);
|
||||
bne(tmp1, tmp2, DONE);
|
||||
}
|
||||
|
||||
// Check for short strings, i.e. smaller than wordSize.
|
||||
subi(cnt1, cnt1, wordSize);
|
||||
bltz(cnt1, SHORT);
|
||||
|
||||
#ifdef ASSERT
|
||||
Label align_ok;
|
||||
orr(t0, a1, a2);
|
||||
andi(t0, t0, 0x7);
|
||||
beqz(t0, align_ok);
|
||||
stop("bad alignment");
|
||||
bind(align_ok);
|
||||
#endif
|
||||
|
||||
// Main 8 byte comparison loop.
|
||||
bind(NEXT_WORD); {
|
||||
ld(tmp1, Address(a1));
|
||||
@@ -1839,6 +1757,8 @@ void C2_MacroAssembler::string_equals(Register a1, Register a2,
|
||||
beqz(tmp1, SAME);
|
||||
|
||||
bind(SHORT);
|
||||
Label TAIL03, TAIL01;
|
||||
|
||||
// 0-7 bytes left.
|
||||
test_bit(tmp1, cnt1, 2);
|
||||
beqz(tmp1, TAIL03);
|
||||
@@ -2592,9 +2512,6 @@ void C2_MacroAssembler::arrays_equals_v(Register a1, Register a2, Register resul
|
||||
int length_offset = arrayOopDesc::length_offset_in_bytes();
|
||||
int base_offset = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE);
|
||||
|
||||
assert((base_offset % (UseCompactObjectHeaders ? 4 :
|
||||
(UseCompressedClassPointers ? 8 : 4))) == 0, "Must be");
|
||||
|
||||
BLOCK_COMMENT("arrays_equals_v {");
|
||||
|
||||
// if (a1 == a2), return true
|
||||
|
||||
@@ -103,7 +103,6 @@ define_pd_global(intx, InlineSmallCode, 1000);
product(bool, UseZba, false, DIAGNOSTIC, "Use Zba instructions") \
product(bool, UseZbb, false, DIAGNOSTIC, "Use Zbb instructions") \
product(bool, UseZbs, false, DIAGNOSTIC, "Use Zbs instructions") \
product(bool, UseZfa, false, EXPERIMENTAL, "Use Zfa instructions") \
product(bool, UseZfh, false, DIAGNOSTIC, "Use Zfh instructions") \
product(bool, UseZfhmin, false, DIAGNOSTIC, "Use Zfhmin instructions") \
product(bool, UseZacas, false, EXPERIMENTAL, "Use Zacas instructions") \

@@ -2593,45 +2593,6 @@ void MacroAssembler::movptr2(Register Rd, uint64_t addr, int32_t &offset, Regist
|
||||
offset = lower12;
|
||||
}
|
||||
|
||||
// floating point imm move
|
||||
bool MacroAssembler::can_fp_imm_load(float imm) {
|
||||
jint f_bits = jint_cast(imm);
|
||||
if (f_bits == 0) {
|
||||
return true;
|
||||
}
|
||||
return can_zfa_zli_float(imm);
|
||||
}
|
||||
|
||||
bool MacroAssembler::can_dp_imm_load(double imm) {
|
||||
julong d_bits = julong_cast(imm);
|
||||
if (d_bits == 0) {
|
||||
return true;
|
||||
}
|
||||
return can_zfa_zli_double(imm);
|
||||
}
|
||||
|
||||
void MacroAssembler::fli_s(FloatRegister Rd, float imm) {
|
||||
jint f_bits = jint_cast(imm);
|
||||
if (f_bits == 0) {
|
||||
fmv_w_x(Rd, zr);
|
||||
return;
|
||||
}
|
||||
int Rs = zfa_zli_lookup_float(f_bits);
|
||||
assert(Rs != -1, "Must be");
|
||||
_fli_s(Rd, Rs);
|
||||
}
|
||||
|
||||
void MacroAssembler::fli_d(FloatRegister Rd, double imm) {
|
||||
uint64_t d_bits = (uint64_t)julong_cast(imm);
|
||||
if (d_bits == 0) {
|
||||
fmv_d_x(Rd, zr);
|
||||
return;
|
||||
}
|
||||
int Rs = zfa_zli_lookup_double(d_bits);
|
||||
assert(Rs != -1, "Must be");
|
||||
_fli_d(Rd, Rs);
|
||||
}
|
||||
|
||||
void MacroAssembler::add(Register Rd, Register Rn, int64_t increment, Register tmp) {
|
||||
if (is_simm12(increment)) {
|
||||
addi(Rd, Rn, increment);
|
||||
@@ -4307,7 +4268,7 @@ void MacroAssembler::population_count(Register dst, Register src,
{
bind(loop);
addi(dst, dst, 1);
subi(tmp2, tmp1, 1);
addi(tmp2, tmp1, -1);
andr(tmp1, tmp1, tmp2);
bnez(tmp1, loop);
}

@@ -920,11 +920,6 @@ public:
void movptr1(Register Rd, uintptr_t addr, int32_t &offset);
void movptr2(Register Rd, uintptr_t addr, int32_t &offset, Register tmp);
public:
// float imm move
static bool can_fp_imm_load(float imm);
static bool can_dp_imm_load(double imm);
void fli_s(FloatRegister Rd, float imm);
void fli_d(FloatRegister Rd, double imm);

// arith
void add (Register Rd, Register Rn, int64_t increment, Register tmp = t0);

@@ -4920,11 +4920,7 @@ instruct loadConF(fRegF dst, immF con) %{
%}

ins_encode %{
if (MacroAssembler::can_fp_imm_load($con$$constant)) {
__ fli_s(as_FloatRegister($dst$$reg), $con$$constant);
} else {
__ flw(as_FloatRegister($dst$$reg), $constantaddress($con));
}
__ flw(as_FloatRegister($dst$$reg), $constantaddress($con));
%}

ins_pipe(fp_load_constant_s);
@@ -4954,11 +4950,7 @@ instruct loadConD(fRegD dst, immD con) %{
%}

ins_encode %{
if (MacroAssembler::can_dp_imm_load($con$$constant)) {
__ fli_d(as_FloatRegister($dst$$reg), $con$$constant);
} else {
__ fld(as_FloatRegister($dst$$reg), $constantaddress($con));
}
__ fld(as_FloatRegister($dst$$reg), $constantaddress($con));
%}

ins_pipe(fp_load_constant_d);

@@ -1,87 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2025, Red Hat, Inc. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_RISCV_STUBDECLARATIONS_HPP
|
||||
#define CPU_RISCV_STUBDECLARATIONS_HPP
|
||||
|
||||
#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(initial, 10000) \
|
||||
|
||||
|
||||
#define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(continuation, 2000) \
|
||||
|
||||
|
||||
#define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(compiler, 45000) \
|
||||
do_stub(compiler, compare_long_string_LL) \
|
||||
do_arch_entry(riscv, compiler, compare_long_string_LL, \
|
||||
compare_long_string_LL, compare_long_string_LL) \
|
||||
do_stub(compiler, compare_long_string_UU) \
|
||||
do_arch_entry(riscv, compiler, compare_long_string_UU, \
|
||||
compare_long_string_UU, compare_long_string_UU) \
|
||||
do_stub(compiler, compare_long_string_LU) \
|
||||
do_arch_entry(riscv, compiler, compare_long_string_LU, \
|
||||
compare_long_string_LU, compare_long_string_LU) \
|
||||
do_stub(compiler, compare_long_string_UL) \
|
||||
do_arch_entry(riscv, compiler, compare_long_string_UL, \
|
||||
compare_long_string_UL, compare_long_string_UL) \
|
||||
do_stub(compiler, string_indexof_linear_ll) \
|
||||
do_arch_entry(riscv, compiler, string_indexof_linear_ll, \
|
||||
string_indexof_linear_ll, string_indexof_linear_ll) \
|
||||
do_stub(compiler, string_indexof_linear_uu) \
|
||||
do_arch_entry(riscv, compiler, string_indexof_linear_uu, \
|
||||
string_indexof_linear_uu, string_indexof_linear_uu) \
|
||||
do_stub(compiler, string_indexof_linear_ul) \
|
||||
do_arch_entry(riscv, compiler, string_indexof_linear_ul, \
|
||||
string_indexof_linear_ul, string_indexof_linear_ul) \
|
||||
|
||||
|
||||
#define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(final, 20000 ZGC_ONLY(+10000)) \
|
||||
do_stub(final, copy_byte_f) \
|
||||
do_arch_entry(riscv, final, copy_byte_f, copy_byte_f, \
|
||||
copy_byte_f) \
|
||||
do_stub(final, copy_byte_b) \
|
||||
do_arch_entry(riscv, final, copy_byte_b, copy_byte_b, \
|
||||
copy_byte_b) \
|
||||
do_stub(final, zero_blocks) \
|
||||
do_arch_entry(riscv, final, zero_blocks, zero_blocks, \
|
||||
zero_blocks) \
|
||||
|
||||
|
||||
#endif // CPU_RISCV_STUBDECLARATIONS_HPP
|
||||
File diff suppressed because it is too large
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2025, Red Hat Inc. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -33,19 +33,14 @@
// Implementation of the platform-specific part of StubRoutines - for
// a description of how to extend it, see the stubRoutines.hpp file.


// define fields for arch-specific entries

#define DEFINE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \
address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = nullptr;

#define DEFINE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function);

STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT)

#undef DEFINE_ARCH_ENTRY_INIT
#undef DEFINE_ARCH_ENTRY
address StubRoutines::riscv::_zero_blocks = nullptr;
address StubRoutines::riscv::_compare_long_string_LL = nullptr;
address StubRoutines::riscv::_compare_long_string_UU = nullptr;
address StubRoutines::riscv::_compare_long_string_LU = nullptr;
address StubRoutines::riscv::_compare_long_string_UL = nullptr;
address StubRoutines::riscv::_string_indexof_linear_ll = nullptr;
address StubRoutines::riscv::_string_indexof_linear_uu = nullptr;
address StubRoutines::riscv::_string_indexof_linear_ul = nullptr;

bool StubRoutines::riscv::_completed = false;


@@ -35,53 +35,63 @@ static bool returns_to_call_stub(address return_pc) {
|
||||
return return_pc == _call_stub_return_address;
|
||||
}
|
||||
|
||||
// emit enum used to size per-blob code buffers
|
||||
|
||||
#define DEFINE_BLOB_SIZE(blob_name, size) \
|
||||
_ ## blob_name ## _code_size = size,
|
||||
|
||||
enum platform_dependent_constants {
|
||||
STUBGEN_ARCH_BLOBS_DO(DEFINE_BLOB_SIZE)
|
||||
// simply increase sizes if too small (assembler will crash if too small)
|
||||
_initial_stubs_code_size = 10000,
|
||||
_continuation_stubs_code_size = 2000,
|
||||
_compiler_stubs_code_size = 45000,
|
||||
_final_stubs_code_size = 20000 ZGC_ONLY(+10000)
|
||||
};
|
||||
|
||||
#undef DEFINE_BLOB_SIZE
|
||||
|
||||
class riscv {
|
||||
friend class StubGenerator;
|
||||
#if INCLUDE_JVMCI
|
||||
friend class JVMCIVMStructs;
|
||||
#endif
|
||||
|
||||
// declare fields for arch-specific entries
|
||||
private:
|
||||
static address _zero_blocks;
|
||||
|
||||
#define DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \
|
||||
static address STUB_FIELD_NAME(field_name) ;
|
||||
|
||||
#define DECLARE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
|
||||
DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name)
|
||||
|
||||
private:
|
||||
STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT)
|
||||
|
||||
#undef DECLARE_ARCH_ENTRY_INIT
|
||||
#undef DECLARE_ARCH_ENTRY
|
||||
static address _compare_long_string_LL;
|
||||
static address _compare_long_string_LU;
|
||||
static address _compare_long_string_UL;
|
||||
static address _compare_long_string_UU;
|
||||
static address _string_indexof_linear_ll;
|
||||
static address _string_indexof_linear_uu;
|
||||
static address _string_indexof_linear_ul;
|
||||
|
||||
static bool _completed;
|
||||
|
||||
public:
|
||||
|
||||
// declare getters for arch-specific entries
|
||||
static address zero_blocks() {
|
||||
return _zero_blocks;
|
||||
}
|
||||
|
||||
#define DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name) \
|
||||
static address getter_name() { return STUB_FIELD_NAME(field_name) ; }
|
||||
static address compare_long_string_LL() {
|
||||
return _compare_long_string_LL;
|
||||
}
|
||||
|
||||
#define DEFINE_ARCH_ENTRY_GETTER_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
|
||||
DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name)
|
||||
static address compare_long_string_LU() {
|
||||
return _compare_long_string_LU;
|
||||
}
|
||||
|
||||
STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT)
|
||||
static address compare_long_string_UL() {
|
||||
return _compare_long_string_UL;
|
||||
}
|
||||
|
||||
#undef DEFINE_ARCH_ENTRY_GETTER_INIT
|
||||
#undef DEFINE_ARCH_ENTRY_GETTER
|
||||
static address compare_long_string_UU() {
|
||||
return _compare_long_string_UU;
|
||||
}
|
||||
|
||||
static address string_indexof_linear_ul() {
|
||||
return _string_indexof_linear_ul;
|
||||
}
|
||||
|
||||
static address string_indexof_linear_ll() {
|
||||
return _string_indexof_linear_ll;
|
||||
}
|
||||
|
||||
static address string_indexof_linear_uu() {
|
||||
return _string_indexof_linear_uu;
|
||||
}
|
||||
|
||||
static bool complete() {
|
||||
return _completed;
|
||||
|
||||
@@ -157,7 +157,6 @@ class VM_Version : public Abstract_VM_Version {
decl(ext_Zbc , "Zbc" , RV_NO_FLAG_BIT, true , NO_UPDATE_DEFAULT) \
decl(ext_Zbs , "Zbs" , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZbs)) \
decl(ext_Zcb , "Zcb" , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZcb)) \
decl(ext_Zfa , "Zfa" , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZfa)) \
decl(ext_Zfh , "Zfh" , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZfh)) \
decl(ext_Zfhmin , "Zfhmin" , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZfhmin)) \
decl(ext_Zicsr , "Zicsr" , RV_NO_FLAG_BIT, true , NO_UPDATE_DEFAULT) \
@@ -227,7 +226,6 @@ class VM_Version : public Abstract_VM_Version {
RV_ENABLE_EXTENSION(UseZbb) \
RV_ENABLE_EXTENSION(UseZbs) \
RV_ENABLE_EXTENSION(UseZcb) \
RV_ENABLE_EXTENSION(UseZfa) \
RV_ENABLE_EXTENSION(UseZfhmin) \
RV_ENABLE_EXTENSION(UseZic64b) \
RV_ENABLE_EXTENSION(UseZicbom) \

@@ -1,60 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2025, Red Hat, Inc. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_S390_STUBDECLARATIONS_HPP
|
||||
#define CPU_S390_STUBDECLARATIONS_HPP
|
||||
|
||||
#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(initial, 20000) \
|
||||
|
||||
|
||||
#define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(continuation, 2000) \
|
||||
|
||||
|
||||
#define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(compiler, 20000 ) \
|
||||
do_stub(compiler, partial_subtype_check) \
|
||||
do_arch_entry(zarch, compiler, partial_subtype_check, \
|
||||
partial_subtype_check, partial_subtype_check) \
|
||||
|
||||
|
||||
#define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(final, 20000) \
|
||||
|
||||
|
||||
#endif // CPU_S390_STUBDECLARATIONS_HPP
|
||||
@@ -118,8 +118,7 @@ class StubGenerator: public StubCodeGenerator {
// Set up a new C frame, copy Java arguments, call frame manager
// or native_entry, and process result.

StubGenStubId stub_id = StubGenStubId::call_stub_id;
StubCodeMark mark(this, stub_id);
StubCodeMark mark(this, "StubRoutines", "call_stub");
address start = __ pc();

Register r_arg_call_wrapper_addr = Z_ARG1;
@@ -459,8 +458,7 @@ class StubGenerator: public StubCodeGenerator {
// pending exception stored in JavaThread that can be tested from
// within the VM.
address generate_catch_exception() {
StubGenStubId stub_id = StubGenStubId::catch_exception_id;
StubCodeMark mark(this, stub_id);
StubCodeMark mark(this, "StubRoutines", "catch_exception");

address start = __ pc();

@@ -511,8 +509,7 @@ class StubGenerator: public StubCodeGenerator {
// (Z_R14 is unchanged and is live out).
//
address generate_forward_exception() {
StubGenStubId stub_id = StubGenStubId::forward_exception_id;
StubCodeMark mark(this, stub_id);
StubCodeMark mark(this, "StubRoutines", "forward_exception");
address start = __ pc();

#define pending_exception_offset in_bytes(Thread::pending_exception_offset())
@@ -592,8 +589,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// raddr: Z_R14, blown by call
|
||||
//
|
||||
address generate_partial_subtype_check() {
|
||||
StubGenStubId stub_id = StubGenStubId::partial_subtype_check_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "partial_subtype_check");
|
||||
Label miss;
|
||||
|
||||
address start = __ pc();
|
||||
@@ -625,9 +621,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
return start;
|
||||
}
|
||||
|
||||
void generate_lookup_secondary_supers_table_stub() {
|
||||
StubGenStubId stub_id = StubGenStubId::lookup_secondary_supers_table_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) {
|
||||
StubCodeMark mark(this, "StubRoutines", "lookup_secondary_supers_table");
|
||||
|
||||
const Register
|
||||
r_super_klass = Z_ARG1,
|
||||
@@ -637,20 +632,20 @@ class StubGenerator: public StubCodeGenerator {
|
||||
r_array_base = Z_ARG5,
|
||||
r_bitmap = Z_R10,
|
||||
r_result = Z_R11;
|
||||
for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) {
|
||||
StubRoutines::_lookup_secondary_supers_table_stubs[slot] = __ pc();
|
||||
__ lookup_secondary_supers_table_const(r_sub_klass, r_super_klass,
|
||||
r_array_base, r_array_length, r_array_index,
|
||||
r_bitmap, r_result, slot);
|
||||
address start = __ pc();
|
||||
|
||||
__ z_br(Z_R14);
|
||||
}
|
||||
__ lookup_secondary_supers_table_const(r_sub_klass, r_super_klass,
|
||||
r_array_base, r_array_length, r_array_index,
|
||||
r_bitmap, r_result, super_klass_index);
|
||||
|
||||
__ z_br(Z_R14);
|
||||
|
||||
return start;
|
||||
}
|
||||
|
||||
// Slow path implementation for UseSecondarySupersTable.
|
||||
address generate_lookup_secondary_supers_table_slow_path_stub() {
|
||||
StubGenStubId stub_id = StubGenStubId::lookup_secondary_supers_table_slow_path_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "lookup_secondary_supers_table_slow_path");
|
||||
|
||||
address start = __ pc();
|
||||
|
||||
@@ -1265,75 +1260,51 @@ class StubGenerator: public StubCodeGenerator {
|
||||
}
|
||||
}
|
||||
|
||||
address generate_disjoint_nonoop_copy(StubGenStubId stub_id) {
|
||||
bool aligned;
|
||||
int element_size;
|
||||
switch (stub_id) {
|
||||
case jbyte_disjoint_arraycopy_id:
|
||||
aligned = false;
|
||||
element_size = 1;
|
||||
break;
|
||||
case arrayof_jbyte_disjoint_arraycopy_id:
|
||||
aligned = true;
|
||||
element_size = 1;
|
||||
break;
|
||||
case jshort_disjoint_arraycopy_id:
|
||||
aligned = false;
|
||||
element_size = 2;
|
||||
break;
|
||||
case arrayof_jshort_disjoint_arraycopy_id:
|
||||
aligned = true;
|
||||
element_size = 2;
|
||||
break;
|
||||
case jint_disjoint_arraycopy_id:
|
||||
aligned = false;
|
||||
element_size = 4;
|
||||
break;
|
||||
case arrayof_jint_disjoint_arraycopy_id:
|
||||
aligned = true;
|
||||
element_size = 4;
|
||||
break;
|
||||
case jlong_disjoint_arraycopy_id:
|
||||
aligned = false;
|
||||
element_size = 8;
|
||||
break;
|
||||
case arrayof_jlong_disjoint_arraycopy_id:
|
||||
aligned = true;
|
||||
element_size = 8;
|
||||
break;
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
StubCodeMark mark(this, stub_id);
|
||||
// Generate stub for disjoint byte copy. If "aligned" is true, the
|
||||
// "from" and "to" addresses are assumed to be heapword aligned.
|
||||
address generate_disjoint_byte_copy(bool aligned, const char * name) {
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
|
||||
// This is the zarch specific stub generator for byte array copy.
|
||||
// Refer to generate_disjoint_copy for a list of prereqs and features:
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
generate_disjoint_copy(aligned, element_size, false, false);
|
||||
generate_disjoint_copy(aligned, 1, false, false);
|
||||
return __ addr_at(start_off);
|
||||
}
|
||||
|
||||
address generate_disjoint_oop_copy(StubGenStubId stub_id) {
|
||||
bool aligned;
|
||||
bool dest_uninitialized;
|
||||
switch (stub_id) {
|
||||
case oop_disjoint_arraycopy_id:
|
||||
aligned = false;
|
||||
dest_uninitialized = false;
|
||||
break;
|
||||
case arrayof_oop_disjoint_arraycopy_id:
|
||||
aligned = true;
|
||||
dest_uninitialized = false;
|
||||
break;
|
||||
case oop_disjoint_arraycopy_uninit_id:
|
||||
aligned = false;
|
||||
dest_uninitialized = true;
|
||||
break;
|
||||
case arrayof_oop_disjoint_arraycopy_uninit_id:
|
||||
aligned = true;
|
||||
dest_uninitialized = true;
|
||||
break;
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
StubCodeMark mark(this, stub_id);
|
||||
|
||||
address generate_disjoint_short_copy(bool aligned, const char * name) {
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
// This is the zarch specific stub generator for short array copy.
|
||||
// Refer to generate_disjoint_copy for a list of prereqs and features:
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
generate_disjoint_copy(aligned, 2, false, false);
|
||||
return __ addr_at(start_off);
|
||||
}
|
||||
|
||||
|
||||
address generate_disjoint_int_copy(bool aligned, const char * name) {
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
// This is the zarch specific stub generator for int array copy.
|
||||
// Refer to generate_disjoint_copy for a list of prereqs and features:
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
generate_disjoint_copy(aligned, 4, false, false);
|
||||
return __ addr_at(start_off);
|
||||
}
|
||||
|
||||
|
||||
address generate_disjoint_long_copy(bool aligned, const char * name) {
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
// This is the zarch specific stub generator for long array copy.
|
||||
// Refer to generate_disjoint_copy for a list of prereqs and features:
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
generate_disjoint_copy(aligned, 8, false, false);
|
||||
return __ addr_at(start_off);
|
||||
}
|
||||
|
||||
|
||||
address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
// This is the zarch specific stub generator for oop array copy.
|
||||
// Refer to generate_disjoint_copy for a list of prereqs and features.
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
@@ -1357,96 +1328,77 @@ class StubGenerator: public StubCodeGenerator {
|
||||
return __ addr_at(start_off);
|
||||
}
|
||||
|
||||
address generate_conjoint_nonoop_copy(StubGenStubId stub_id) {
|
||||
bool aligned;
|
||||
int shift; // i.e. log2(element size)
|
||||
address nooverlap_target;
|
||||
switch (stub_id) {
|
||||
case jbyte_arraycopy_id:
|
||||
aligned = false;
|
||||
shift = 0;
|
||||
nooverlap_target = StubRoutines::jbyte_disjoint_arraycopy();
break;
case arrayof_jbyte_arraycopy_id:
aligned = true;
shift = 0;
nooverlap_target = StubRoutines::arrayof_jbyte_disjoint_arraycopy();
break;
case jshort_arraycopy_id:
aligned = false;
shift = 1;
nooverlap_target = StubRoutines::jshort_disjoint_arraycopy();
break;
case arrayof_jshort_arraycopy_id:
aligned = true;
shift = 1;
nooverlap_target = StubRoutines::arrayof_jshort_disjoint_arraycopy();
break;
case jint_arraycopy_id:
aligned = false;
shift = 2;
nooverlap_target = StubRoutines::jint_disjoint_arraycopy();
break;
case arrayof_jint_arraycopy_id:
aligned = true;
shift = 2;
nooverlap_target = StubRoutines::arrayof_jint_disjoint_arraycopy();
break;
case jlong_arraycopy_id:
aligned = false;
shift = 3;
nooverlap_target = StubRoutines::jlong_disjoint_arraycopy();
break;
case arrayof_jlong_arraycopy_id:
aligned = true;
shift = 3;
nooverlap_target = StubRoutines::arrayof_jlong_disjoint_arraycopy();
break;
default:
ShouldNotReachHere();
}
StubCodeMark mark(this, stub_id);
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
array_overlap_test(nooverlap_target, shift); // Branch away to nooverlap_target if disjoint.
generate_conjoint_copy(aligned, 1 << shift, false);

address generate_conjoint_byte_copy(bool aligned, const char * name) {
StubCodeMark mark(this, "StubRoutines", name);
// This is the zarch specific stub generator for overlapping byte array copy.
// Refer to generate_conjoint_copy for a list of prereqs and features:
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
address nooverlap_target = aligned ? StubRoutines::arrayof_jbyte_disjoint_arraycopy()
: StubRoutines::jbyte_disjoint_arraycopy();

array_overlap_test(nooverlap_target, 0); // Branch away to nooverlap_target if disjoint.
generate_conjoint_copy(aligned, 1, false);

return __ addr_at(start_off);
}

address generate_conjoint_oop_copy(StubGenStubId stub_id) {
bool aligned;
bool dest_uninitialized;
address nooverlap_target;
switch (stub_id) {
case oop_arraycopy_id:
aligned = false;
dest_uninitialized = false;
nooverlap_target = StubRoutines::oop_disjoint_arraycopy(dest_uninitialized);
break;
case arrayof_oop_arraycopy_id:
aligned = true;
dest_uninitialized = false;
nooverlap_target = StubRoutines::arrayof_oop_disjoint_arraycopy(dest_uninitialized);
break;
case oop_arraycopy_uninit_id:
aligned = false;
dest_uninitialized = true;
nooverlap_target = StubRoutines::oop_disjoint_arraycopy(dest_uninitialized);
break;
case arrayof_oop_arraycopy_uninit_id:
aligned = true;
dest_uninitialized = true;
nooverlap_target = StubRoutines::arrayof_oop_disjoint_arraycopy(dest_uninitialized);
break;
default:
ShouldNotReachHere();
}
StubCodeMark mark(this, stub_id);

address generate_conjoint_short_copy(bool aligned, const char * name) {
StubCodeMark mark(this, "StubRoutines", name);
// This is the zarch specific stub generator for overlapping short array copy.
// Refer to generate_conjoint_copy for a list of prereqs and features:
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
address nooverlap_target = aligned ? StubRoutines::arrayof_jshort_disjoint_arraycopy()
: StubRoutines::jshort_disjoint_arraycopy();

array_overlap_test(nooverlap_target, 1); // Branch away to nooverlap_target if disjoint.
generate_conjoint_copy(aligned, 2, false);

return __ addr_at(start_off);
}

address generate_conjoint_int_copy(bool aligned, const char * name) {
StubCodeMark mark(this, "StubRoutines", name);
// This is the zarch specific stub generator for overlapping int array copy.
// Refer to generate_conjoint_copy for a list of prereqs and features:

unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
address nooverlap_target = aligned ? StubRoutines::arrayof_jint_disjoint_arraycopy()
: StubRoutines::jint_disjoint_arraycopy();

array_overlap_test(nooverlap_target, 2); // Branch away to nooverlap_target if disjoint.
generate_conjoint_copy(aligned, 4, false);

return __ addr_at(start_off);
}

address generate_conjoint_long_copy(bool aligned, const char * name) {
StubCodeMark mark(this, "StubRoutines", name);
// This is the zarch specific stub generator for overlapping long array copy.
// Refer to generate_conjoint_copy for a list of prereqs and features:

unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
address nooverlap_target = aligned ? StubRoutines::arrayof_jlong_disjoint_arraycopy()
: StubRoutines::jlong_disjoint_arraycopy();

array_overlap_test(nooverlap_target, 3); // Branch away to nooverlap_target if disjoint.
generate_conjoint_copy(aligned, 8, false);

return __ addr_at(start_off);
}

address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
StubCodeMark mark(this, "StubRoutines", name);
// This is the zarch specific stub generator for overlapping oop array copy.
// Refer to generate_conjoint_copy for a list of prereqs and features.
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
unsigned int size = UseCompressedOops ? 4 : 8;
unsigned int shift = UseCompressedOops ? 2 : 3;

address nooverlap_target = aligned ? StubRoutines::arrayof_oop_disjoint_arraycopy(dest_uninitialized)
: StubRoutines::oop_disjoint_arraycopy(dest_uninitialized);

// Branch to disjoint_copy (if applicable) before pre_barrier to avoid double pre_barrier.
array_overlap_test(nooverlap_target, shift); // Branch away to nooverlap_target if disjoint.

@@ -1473,33 +1425,33 @@ class StubGenerator: public StubCodeGenerator {

// Note: the disjoint stubs must be generated first, some of
// the conjoint stubs use them.
StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubGenStubId::jbyte_disjoint_arraycopy_id);
StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_nonoop_copy(StubGenStubId::jshort_disjoint_arraycopy_id);
StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubGenStubId::jint_disjoint_arraycopy_id);
StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubGenStubId::jlong_disjoint_arraycopy_id);
StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy (StubGenStubId::oop_disjoint_arraycopy_id);
StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy (StubGenStubId::oop_disjoint_arraycopy_uninit_id);
StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy (false, "jbyte_disjoint_arraycopy");
StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy (false, "jint_disjoint_arraycopy");
StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy (false, "jlong_disjoint_arraycopy");
StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy (false, "oop_disjoint_arraycopy", false);
StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy (false, "oop_disjoint_arraycopy_uninit", true);

StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubGenStubId::arrayof_jbyte_disjoint_arraycopy_id);
StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_nonoop_copy(StubGenStubId::arrayof_jshort_disjoint_arraycopy_id);
StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubGenStubId::arrayof_jint_disjoint_arraycopy_id);
StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubGenStubId::arrayof_jlong_disjoint_arraycopy_id);
StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy (StubGenStubId::arrayof_oop_disjoint_arraycopy_id);
StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy (StubGenStubId::arrayof_oop_disjoint_arraycopy_uninit_id);
StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy (true, "arrayof_jbyte_disjoint_arraycopy");
StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy");
StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy (true, "arrayof_jint_disjoint_arraycopy");
StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy (true, "arrayof_jlong_disjoint_arraycopy");
StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy (true, "arrayof_oop_disjoint_arraycopy", false);
StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy (true, "arrayof_oop_disjoint_arraycopy_uninit", true);

StubRoutines::_jbyte_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::jbyte_arraycopy_id);
StubRoutines::_jshort_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::jshort_arraycopy_id);
StubRoutines::_jint_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::jint_arraycopy_id);
StubRoutines::_jlong_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::jlong_arraycopy_id);
StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(StubGenStubId::oop_arraycopy_id);
StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(StubGenStubId::oop_arraycopy_uninit_id);
StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy (false, "jbyte_arraycopy");
StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, "jshort_arraycopy");
StubRoutines::_jint_arraycopy = generate_conjoint_int_copy (false, "jint_arraycopy");
StubRoutines::_jlong_arraycopy = generate_conjoint_long_copy (false, "jlong_arraycopy");
StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy (false, "oop_arraycopy", false);
StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy (false, "oop_arraycopy_uninit", true);

StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::arrayof_jbyte_arraycopy_id);
StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::arrayof_jshort_arraycopy_id);
StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_nonoop_copy (StubGenStubId::arrayof_jint_arraycopy_id);
StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::arrayof_jlong_arraycopy_id);
StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy(StubGenStubId::arrayof_oop_arraycopy_id);
StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(StubGenStubId::arrayof_oop_arraycopy_uninit_id);
StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy (true, "arrayof_jbyte_arraycopy");
StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy");
StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy (true, "arrayof_jint_arraycopy");
StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_long_copy (true, "arrayof_jlong_arraycopy");
StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy (true, "arrayof_oop_arraycopy", false);
StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy (true, "arrayof_oop_arraycopy_uninit", true);
}

// Call interface for AES_encryptBlock, AES_decryptBlock stubs.
@@ -1781,10 +1733,9 @@ class StubGenerator: public StubCodeGenerator {
}

// Compute AES encrypt function.
address generate_AES_encryptBlock() {
address generate_AES_encryptBlock(const char* name) {
__ align(CodeEntryAlignment);
StubGenStubId stub_id = StubGenStubId::aescrypt_encryptBlock_id;
StubCodeMark mark(this, stub_id);
StubCodeMark mark(this, "StubRoutines", name);
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).

generate_AES_cipherBlock(false);
@@ -1793,10 +1744,9 @@ class StubGenerator: public StubCodeGenerator {
}

// Compute AES decrypt function.
address generate_AES_decryptBlock() {
address generate_AES_decryptBlock(const char* name) {
__ align(CodeEntryAlignment);
StubGenStubId stub_id = StubGenStubId::aescrypt_decryptBlock_id;
StubCodeMark mark(this, stub_id);
StubCodeMark mark(this, "StubRoutines", name);
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).

generate_AES_cipherBlock(true);
@@ -1854,10 +1804,9 @@ class StubGenerator: public StubCodeGenerator {
}

// Compute chained AES encrypt function.
address generate_cipherBlockChaining_AES_encrypt() {
address generate_cipherBlockChaining_AES_encrypt(const char* name) {
__ align(CodeEntryAlignment);
StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_encryptAESCrypt_id;
StubCodeMark mark(this, stub_id);
StubCodeMark mark(this, "StubRoutines", name);
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).

generate_AES_cipherBlockChaining(false);
@@ -1866,10 +1815,9 @@ class StubGenerator: public StubCodeGenerator {
}

// Compute chained AES decrypt function.
address generate_cipherBlockChaining_AES_decrypt() {
address generate_cipherBlockChaining_AES_decrypt(const char* name) {
__ align(CodeEntryAlignment);
StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_decryptAESCrypt_id;
StubCodeMark mark(this, stub_id);
StubCodeMark mark(this, "StubRoutines", name);
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).

generate_AES_cipherBlockChaining(true);
@@ -2573,10 +2521,9 @@ class StubGenerator: public StubCodeGenerator {

// Compute AES-CTR crypto function.
// Encrypt or decrypt is selected via parameters. Only one stub is necessary.
address generate_counterMode_AESCrypt() {
address generate_counterMode_AESCrypt(const char* name) {
__ align(CodeEntryAlignment);
StubGenStubId stub_id = StubGenStubId::counterMode_AESCrypt_id;
StubCodeMark mark(this, stub_id);
StubCodeMark mark(this, "StubRoutines", name);
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).

generate_counterMode_AES(false);
@@ -2589,8 +2536,7 @@ class StubGenerator: public StubCodeGenerator {
// Compute GHASH function.
address generate_ghash_processBlocks() {
__ align(CodeEntryAlignment);
StubGenStubId stub_id = StubGenStubId::ghash_processBlocks_id;
StubCodeMark mark(this, stub_id);
StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).

const Register state = Z_ARG1;
@@ -2667,20 +2613,9 @@ class StubGenerator: public StubCodeGenerator {
// provides for a large enough source data buffer.
//
// Compute SHA-1 function.
address generate_SHA1_stub(StubGenStubId stub_id) {
bool multiBlock;
switch (stub_id) {
case sha1_implCompress_id:
multiBlock = false;
break;
case sha1_implCompressMB_id:
multiBlock = true;
break;
default:
ShouldNotReachHere();
}
address generate_SHA1_stub(bool multiBlock, const char* name) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, stub_id);
StubCodeMark mark(this, "StubRoutines", name);
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).

const Register srcBuff = Z_ARG1; // Points to first block to process (offset already added).
@@ -2760,20 +2695,9 @@ class StubGenerator: public StubCodeGenerator {
}

// Compute SHA-256 function.
address generate_SHA256_stub(StubGenStubId stub_id) {
bool multiBlock;
switch (stub_id) {
case sha256_implCompress_id:
multiBlock = false;
break;
case sha256_implCompressMB_id:
multiBlock = true;
break;
default:
ShouldNotReachHere();
}
address generate_SHA256_stub(bool multiBlock, const char* name) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, stub_id);
StubCodeMark mark(this, "StubRoutines", name);
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).

const Register srcBuff = Z_ARG1;
@@ -2851,20 +2775,9 @@ class StubGenerator: public StubCodeGenerator {
}

// Compute SHA-512 function.
address generate_SHA512_stub(StubGenStubId stub_id) {
bool multiBlock;
switch (stub_id) {
case sha512_implCompress_id:
multiBlock = false;
break;
case sha512_implCompressMB_id:
multiBlock = true;
break;
default:
ShouldNotReachHere();
}
address generate_SHA512_stub(bool multiBlock, const char* name) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, stub_id);
StubCodeMark mark(this, "StubRoutines", name);
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).

const Register srcBuff = Z_ARG1;
@@ -2954,7 +2867,7 @@ class StubGenerator: public StubCodeGenerator {
* Z_RET - int crc result
**/
// Compute CRC function (generic, for all polynomials).
void generate_CRC_updateBytes(Register table, bool invertCRC) {
void generate_CRC_updateBytes(const char* name, Register table, bool invertCRC) {

// arguments to kernel_crc32:
Register crc = Z_ARG1; // Current checksum, preset by caller or result from previous call, int.
@@ -2985,19 +2898,18 @@ class StubGenerator: public StubCodeGenerator {


// Compute CRC32 function.
address generate_CRC32_updateBytes() {
address generate_CRC32_updateBytes(const char* name) {
__ align(CodeEntryAlignment);
StubGenStubId stub_id = StubGenStubId::updateBytesCRC32_id;
StubCodeMark mark(this, stub_id);
StubCodeMark mark(this, "StubRoutines", name);
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).

assert(UseCRC32Intrinsics, "should not generate this stub (%s) with CRC32 intrinsics disabled", StubRoutines::get_stub_name(stub_id));
assert(UseCRC32Intrinsics, "should not generate this stub (%s) with CRC32 intrinsics disabled", name);

BLOCK_COMMENT("CRC32_updateBytes {");
Register table = Z_ARG4; // crc32 table address.
StubRoutines::zarch::generate_load_crc_table_addr(_masm, table);

generate_CRC_updateBytes(table, true);
generate_CRC_updateBytes(name, table, true);
BLOCK_COMMENT("} CRC32_updateBytes");

return __ addr_at(start_off);
@@ -3005,19 +2917,18 @@ class StubGenerator: public StubCodeGenerator {


// Compute CRC32C function.
address generate_CRC32C_updateBytes() {
address generate_CRC32C_updateBytes(const char* name) {
__ align(CodeEntryAlignment);
StubGenStubId stub_id = StubGenStubId::updateBytesCRC32C_id;
StubCodeMark mark(this, stub_id);
StubCodeMark mark(this, "StubRoutines", name);
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).

assert(UseCRC32CIntrinsics, "should not generate this stub (%s) with CRC32C intrinsics disabled", StubRoutines::get_stub_name(stub_id));
assert(UseCRC32CIntrinsics, "should not generate this stub (%s) with CRC32C intrinsics disabled", name);

BLOCK_COMMENT("CRC32C_updateBytes {");
Register table = Z_ARG4; // crc32c table address.
StubRoutines::zarch::generate_load_crc32c_table_addr(_masm, table);

generate_CRC_updateBytes(table, false);
generate_CRC_updateBytes(name, table, false);
BLOCK_COMMENT("} CRC32C_updateBytes");

return __ addr_at(start_off);

||||
// Z_ARG5 - z address
|
||||
address generate_multiplyToLen() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::multiplyToLen_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "multiplyToLen");
|
||||
|
||||
address start = __ pc();
|
||||
|
||||
@@ -3064,8 +2974,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
address generate_method_entry_barrier() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::method_entry_barrier_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "nmethod_entry_barrier");
|
||||
|
||||
address start = __ pc();
|
||||
|
||||
@@ -3130,8 +3039,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
// exception handler for upcall stubs
|
||||
address generate_upcall_stub_exception_handler() {
|
||||
StubGenStubId stub_id = StubGenStubId::upcall_stub_exception_handler_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "upcall stub exception handler");
|
||||
address start = __ pc();
|
||||
|
||||
// Native caller has no idea how to handle exceptions,
|
||||
@@ -3148,8 +3056,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// Z_ARG1 = jobject receiver
|
||||
// Z_method = Method* result
|
||||
address generate_upcall_stub_load_target() {
|
||||
StubGenStubId stub_id = StubGenStubId::upcall_stub_load_target_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "upcall_stub_load_target");
|
||||
address start = __ pc();
|
||||
|
||||
__ resolve_global_jobject(Z_ARG1, Z_tmp_1, Z_tmp_2);
|
||||
@@ -3186,12 +3093,12 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
if (UseCRC32Intrinsics) {
|
||||
StubRoutines::_crc_table_adr = (address)StubRoutines::zarch::_crc_table;
|
||||
StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes();
|
||||
StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes");
|
||||
}
|
||||
|
||||
if (UseCRC32CIntrinsics) {
|
||||
StubRoutines::_crc32c_table_addr = (address)StubRoutines::zarch::_crc32c_table;
|
||||
StubRoutines::_updateBytesCRC32C = generate_CRC32C_updateBytes();
|
||||
StubRoutines::_updateBytesCRC32C = generate_CRC32C_updateBytes("CRC32C_updateBytes");
|
||||
}
|
||||
|
||||
// Compact string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
|
||||
@@ -3210,6 +3117,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
void generate_final_stubs() {
|
||||
// Generates all stubs and initializes the entry points.
|
||||
|
||||
StubRoutines::zarch::_partial_subtype_check = generate_partial_subtype_check();
|
||||
|
||||
// Support for verify_oop (must happen after universe_init).
|
||||
StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine();
|
||||
|
||||
@@ -3222,31 +3131,19 @@ class StubGenerator: public StubCodeGenerator {
|
||||
StubRoutines::_method_entry_barrier = generate_method_entry_barrier();
|
||||
}
|
||||
|
||||
#ifdef COMPILER2
|
||||
if (UseSecondarySupersTable) {
|
||||
StubRoutines::_lookup_secondary_supers_table_slow_path_stub = generate_lookup_secondary_supers_table_slow_path_stub();
|
||||
if (!InlineSecondarySupersTest) {
|
||||
generate_lookup_secondary_supers_table_stub();
|
||||
}
|
||||
}
|
||||
#endif // COMPILER2
|
||||
|
||||
StubRoutines::_upcall_stub_exception_handler = generate_upcall_stub_exception_handler();
|
||||
StubRoutines::_upcall_stub_load_target = generate_upcall_stub_load_target();
|
||||
}
|
||||
|
||||
void generate_compiler_stubs() {
|
||||
|
||||
StubRoutines::zarch::_partial_subtype_check = generate_partial_subtype_check();
|
||||
|
||||
#if COMPILER2_OR_JVMCI
|
||||
// Generate AES intrinsics code.
|
||||
if (UseAESIntrinsics) {
|
||||
if (VM_Version::has_Crypto_AES()) {
|
||||
StubRoutines::_aescrypt_encryptBlock = generate_AES_encryptBlock();
|
||||
StubRoutines::_aescrypt_decryptBlock = generate_AES_decryptBlock();
|
||||
StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_AES_encrypt();
|
||||
StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_AES_decrypt();
|
||||
StubRoutines::_aescrypt_encryptBlock = generate_AES_encryptBlock("AES_encryptBlock");
|
||||
StubRoutines::_aescrypt_decryptBlock = generate_AES_decryptBlock("AES_decryptBlock");
|
||||
StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_AES_encrypt("AES_encryptBlock_chaining");
|
||||
StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_AES_decrypt("AES_decryptBlock_chaining");
|
||||
} else {
|
||||
// In PRODUCT builds, the function pointers will keep their initial (null) value.
|
||||
// LibraryCallKit::try_to_inline() will return false then, preventing the intrinsic to be called.
|
||||
@@ -3256,7 +3153,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
if (UseAESCTRIntrinsics) {
|
||||
if (VM_Version::has_Crypto_AES_CTR()) {
|
||||
StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt();
|
||||
StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt("counterMode_AESCrypt");
|
||||
} else {
|
||||
// In PRODUCT builds, the function pointers will keep their initial (null) value.
|
||||
// LibraryCallKit::try_to_inline() will return false then, preventing the intrinsic to be called.
|
||||
@@ -3271,16 +3168,16 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
// Generate SHA1/SHA256/SHA512 intrinsics code.
|
||||
if (UseSHA1Intrinsics) {
|
||||
StubRoutines::_sha1_implCompress = generate_SHA1_stub(StubGenStubId::sha1_implCompress_id);
|
||||
StubRoutines::_sha1_implCompressMB = generate_SHA1_stub(StubGenStubId::sha1_implCompressMB_id);
|
||||
StubRoutines::_sha1_implCompress = generate_SHA1_stub(false, "SHA1_singleBlock");
|
||||
StubRoutines::_sha1_implCompressMB = generate_SHA1_stub(true, "SHA1_multiBlock");
|
||||
}
|
||||
if (UseSHA256Intrinsics) {
|
||||
StubRoutines::_sha256_implCompress = generate_SHA256_stub(StubGenStubId::sha256_implCompress_id);
|
||||
StubRoutines::_sha256_implCompressMB = generate_SHA256_stub(StubGenStubId::sha256_implCompressMB_id);
|
||||
StubRoutines::_sha256_implCompress = generate_SHA256_stub(false, "SHA256_singleBlock");
|
||||
StubRoutines::_sha256_implCompressMB = generate_SHA256_stub(true, "SHA256_multiBlock");
|
||||
}
|
||||
if (UseSHA512Intrinsics) {
|
||||
StubRoutines::_sha512_implCompress = generate_SHA512_stub(StubGenStubId::sha512_implCompress_id);
|
||||
StubRoutines::_sha512_implCompressMB = generate_SHA512_stub(StubGenStubId::sha512_implCompressMB_id);
|
||||
StubRoutines::_sha512_implCompress = generate_SHA512_stub(false, "SHA512_singleBlock");
|
||||
StubRoutines::_sha512_implCompressMB = generate_SHA512_stub(true, "SHA512_multiBlock");
|
||||
}
|
||||
|
||||
#ifdef COMPILER2
|
||||
@@ -3295,27 +3192,35 @@ class StubGenerator: public StubCodeGenerator {
|
||||
StubRoutines::_montgomerySquare
|
||||
= CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
|
||||
}
|
||||
if (UseSecondarySupersTable) {
|
||||
StubRoutines::_lookup_secondary_supers_table_slow_path_stub = generate_lookup_secondary_supers_table_slow_path_stub();
|
||||
if (!InlineSecondarySupersTest) {
|
||||
for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) {
|
||||
StubRoutines::_lookup_secondary_supers_table_stubs[slot] = generate_lookup_secondary_supers_table_stub(slot);
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
#endif // COMPILER2_OR_JVMCI
|
||||
}
|
||||
|
||||
public:
StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) {
switch(blob_id) {
case initial_id:
StubGenerator(CodeBuffer* code, StubsKind kind) : StubCodeGenerator(code) {
switch(kind) {
case Initial_stubs:
generate_initial_stubs();
break;
case continuation_id:
case Continuation_stubs:
generate_continuation_stubs();
break;
case compiler_id:
case Compiler_stubs:
generate_compiler_stubs();
break;
case final_id:
case Final_stubs:
generate_final_stubs();
break;
default:
fatal("unexpected blob id: %d", blob_id);
fatal("unexpected stubs kind: %d", kind);
break;
};
}
@@ -3354,6 +3259,6 @@ class StubGenerator: public StubCodeGenerator {

};

void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id) {
StubGenerator g(code, blob_id);
void StubGenerator_generate(CodeBuffer* code, StubCodeGenerator::StubsKind kind) {
StubGenerator g(code, kind);
}

@@ -32,18 +32,7 @@
|
||||
// Implementation of the platform-specific part of StubRoutines - for
|
||||
// a description of how to extend it, see the stubRoutines.hpp file.
|
||||
|
||||
// define fields for arch-specific entries
|
||||
|
||||
#define DEFINE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \
|
||||
address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = nullptr;
|
||||
|
||||
#define DEFINE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
|
||||
address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function);
|
||||
|
||||
STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT)
|
||||
|
||||
#undef DEFINE_ARCH_ENTRY_INIT
|
||||
#undef DEFINE_ARCH_ENTRY
|
||||
address StubRoutines::zarch::_partial_subtype_check = nullptr;
|
||||
|
||||
// Compact string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
|
||||
address StubRoutines::zarch::_trot_table_addr = nullptr;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2017 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@@ -31,17 +31,14 @@
|
||||
|
||||
static bool returns_to_call_stub(address return_pc) { return return_pc == _call_stub_return_address; }
|
||||
|
||||
// emit enum used to size per-blob code buffers
|
||||
|
||||
#define DEFINE_BLOB_SIZE(blob_name, size) \
|
||||
_ ## blob_name ## _code_size = size,
|
||||
|
||||
enum platform_dependent_constants {
|
||||
STUBGEN_ARCH_BLOBS_DO(DEFINE_BLOB_SIZE)
|
||||
enum { // Platform dependent constants.
|
||||
// simply increase sizes if too small (assembler will crash if too small)
|
||||
_initial_stubs_code_size = 20000,
|
||||
_continuation_stubs_code_size = 2000,
|
||||
_compiler_stubs_code_size = 20000,
|
||||
_final_stubs_code_size = 20000
|
||||
};
|
||||
|
||||
#undef DEFINE_BLOB_SIZE
|
||||
|
||||
// MethodHandles adapters
|
||||
enum method_handles_platform_dependent_constants {
|
||||
method_handles_adapters_code_size = 5000
|
||||
@@ -72,24 +69,10 @@ class zarch {
|
||||
locked = 1
|
||||
};
|
||||
|
||||
// declare fields for arch-specific entries
|
||||
|
||||
#define DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \
|
||||
static address STUB_FIELD_NAME(field_name) ;
|
||||
|
||||
#define DECLARE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
|
||||
DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name)
|
||||
|
||||
private:
|
||||
STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT)
|
||||
|
||||
#undef DECLARE_ARCH_ENTRY_INIT
|
||||
#undef DECLARE_ARCH_ENTRY
|
||||
|
||||
private:
|
||||
|
||||
static int _atomic_memory_operation_lock;
|
||||
|
||||
static address _partial_subtype_check;
|
||||
static juint _crc_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
|
||||
static juint _crc32c_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
|
||||
|
||||
@@ -98,20 +81,6 @@ private:
|
||||
static jlong _trot_table[TROT_COLUMN_SIZE];
|
||||
|
||||
public:
|
||||
|
||||
// declare getters for arch-specific entries
|
||||
|
||||
#define DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name) \
|
||||
static address getter_name() { return STUB_FIELD_NAME(field_name) ; }
|
||||
|
||||
#define DEFINE_ARCH_ENTRY_GETTER_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
|
||||
DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name)
|
||||
|
||||
STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT)
|
||||
|
||||
#undef DEFINE_ARCH_ENTRY_GETTER_INIT
|
||||
#undef DEFINE_ARCH_ENTRY_GETTER
|
||||
|
||||
// Global lock for everyone who needs to use atomic_compare_and_exchange
|
||||
// or atomic_increment -- should probably use more locks for more
|
||||
// scalability -- for instance one for each eden space or group of.
|
||||
@@ -123,6 +92,8 @@ private:
|
||||
static int atomic_memory_operation_lock() { return _atomic_memory_operation_lock; }
|
||||
static void set_atomic_memory_operation_lock(int value) { _atomic_memory_operation_lock = value; }
|
||||
|
||||
static address partial_subtype_check() { return _partial_subtype_check; }
|
||||
|
||||
static void generate_load_absolute_address(MacroAssembler* masm, Register table, address table_addr, uint64_t table_contents);
|
||||
static void generate_load_crc_table_addr(MacroAssembler* masm, Register table);
|
||||
static void generate_load_crc32c_table_addr(MacroAssembler* masm, Register table);
|
||||
|
||||
@@ -199,14 +199,13 @@ void StubGenerator::generate_string_indexof(address *fnptrs) {
|
||||
|
||||
static void generate_string_indexof_stubs(StubGenerator *stubgen, address *fnptrs,
|
||||
StrIntrinsicNode::ArgEncoding ae, MacroAssembler *_masm) {
|
||||
StubCodeMark mark(stubgen, "StubRoutines", "stringIndexOf");
|
||||
bool isLL = (ae == StrIntrinsicNode::LL);
|
||||
bool isUL = (ae == StrIntrinsicNode::UL);
|
||||
bool isUU = (ae == StrIntrinsicNode::UU);
|
||||
bool isU = isUL || isUU; // At least one is UTF-16
|
||||
assert(isLL || isUL || isUU, "Encoding not recognized");
|
||||
|
||||
StubGenStubId stub_id = (isLL ? StubGenStubId::string_indexof_linear_ll_id : (isUL ? StubGenStubId::string_indexof_linear_ul_id : StubGenStubId::string_indexof_linear_uu_id));
|
||||
StubCodeMark mark(stubgen, stub_id);
|
||||
// Keep track of isUL since we need to generate UU code in the main body
|
||||
// for the case where we expand the needle from bytes to words on the stack.
|
||||
// This is done at L_wcharBegin. The algorithm used is:
|
||||
|
||||
@@ -9136,14 +9136,14 @@ void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Registe
|
||||
Label L_exit;
|
||||
|
||||
if (is_pclmulqdq_supported ) {
|
||||
const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::crc32c_table_addr();
|
||||
const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 1);
|
||||
const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr;
|
||||
const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr+1);
|
||||
|
||||
const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 2);
|
||||
const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 3);
|
||||
const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2);
|
||||
const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3);
|
||||
|
||||
const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 4);
|
||||
const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 5);
|
||||
const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4);
|
||||
const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5);
|
||||
assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1 ) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\"");
|
||||
} else {
|
||||
const_or_pre_comp_const_index[0] = 1;
|
||||
@@ -9216,14 +9216,14 @@ void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Registe
|
||||
Label L_exit;
|
||||
|
||||
if (is_pclmulqdq_supported) {
|
||||
const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::crc32c_table_addr();
|
||||
const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 1);
|
||||
const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr;
|
||||
const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 1);
|
||||
|
||||
const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 2);
|
||||
const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 3);
|
||||
const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2);
|
||||
const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3);
|
||||
|
||||
const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 4);
|
||||
const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 5);
|
||||
const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4);
|
||||
const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5);
|
||||
} else {
|
||||
const_or_pre_comp_const_index[0] = 1;
|
||||
const_or_pre_comp_const_index[1] = 0;
|
||||
|
||||
@@ -1,262 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2025, Red Hat, Inc. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_X86_STUBDECLARATIONS_HPP
|
||||
#define CPU_X86_STUBDECLARATIONS_HPP
|
||||
|
||||
#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(initial, 20000 WINDOWS_ONLY(+1000)) \
|
||||
do_stub(initial, verify_mxcsr) \
|
||||
do_arch_entry(x86, initial, verify_mxcsr, verify_mxcsr_entry, \
|
||||
verify_mxcsr_entry) \
|
||||
LP64_ONLY( \
|
||||
do_stub(initial, get_previous_sp) \
|
||||
do_arch_entry(x86, initial, get_previous_sp, \
|
||||
get_previous_sp_entry, \
|
||||
get_previous_sp_entry) \
|
||||
do_stub(initial, f2i_fixup) \
|
||||
do_arch_entry(x86, initial, f2i_fixup, f2i_fixup, f2i_fixup) \
|
||||
do_stub(initial, f2l_fixup) \
|
||||
do_arch_entry(x86, initial, f2l_fixup, f2l_fixup, f2l_fixup) \
|
||||
do_stub(initial, d2i_fixup) \
|
||||
do_arch_entry(x86, initial, d2i_fixup, d2i_fixup, d2i_fixup) \
|
||||
do_stub(initial, d2l_fixup) \
|
||||
do_arch_entry(x86, initial, d2l_fixup, d2l_fixup, d2l_fixup) \
|
||||
do_stub(initial, float_sign_mask) \
|
||||
do_arch_entry(x86, initial, float_sign_mask, float_sign_mask, \
|
||||
float_sign_mask) \
|
||||
do_stub(initial, float_sign_flip) \
|
||||
do_arch_entry(x86, initial, float_sign_flip, float_sign_flip, \
|
||||
float_sign_flip) \
|
||||
do_stub(initial, double_sign_mask) \
|
||||
do_arch_entry(x86, initial, double_sign_mask, double_sign_mask, \
|
||||
double_sign_mask) \
|
||||
do_stub(initial, double_sign_flip) \
|
||||
do_arch_entry(x86, initial, double_sign_flip, double_sign_flip, \
|
||||
double_sign_flip) \
|
||||
) \
|
||||
NOT_LP64( \
|
||||
do_stub(initial, verify_fpu_cntrl_word) \
|
||||
do_arch_entry(x86, initial, verify_fpu_cntrl_word, \
|
||||
verify_fpu_cntrl_wrd_entry, \
|
||||
verify_fpu_cntrl_wrd_entry) \
|
||||
do_stub(initial, d2i_wrapper) \
|
||||
do_arch_entry(x86, initial, d2i_wrapper, d2i_wrapper, \
|
||||
d2i_wrapper) \
|
||||
do_stub(initial, d2l_wrapper) \
|
||||
do_arch_entry(x86, initial, d2l_wrapper, d2l_wrapper, \
|
||||
d2l_wrapper) \
|
||||
) \
|
||||
|
||||
|
||||
#define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(continuation, 1000 LP64_ONLY(+2000)) \
|
||||
|
||||
|
||||
#define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(compiler, 20000 LP64_ONLY(+60000) WINDOWS_ONLY(+2000)) \
|
||||
do_stub(compiler, vector_float_sign_mask) \
|
||||
do_arch_entry(x86, compiler, vector_float_sign_mask, \
|
||||
vector_float_sign_mask, vector_float_sign_mask) \
|
||||
do_stub(compiler, vector_float_sign_flip) \
|
||||
do_arch_entry(x86, compiler, vector_float_sign_flip, \
|
||||
vector_float_sign_flip, vector_float_sign_flip) \
|
||||
do_stub(compiler, vector_double_sign_mask) \
|
||||
do_arch_entry(x86, compiler, vector_double_sign_mask, \
|
||||
vector_double_sign_mask, vector_double_sign_mask) \
|
||||
do_stub(compiler, vector_double_sign_flip) \
|
||||
do_arch_entry(x86, compiler, vector_double_sign_flip, \
|
||||
vector_double_sign_flip, vector_double_sign_flip) \
|
||||
do_stub(compiler, vector_all_bits_set) \
|
||||
do_arch_entry(x86, compiler, vector_all_bits_set, \
|
||||
vector_all_bits_set, vector_all_bits_set) \
|
||||
do_stub(compiler, vector_int_mask_cmp_bits) \
|
||||
do_arch_entry(x86, compiler, vector_int_mask_cmp_bits, \
|
||||
vector_int_mask_cmp_bits, vector_int_mask_cmp_bits) \
|
||||
do_stub(compiler, vector_short_to_byte_mask) \
|
||||
do_arch_entry(x86, compiler, vector_short_to_byte_mask, \
|
||||
vector_short_to_byte_mask, vector_short_to_byte_mask) \
|
||||
do_stub(compiler, vector_byte_perm_mask) \
|
||||
do_arch_entry(x86, compiler,vector_byte_perm_mask, \
|
||||
vector_byte_perm_mask, vector_byte_perm_mask) \
|
||||
do_stub(compiler, vector_int_to_byte_mask) \
|
||||
do_arch_entry(x86, compiler, vector_int_to_byte_mask, \
|
||||
vector_int_to_byte_mask, vector_int_to_byte_mask) \
|
||||
do_stub(compiler, vector_int_to_short_mask) \
|
||||
do_arch_entry(x86, compiler, vector_int_to_short_mask, \
|
||||
vector_int_to_short_mask, vector_int_to_short_mask) \
|
||||
do_stub(compiler, vector_32_bit_mask) \
|
||||
do_arch_entry(x86, compiler, vector_32_bit_mask, \
|
||||
vector_32_bit_mask, vector_32_bit_mask) \
|
||||
do_stub(compiler, vector_64_bit_mask) \
|
||||
do_arch_entry(x86, compiler, vector_64_bit_mask, \
|
||||
vector_64_bit_mask, vector_64_bit_mask) \
|
||||
do_stub(compiler, vector_byte_shuffle_mask) \
|
||||
do_arch_entry(x86, compiler, vector_int_shuffle_mask, \
|
||||
vector_byte_shuffle_mask, vector_byte_shuffle_mask) \
|
||||
do_stub(compiler, vector_short_shuffle_mask) \
|
||||
do_arch_entry(x86, compiler, vector_int_shuffle_mask, \
|
||||
vector_short_shuffle_mask, vector_short_shuffle_mask) \
|
||||
do_stub(compiler, vector_int_shuffle_mask) \
|
||||
do_arch_entry(x86, compiler, vector_int_shuffle_mask, \
|
||||
vector_int_shuffle_mask, vector_int_shuffle_mask) \
|
||||
do_stub(compiler, vector_long_shuffle_mask) \
|
||||
do_arch_entry(x86, compiler, vector_long_shuffle_mask, \
|
||||
vector_long_shuffle_mask, vector_long_shuffle_mask) \
|
||||
do_stub(compiler, vector_long_sign_mask) \
|
||||
do_arch_entry(x86, compiler, vector_long_sign_mask, \
|
||||
vector_long_sign_mask, vector_long_sign_mask) \
|
||||
do_stub(compiler, vector_iota_indices) \
|
||||
do_arch_entry(x86, compiler, vector_iota_indices, \
|
||||
vector_iota_indices, vector_iota_indices) \
|
||||
do_stub(compiler, vector_count_leading_zeros_lut) \
|
||||
do_arch_entry(x86, compiler, vector_count_leading_zeros_lut, \
|
||||
vector_count_leading_zeros_lut, \
|
||||
vector_count_leading_zeros_lut) \
|
||||
do_stub(compiler, vector_reverse_bit_lut) \
|
||||
do_arch_entry(x86, compiler, vector_reverse_bit_lut, \
|
||||
vector_reverse_bit_lut, vector_reverse_bit_lut) \
|
||||
do_stub(compiler, vector_reverse_byte_perm_mask_short) \
|
||||
do_arch_entry(x86, compiler, vector_reverse_byte_perm_mask_short, \
|
||||
vector_reverse_byte_perm_mask_short, \
|
||||
vector_reverse_byte_perm_mask_short) \
|
||||
do_stub(compiler, vector_reverse_byte_perm_mask_int) \
|
||||
do_arch_entry(x86, compiler, vector_reverse_byte_perm_mask_int, \
|
||||
vector_reverse_byte_perm_mask_int, \
|
||||
vector_reverse_byte_perm_mask_int) \
|
||||
do_stub(compiler, vector_reverse_byte_perm_mask_long) \
|
||||
do_arch_entry(x86, compiler, vector_reverse_byte_perm_mask_long, \
|
||||
vector_reverse_byte_perm_mask_long, \
|
||||
vector_reverse_byte_perm_mask_long) \
|
||||
do_stub(compiler, vector_popcount_lut) \
|
||||
do_arch_entry(x86, compiler, vector_popcount_lut, \
|
||||
vector_popcount_lut, vector_popcount_lut) \
|
||||
do_stub(compiler, upper_word_mask) \
|
||||
do_arch_entry(x86, compiler, upper_word_mask, upper_word_mask_addr, \
|
||||
upper_word_mask_addr) \
|
||||
do_stub(compiler, shuffle_byte_flip_mask) \
|
||||
do_arch_entry(x86, compiler, shuffle_byte_flip_mask, \
|
||||
shuffle_byte_flip_mask_addr, \
|
||||
shuffle_byte_flip_mask_addr) \
|
||||
do_stub(compiler, pshuffle_byte_flip_mask) \
|
||||
do_arch_entry(x86, compiler, pshuffle_byte_flip_mask, \
|
||||
pshuffle_byte_flip_mask_addr, \
|
||||
pshuffle_byte_flip_mask_addr) \
|
||||
LP64_ONLY( \
|
||||
/* x86_64 exposes these 3 stubs via a generic entry array */ \
|
||||
/* other arches use arch-specific entries */                      \
|
||||
/* this really needs rationalising */ \
|
||||
do_stub(compiler, string_indexof_linear_ll) \
|
||||
do_stub(compiler, string_indexof_linear_uu) \
|
||||
do_stub(compiler, string_indexof_linear_ul) \
|
||||
do_stub(compiler, pshuffle_byte_flip_mask_sha512) \
|
||||
do_arch_entry(x86, compiler, pshuffle_byte_flip_mask_sha512, \
|
||||
pshuffle_byte_flip_mask_addr_sha512, \
|
||||
pshuffle_byte_flip_mask_addr_sha512) \
|
||||
do_stub(compiler, compress_perm_table32) \
|
||||
do_arch_entry(x86, compiler, compress_perm_table32, \
|
||||
compress_perm_table32, compress_perm_table32) \
|
||||
do_stub(compiler, compress_perm_table64) \
|
||||
do_arch_entry(x86, compiler, compress_perm_table64, \
|
||||
compress_perm_table64, compress_perm_table64) \
|
||||
do_stub(compiler, expand_perm_table32) \
|
||||
do_arch_entry(x86, compiler, expand_perm_table32, \
|
||||
expand_perm_table32, expand_perm_table32) \
|
||||
do_stub(compiler, expand_perm_table64) \
|
||||
do_arch_entry(x86, compiler, expand_perm_table64, \
|
||||
expand_perm_table64, expand_perm_table64) \
|
||||
do_stub(compiler, avx2_shuffle_base64) \
|
||||
do_arch_entry(x86, compiler, avx2_shuffle_base64, \
|
||||
avx2_shuffle_base64, base64_avx2_shuffle_addr) \
|
||||
do_stub(compiler, avx2_input_mask_base64) \
|
||||
do_arch_entry(x86, compiler, avx2_input_mask_base64, \
|
||||
avx2_input_mask_base64, \
|
||||
base64_avx2_input_mask_addr) \
|
||||
do_stub(compiler, avx2_lut_base64) \
|
||||
do_arch_entry(x86, compiler, avx2_lut_base64, \
|
||||
avx2_lut_base64, base64_avx2_lut_addr) \
|
||||
do_stub(compiler, avx2_decode_tables_base64) \
|
||||
do_arch_entry(x86, compiler, avx2_decode_tables_base64, \
|
||||
avx2_decode_tables_base64, \
|
||||
base64_AVX2_decode_tables_addr) \
|
||||
do_stub(compiler, avx2_decode_lut_tables_base64) \
|
||||
do_arch_entry(x86, compiler, avx2_decode_lut_tables_base64, \
|
||||
avx2_decode_lut_tables_base64, \
|
||||
base64_AVX2_decode_LUT_tables_addr) \
|
||||
do_stub(compiler, shuffle_base64) \
|
||||
do_arch_entry(x86, compiler, shuffle_base64, shuffle_base64, \
|
||||
base64_shuffle_addr) \
|
||||
do_stub(compiler, lookup_lo_base64) \
|
||||
do_arch_entry(x86, compiler, lookup_lo_base64, lookup_lo_base64, \
|
||||
base64_vbmi_lookup_lo_addr) \
|
||||
do_stub(compiler, lookup_hi_base64) \
|
||||
do_arch_entry(x86, compiler, lookup_hi_base64, lookup_hi_base64, \
|
||||
base64_vbmi_lookup_hi_addr) \
|
||||
do_stub(compiler, lookup_lo_base64url) \
|
||||
do_arch_entry(x86, compiler, lookup_lo_base64url, \
|
||||
lookup_lo_base64url, \
|
||||
base64_vbmi_lookup_lo_url_addr) \
|
||||
do_stub(compiler, lookup_hi_base64url) \
|
||||
do_arch_entry(x86, compiler, lookup_hi_base64url, \
|
||||
lookup_hi_base64url, \
|
||||
base64_vbmi_lookup_hi_url_addr) \
|
||||
do_stub(compiler, pack_vec_base64) \
|
||||
do_arch_entry(x86, compiler, pack_vec_base64, pack_vec_base64, \
|
||||
base64_vbmi_pack_vec_addr) \
|
||||
do_stub(compiler, join_0_1_base64) \
|
||||
do_arch_entry(x86, compiler, join_0_1_base64, join_0_1_base64, \
|
||||
base64_vbmi_join_0_1_addr) \
|
||||
do_stub(compiler, join_1_2_base64) \
|
||||
do_arch_entry(x86, compiler, join_1_2_base64, join_1_2_base64, \
|
||||
base64_vbmi_join_1_2_addr) \
|
||||
do_stub(compiler, join_2_3_base64) \
|
||||
do_arch_entry(x86, compiler, join_2_3_base64, join_2_3_base64, \
|
||||
base64_vbmi_join_2_3_addr) \
|
||||
do_stub(compiler, encoding_table_base64) \
|
||||
do_arch_entry(x86, compiler, encoding_table_base64, \
|
||||
encoding_table_base64, base64_encoding_table_addr) \
|
||||
do_stub(compiler, decoding_table_base64) \
|
||||
do_arch_entry(x86, compiler, decoding_table_base64, \
|
||||
decoding_table_base64, base64_decoding_table_addr) \
|
||||
) \
|
||||
|
||||
|
||||
#define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(final, 11000 LP64_ONLY(+20000) \
|
||||
WINDOWS_ONLY(+22000) ZGC_ONLY(+20000)) \
|
||||
|
||||
#endif // CPU_X86_STUBDECLARATIONS_HPP
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -28,7 +28,6 @@
|
||||
#include "code/codeBlob.hpp"
|
||||
#include "runtime/continuation.hpp"
|
||||
#include "runtime/stubCodeGenerator.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
|
||||
// Stub Code definitions
|
||||
|
||||
@@ -88,29 +87,29 @@ class StubGenerator: public StubCodeGenerator {
|
||||
address generate_d2i_fixup();
|
||||
address generate_d2l_fixup();
|
||||
|
||||
address generate_count_leading_zeros_lut();
|
||||
address generate_popcount_avx_lut();
|
||||
address generate_iota_indices();
|
||||
address generate_vector_reverse_bit_lut();
|
||||
address generate_count_leading_zeros_lut(const char *stub_name);
|
||||
address generate_popcount_avx_lut(const char *stub_name);
|
||||
address generate_iota_indices(const char *stub_name);
|
||||
address generate_vector_reverse_bit_lut(const char *stub_name);
|
||||
|
||||
address generate_vector_reverse_byte_perm_mask_long();
|
||||
address generate_vector_reverse_byte_perm_mask_int();
|
||||
address generate_vector_reverse_byte_perm_mask_short();
|
||||
address generate_vector_byte_shuffle_mask();
|
||||
address generate_vector_reverse_byte_perm_mask_long(const char *stub_name);
|
||||
address generate_vector_reverse_byte_perm_mask_int(const char *stub_name);
|
||||
address generate_vector_reverse_byte_perm_mask_short(const char *stub_name);
|
||||
address generate_vector_byte_shuffle_mask(const char *stub_name);
|
||||
|
||||
address generate_fp_mask(StubGenStubId stub_id, int64_t mask);
|
||||
address generate_fp_mask(const char *stub_name, int64_t mask);
|
||||
|
||||
address generate_compress_perm_table(StubGenStubId stub_id);
|
||||
address generate_compress_perm_table(const char *stub_name, int32_t esize);
|
||||
|
||||
address generate_expand_perm_table(StubGenStubId stub_id);
|
||||
address generate_expand_perm_table(const char *stub_name, int32_t esize);
|
||||
|
||||
address generate_vector_mask(StubGenStubId stub_id, int64_t mask);
|
||||
address generate_vector_mask(const char *stub_name, int64_t mask);
|
||||
|
||||
address generate_vector_byte_perm_mask();
|
||||
address generate_vector_byte_perm_mask(const char *stub_name);
|
||||
|
||||
address generate_vector_fp_mask(StubGenStubId stub_id, int64_t mask);
|
||||
address generate_vector_fp_mask(const char *stub_name, int64_t mask);
|
||||
|
||||
address generate_vector_custom_i32(StubGenStubId stub_id, Assembler::AvxVectorLen len,
|
||||
address generate_vector_custom_i32(const char *stub_name, Assembler::AvxVectorLen len,
|
||||
int32_t val0, int32_t val1, int32_t val2, int32_t val3,
|
||||
int32_t val4 = 0, int32_t val5 = 0, int32_t val6 = 0, int32_t val7 = 0,
|
||||
int32_t val8 = 0, int32_t val9 = 0, int32_t val10 = 0, int32_t val11 = 0,
|
||||
@@ -180,10 +179,12 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// - If user sets AVX3Threshold=0, then special cases for small blocks sizes operate over
|
||||
// 64 byte vector registers (ZMMs).
|
||||
|
||||
address generate_disjoint_copy_avx3_masked(StubGenStubId stub_id, address* entry);
|
||||
address generate_disjoint_copy_avx3_masked(address* entry, const char *name, int shift,
|
||||
bool aligned, bool is_oop, bool dest_uninitialized);
|
||||
|
||||
address generate_conjoint_copy_avx3_masked(StubGenStubId stub_id, address* entry,
|
||||
address nooverlap_target);
|
||||
address generate_conjoint_copy_avx3_masked(address* entry, const char *name, int shift,
|
||||
address nooverlap_target, bool aligned, bool is_oop,
|
||||
bool dest_uninitialized);
|
||||
|
||||
void arraycopy_avx3_special_cases(XMMRegister xmm, KRegister mask, Register from,
|
||||
Register to, Register count, int shift,
|
||||
@@ -224,21 +225,27 @@ class StubGenerator: public StubCodeGenerator {
|
||||
Register temp, int shift = Address::times_1, int offset = 0);
|
||||
#endif // COMPILER2_OR_JVMCI
|
||||
|
||||
address generate_disjoint_byte_copy(address* entry);
|
||||
address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name);
|
||||
|
||||
address generate_conjoint_byte_copy(address nooverlap_target, address* entry);
|
||||
address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
|
||||
address* entry, const char *name);
|
||||
|
||||
address generate_disjoint_short_copy(address *entry);
|
||||
address generate_disjoint_short_copy(bool aligned, address *entry, const char *name);
|
||||
|
||||
address generate_fill(StubGenStubId stub_id);
|
||||
address generate_fill(BasicType t, bool aligned, const char *name);
|
||||
|
||||
address generate_conjoint_short_copy(address nooverlap_target, address *entry);
|
||||
address generate_disjoint_int_oop_copy(StubGenStubId stub_id, address* entry);
|
||||
address generate_conjoint_int_oop_copy(StubGenStubId stub_id, address nooverlap_target,
|
||||
address *entry);
|
||||
address generate_disjoint_long_oop_copy(StubGenStubId stub_id, address* entry);
|
||||
address generate_conjoint_long_oop_copy(StubGenStubId stub_id, address nooverlap_target,
|
||||
address *entry);
|
||||
address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
|
||||
address *entry, const char *name);
|
||||
address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry,
|
||||
const char *name, bool dest_uninitialized = false);
|
||||
address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target,
|
||||
address *entry, const char *name,
|
||||
bool dest_uninitialized = false);
|
||||
address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry,
|
||||
const char *name, bool dest_uninitialized = false);
|
||||
address generate_conjoint_long_oop_copy(bool aligned, bool is_oop,
|
||||
address nooverlap_target, address *entry,
|
||||
const char *name, bool dest_uninitialized = false);
|
||||
|
||||
// Helper for generating a dynamic type check.
|
||||
// Smashes no registers.
|
||||
@@ -248,7 +255,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
Label& L_success);
|
||||
|
||||
// Generate checkcasting array copy stub
|
||||
address generate_checkcast_copy(StubGenStubId stub_id, address *entry);
|
||||
address generate_checkcast_copy(const char *name, address *entry,
|
||||
bool dest_uninitialized = false);
|
||||
|
||||
// Generate 'unsafe' array copy stub
|
||||
// Though just as safe as the other stubs, it takes an unscaled
|
||||
@@ -256,7 +264,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
//
|
||||
// Examines the alignment of the operands and dispatches
|
||||
// to a long, int, short, or byte copy loop.
|
||||
address generate_unsafe_copy(address byte_copy_entry, address short_copy_entry,
|
||||
address generate_unsafe_copy(const char *name,
|
||||
address byte_copy_entry, address short_copy_entry,
|
||||
address int_copy_entry, address long_copy_entry);
|
||||
|
||||
// Generate 'unsafe' set memory stub
|
||||
@@ -265,7 +274,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
//
|
||||
// Examines the alignment of the operands and dispatches
|
||||
// to an int, short, or byte copy loop.
|
||||
address generate_unsafe_setmemory(address byte_copy_entry);
|
||||
address generate_unsafe_setmemory(const char *name, address byte_copy_entry);
|
||||
|
||||
// Perform range checks on the proposed arraycopy.
|
||||
// Kills temp, but nothing else.
|
||||
@@ -279,7 +288,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
Label& L_failed);
|
||||
|
||||
// Generate generic array copy stubs
|
||||
address generate_generic_copy(address byte_copy_entry, address short_copy_entry,
|
||||
address generate_generic_copy(const char *name,
|
||||
address byte_copy_entry, address short_copy_entry,
|
||||
address int_copy_entry, address oop_copy_entry,
|
||||
address long_copy_entry, address checkcast_copy_entry);
|
||||
|
||||
@@ -294,19 +304,19 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
// ofs and limit are use for multi-block byte array.
// int com.sun.security.provider.MD5.implCompress(byte[] b, int ofs)
address generate_md5_implCompress(StubGenStubId stub_id);
address generate_md5_implCompress(bool multi_block, const char *name);
// SHA stubs
// ofs and limit are use for multi-block byte array.
// int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
address generate_sha1_implCompress(StubGenStubId stub_id);
address generate_sha1_implCompress(bool multi_block, const char *name);
// ofs and limit are use for multi-block byte array.
// int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
address generate_sha256_implCompress(StubGenStubId stub_id);
address generate_sha512_implCompress(StubGenStubId stub_id);
address generate_sha256_implCompress(bool multi_block, const char *name);
address generate_sha512_implCompress(bool multi_block, const char *name);
// Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
address generate_pshuffle_byte_flip_mask_sha512();
@@ -489,7 +499,7 @@ class StubGenerator: public StubCodeGenerator {
// SHA3 stubs
void generate_sha3_stubs();
address generate_sha3_implCompress(StubGenStubId stub_id);
address generate_sha3_implCompress(bool multiBlock, const char *name);
// BASE64 stubs
@@ -585,7 +595,7 @@ class StubGenerator: public StubCodeGenerator {
void generate_string_indexof(address *fnptrs);
#endif
address generate_cont_thaw(StubGenStubId stub_id);
address generate_cont_thaw(const char* label, Continuation::thaw_kind kind);
address generate_cont_thaw();
// TODO: will probably need multiple return barriers depending on return type
@@ -594,8 +604,6 @@ class StubGenerator: public StubCodeGenerator {
address generate_cont_preempt_stub();
// TODO -- delete this as it is not implemented?
//
// Continuation point for throwing of implicit exceptions that are
// not handled in the current activation. Fabricates an exception
// oop and initiates normal exception dispatching in this
@@ -621,7 +629,7 @@ class StubGenerator: public StubCodeGenerator {
address generate_upcall_stub_load_target();
// Specialized stub implementations for UseSecondarySupersTable.
void generate_lookup_secondary_supers_table_stub();
address generate_lookup_secondary_supers_table_stub(u1 super_klass_index);
// Slow path implementation for UseSecondarySupersTable.
address generate_lookup_secondary_supers_table_slow_path_stub();
@@ -634,8 +642,8 @@ class StubGenerator: public StubCodeGenerator {
void generate_compiler_stubs();
void generate_final_stubs();
public:
StubGenerator(CodeBuffer* code, StubGenBlobId blob_id);
public:
StubGenerator(CodeBuffer* code, StubsKind kind);
};
#endif // CPU_X86_STUBGENERATOR_X86_64_HPP
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2021, 2024, Intel Corporation. All rights reserved.
|
||||
* Copyright (c) 2021, 2023, Intel Corporation. All rights reserved.
|
||||
*
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@@ -66,8 +66,7 @@ address StubGenerator::generate_updateBytesAdler32() {
|
||||
assert(UseAdler32Intrinsics, "");
|
||||
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::updateBytesAdler32_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "updateBytesAdler32");
|
||||
address start = __ pc();
|
||||
|
||||
// Choose an appropriate LIMIT for inner loop based on the granularity
|
||||
|
||||
@@ -249,8 +249,7 @@ void StubGenerator::generate_aes_stubs() {
|
||||
// rax - number of processed bytes
|
||||
address StubGenerator::generate_galoisCounterMode_AESCrypt() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::galoisCounterMode_AESCrypt_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "galoisCounterMode_AESCrypt");
|
||||
address start = __ pc();
|
||||
|
||||
const Register in = c_rarg0;
|
||||
@@ -336,8 +335,7 @@ address StubGenerator::generate_galoisCounterMode_AESCrypt() {
|
||||
// rax - number of processed bytes
|
||||
address StubGenerator::generate_avx2_galoisCounterMode_AESCrypt() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::galoisCounterMode_AESCrypt_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "galoisCounterMode_AESCrypt");
|
||||
address start = __ pc();
|
||||
|
||||
const Register in = c_rarg0;
|
||||
@@ -408,8 +406,7 @@ address StubGenerator::generate_avx2_galoisCounterMode_AESCrypt() {
|
||||
// Vector AES Counter implementation
|
||||
address StubGenerator::generate_counterMode_VectorAESCrypt() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::counterMode_AESCrypt_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt");
|
||||
address start = __ pc();
|
||||
|
||||
const Register from = c_rarg0; // source array address
|
||||
@@ -497,8 +494,7 @@ address StubGenerator::generate_counterMode_VectorAESCrypt() {
|
||||
address StubGenerator::generate_counterMode_AESCrypt_Parallel() {
|
||||
assert(UseAES, "need AES instructions and misaligned SSE support");
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::counterMode_AESCrypt_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt");
|
||||
address start = __ pc();
|
||||
|
||||
const Register from = c_rarg0; // source array address
|
||||
@@ -785,8 +781,7 @@ address StubGenerator::generate_counterMode_AESCrypt_Parallel() {
|
||||
address StubGenerator::generate_cipherBlockChaining_decryptVectorAESCrypt() {
|
||||
assert(VM_Version::supports_avx512_vaes(), "need AES instructions and misaligned SSE support");
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_decryptAESCrypt_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
|
||||
address start = __ pc();
|
||||
|
||||
const Register from = c_rarg0; // source array address
|
||||
@@ -1068,8 +1063,7 @@ address StubGenerator::generate_cipherBlockChaining_decryptVectorAESCrypt() {
|
||||
address StubGenerator::generate_aescrypt_encryptBlock() {
|
||||
assert(UseAES, "need AES instructions and misaligned SSE support");
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::aescrypt_encryptBlock_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
|
||||
Label L_doLast;
|
||||
address start = __ pc();
|
||||
|
||||
@@ -1163,8 +1157,7 @@ address StubGenerator::generate_aescrypt_encryptBlock() {
|
||||
address StubGenerator::generate_aescrypt_decryptBlock() {
|
||||
assert(UseAES, "need AES instructions and misaligned SSE support");
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::aescrypt_decryptBlock_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
|
||||
Label L_doLast;
|
||||
address start = __ pc();
|
||||
|
||||
@@ -1265,8 +1258,7 @@ address StubGenerator::generate_aescrypt_decryptBlock() {
|
||||
address StubGenerator::generate_cipherBlockChaining_encryptAESCrypt() {
|
||||
assert(UseAES, "need AES instructions and misaligned SSE support");
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_encryptAESCrypt_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
|
||||
address start = __ pc();
|
||||
|
||||
Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256;
|
||||
@@ -1417,8 +1409,7 @@ address StubGenerator::generate_cipherBlockChaining_encryptAESCrypt() {
|
||||
address StubGenerator::generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
|
||||
assert(UseAES, "need AES instructions and misaligned SSE support");
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_decryptAESCrypt_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
|
||||
address start = __ pc();
|
||||
|
||||
const Register from = c_rarg0; // source array address
|
||||
@@ -1660,8 +1651,7 @@ __ opc(xmm_result3, src_reg); \
|
||||
|
||||
address StubGenerator::generate_electronicCodeBook_encryptAESCrypt() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::electronicCodeBook_encryptAESCrypt_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "electronicCodeBook_encryptAESCrypt");
|
||||
address start = __ pc();
|
||||
|
||||
const Register from = c_rarg0; // source array address
|
||||
@@ -1681,8 +1671,7 @@ address StubGenerator::generate_electronicCodeBook_encryptAESCrypt() {
|
||||
|
||||
address StubGenerator::generate_electronicCodeBook_decryptAESCrypt() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::electronicCodeBook_decryptAESCrypt_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "electronicCodeBook_decryptAESCrypt");
|
||||
address start = __ pc();
|
||||
|
||||
const Register from = c_rarg0; // source array address
|
||||
|
||||
@@ -84,51 +84,74 @@ void StubGenerator::generate_arraycopy_stubs() {
|
||||
address entry_jlong_arraycopy;
|
||||
address entry_checkcast_arraycopy;
|
||||
|
||||
StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(&entry);
|
||||
StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(entry, &entry_jbyte_arraycopy);
|
||||
StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry,
|
||||
"jbyte_disjoint_arraycopy");
|
||||
StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy,
|
||||
"jbyte_arraycopy");
|
||||
|
||||
StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(&entry);
|
||||
StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(entry, &entry_jshort_arraycopy);
|
||||
StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry,
|
||||
"jshort_disjoint_arraycopy");
|
||||
StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy,
|
||||
"jshort_arraycopy");
|
||||
|
||||
StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(StubGenStubId::jint_disjoint_arraycopy_id, &entry);
|
||||
StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(StubGenStubId::jint_arraycopy_id, entry, &entry_jint_arraycopy);
|
||||
StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry,
|
||||
"jint_disjoint_arraycopy");
|
||||
StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, entry,
|
||||
&entry_jint_arraycopy, "jint_arraycopy");
|
||||
|
||||
StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(StubGenStubId::jlong_disjoint_arraycopy_id, &entry);
|
||||
StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(StubGenStubId::jlong_arraycopy_id, entry, &entry_jlong_arraycopy);
|
||||
StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry,
|
||||
"jlong_disjoint_arraycopy");
|
||||
StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry,
|
||||
&entry_jlong_arraycopy, "jlong_arraycopy");
|
||||
if (UseCompressedOops) {
|
||||
StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(StubGenStubId::oop_disjoint_arraycopy_id, &entry);
|
||||
StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(StubGenStubId::oop_arraycopy_id, entry, &entry_oop_arraycopy);
|
||||
StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(StubGenStubId::oop_disjoint_arraycopy_uninit_id, &entry);
|
||||
StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(StubGenStubId::oop_arraycopy_uninit_id, entry, nullptr);
|
||||
StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry,
|
||||
"oop_disjoint_arraycopy");
|
||||
StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry,
|
||||
&entry_oop_arraycopy, "oop_arraycopy");
|
||||
StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry,
|
||||
"oop_disjoint_arraycopy_uninit",
|
||||
/*dest_uninitialized*/true);
|
||||
StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry,
|
||||
nullptr, "oop_arraycopy_uninit",
|
||||
/*dest_uninitialized*/true);
|
||||
} else {
|
||||
StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(StubGenStubId::oop_disjoint_arraycopy_id, &entry);
|
||||
StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(StubGenStubId::oop_arraycopy_id, entry, &entry_oop_arraycopy);
|
||||
StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(StubGenStubId::oop_disjoint_arraycopy_uninit_id, &entry);
|
||||
StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(StubGenStubId::oop_arraycopy_uninit_id, entry, nullptr);
|
||||
StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry,
|
||||
"oop_disjoint_arraycopy");
|
||||
StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry,
|
||||
&entry_oop_arraycopy, "oop_arraycopy");
|
||||
StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry,
|
||||
"oop_disjoint_arraycopy_uninit",
|
||||
/*dest_uninitialized*/true);
|
||||
StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry,
|
||||
nullptr, "oop_arraycopy_uninit",
|
||||
/*dest_uninitialized*/true);
|
||||
}
|
||||
|
||||
StubRoutines::_checkcast_arraycopy = generate_checkcast_copy(StubGenStubId::checkcast_arraycopy_id, &entry_checkcast_arraycopy);
|
||||
StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy(StubGenStubId::checkcast_arraycopy_uninit_id, nullptr);
|
||||
StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
|
||||
StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", nullptr,
|
||||
/*dest_uninitialized*/true);
|
||||
|
||||
StubRoutines::_unsafe_arraycopy = generate_unsafe_copy(entry_jbyte_arraycopy,
|
||||
StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy",
|
||||
entry_jbyte_arraycopy,
|
||||
entry_jshort_arraycopy,
|
||||
entry_jint_arraycopy,
|
||||
entry_jlong_arraycopy);
|
||||
StubRoutines::_generic_arraycopy = generate_generic_copy(entry_jbyte_arraycopy,
|
||||
StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
|
||||
entry_jbyte_arraycopy,
|
||||
entry_jshort_arraycopy,
|
||||
entry_jint_arraycopy,
|
||||
entry_oop_arraycopy,
|
||||
entry_jlong_arraycopy,
|
||||
entry_checkcast_arraycopy);
|
||||
|
||||
StubRoutines::_jbyte_fill = generate_fill(StubGenStubId::jbyte_fill_id);
|
||||
StubRoutines::_jshort_fill = generate_fill(StubGenStubId::jshort_fill_id);
|
||||
StubRoutines::_jint_fill = generate_fill(StubGenStubId::jint_fill_id);
|
||||
StubRoutines::_arrayof_jbyte_fill = generate_fill(StubGenStubId::arrayof_jbyte_fill_id);
|
||||
StubRoutines::_arrayof_jshort_fill = generate_fill(StubGenStubId::arrayof_jshort_fill_id);
|
||||
StubRoutines::_arrayof_jint_fill = generate_fill(StubGenStubId::arrayof_jint_fill_id);
|
||||
StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
|
||||
StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
|
||||
StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
|
||||
StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
|
||||
StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
|
||||
StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
|
||||
|
||||
StubRoutines::_unsafe_setmemory = generate_unsafe_setmemory(StubRoutines::_jbyte_fill);
|
||||
StubRoutines::_unsafe_setmemory = generate_unsafe_setmemory("unsafe_setmemory", StubRoutines::_jbyte_fill);
|
||||
|
||||
// We don't generate specialized code for HeapWord-aligned source
|
||||
// arrays, so just use the code we've already generated
|
||||
@@ -484,50 +507,11 @@ void StubGenerator::copy_bytes_backward(Register from, Register dest,
|
||||
// disjoint_copy_avx3_masked is set to the no-overlap entry point
|
||||
// used by generate_conjoint_[byte/int/short/long]_copy().
|
||||
//
|
||||
address StubGenerator::generate_disjoint_copy_avx3_masked(StubGenStubId stub_id, address* entry) {
// aligned is always false -- x86_64 always uses the unaligned code
const bool aligned = false;
int shift;
bool is_oop;
bool dest_uninitialized;

switch (stub_id) {
case jbyte_disjoint_arraycopy_id:
shift = 0;
is_oop = false;
dest_uninitialized = false;
break;
case jshort_disjoint_arraycopy_id:
shift = 1;
is_oop = false;
dest_uninitialized = false;
break;
case jint_disjoint_arraycopy_id:
shift = 2;
is_oop = false;
dest_uninitialized = false;
break;
case jlong_disjoint_arraycopy_id:
shift = 3;
is_oop = false;
dest_uninitialized = false;
break;
case oop_disjoint_arraycopy_id:
shift = (UseCompressedOops ? 2 : 3);
is_oop = true;
dest_uninitialized = false;
break;
case oop_disjoint_arraycopy_uninit_id:
shift = (UseCompressedOops ? 2 : 3);
is_oop = true;
dest_uninitialized = true;
break;
default:
ShouldNotReachHere();
}

address StubGenerator::generate_disjoint_copy_avx3_masked(address* entry, const char *name,
|
||||
int shift, bool aligned, bool is_oop,
|
||||
bool dest_uninitialized) {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address start = __ pc();
|
||||
|
||||
int avx3threshold = VM_Version::avx3_threshold();
|
||||
@@ -822,50 +806,11 @@ void StubGenerator::arraycopy_avx3_large(Register to, Register from, Register te
|
||||
// c_rarg2 - element count, treated as ssize_t, can be zero
|
||||
//
|
||||
//
|
||||
address StubGenerator::generate_conjoint_copy_avx3_masked(StubGenStubId stub_id, address* entry, address nooverlap_target) {
// aligned is always false -- x86_64 always uses the unaligned code
const bool aligned = false;
int shift;
bool is_oop;
bool dest_uninitialized;

switch (stub_id) {
case jbyte_arraycopy_id:
shift = 0;
is_oop = false;
dest_uninitialized = false;
break;
case jshort_arraycopy_id:
shift = 1;
is_oop = false;
dest_uninitialized = false;
break;
case jint_arraycopy_id:
shift = 2;
is_oop = false;
dest_uninitialized = false;
break;
case jlong_arraycopy_id:
shift = 3;
is_oop = false;
dest_uninitialized = false;
break;
case oop_arraycopy_id:
shift = (UseCompressedOops ? 2 : 3);
is_oop = true;
dest_uninitialized = false;
break;
case oop_arraycopy_uninit_id:
shift = (UseCompressedOops ? 2 : 3);
is_oop = true;
dest_uninitialized = true;
break;
default:
ShouldNotReachHere();
}

address StubGenerator::generate_conjoint_copy_avx3_masked(address* entry, const char *name, int shift,
|
||||
address nooverlap_target, bool aligned,
|
||||
bool is_oop, bool dest_uninitialized) {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address start = __ pc();
|
||||
|
||||
int avx3threshold = VM_Version::avx3_threshold();
|
||||
@@ -1317,7 +1262,9 @@ void StubGenerator::copy64_avx(Register dst, Register src, Register index, XMMRe
|
||||
|
||||
|
||||
// Arguments:
|
||||
// entry - location for return of (post-push) entry
|
||||
// aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
|
||||
// ignored
|
||||
// name - stub name string
|
||||
//
|
||||
// Inputs:
|
||||
// c_rarg0 - source array address
|
||||
@@ -1330,20 +1277,18 @@ void StubGenerator::copy64_avx(Register dst, Register src, Register index, XMMRe
|
||||
// and stored atomically.
|
||||
//
|
||||
// Side Effects:
|
||||
// entry is set to the no-overlap entry point
|
||||
// disjoint_byte_copy_entry is set to the no-overlap entry point
|
||||
// used by generate_conjoint_byte_copy().
|
||||
//
|
||||
address StubGenerator::generate_disjoint_byte_copy(address* entry) {
|
||||
StubGenStubId stub_id = StubGenStubId::jbyte_disjoint_arraycopy_id;
|
||||
// aligned is always false -- x86_64 always uses the unaligned code
|
||||
const bool aligned = false;
|
||||
address StubGenerator::generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
|
||||
#if COMPILER2_OR_JVMCI
|
||||
if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) {
|
||||
return generate_disjoint_copy_avx3_masked(stub_id, entry);
|
||||
return generate_disjoint_copy_avx3_masked(entry, "jbyte_disjoint_arraycopy_avx3", 0,
|
||||
aligned, false, false);
|
||||
}
|
||||
#endif
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address start = __ pc();
|
||||
DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT;
|
||||
|
||||
@@ -1438,8 +1383,9 @@ __ BIND(L_exit);
|
||||
|
||||
|
||||
// Arguments:
|
||||
// entry - location for return of (post-push) entry
|
||||
// nooverlap_target - entry to branch to if no overlap detected
|
||||
// aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
|
||||
// ignored
|
||||
// name - stub name string
|
||||
//
|
||||
// Inputs:
|
||||
// c_rarg0 - source array address
|
||||
@@ -1451,17 +1397,16 @@ __ BIND(L_exit);
|
||||
// dwords or qwords that span cache line boundaries will still be loaded
|
||||
// and stored atomically.
|
||||
//
|
||||
address StubGenerator::generate_conjoint_byte_copy(address nooverlap_target, address* entry) {
|
||||
StubGenStubId stub_id = StubGenStubId::jbyte_arraycopy_id;
|
||||
// aligned is always false -- x86_64 always uses the unaligned code
|
||||
const bool aligned = false;
|
||||
address StubGenerator::generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
|
||||
address* entry, const char *name) {
|
||||
#if COMPILER2_OR_JVMCI
|
||||
if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) {
|
||||
return generate_conjoint_copy_avx3_masked(stub_id, entry, nooverlap_target);
|
||||
return generate_conjoint_copy_avx3_masked(entry, "jbyte_conjoint_arraycopy_avx3", 0,
|
||||
nooverlap_target, aligned, false, false);
|
||||
}
|
||||
#endif
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address start = __ pc();
|
||||
DecoratorSet decorators = IN_HEAP | IS_ARRAY;
|
||||
|
||||
@@ -1548,7 +1493,9 @@ address StubGenerator::generate_conjoint_byte_copy(address nooverlap_target, add
|
||||
|
||||
|
||||
// Arguments:
|
||||
// entry - location for return of (post-push) entry
|
||||
// aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
|
||||
// ignored
|
||||
// name - stub name string
|
||||
//
|
||||
// Inputs:
|
||||
// c_rarg0 - source array address
|
||||
@@ -1561,21 +1508,19 @@ address StubGenerator::generate_conjoint_byte_copy(address nooverlap_target, add
|
||||
// and stored atomically.
|
||||
//
|
||||
// Side Effects:
|
||||
// entry is set to the no-overlap entry point
|
||||
// disjoint_short_copy_entry is set to the no-overlap entry point
|
||||
// used by generate_conjoint_short_copy().
|
||||
//
|
||||
address StubGenerator::generate_disjoint_short_copy(address *entry) {
|
||||
StubGenStubId stub_id = StubGenStubId::jshort_disjoint_arraycopy_id;
|
||||
// aligned is always false -- x86_64 always uses the unaligned code
|
||||
const bool aligned = false;
|
||||
address StubGenerator::generate_disjoint_short_copy(bool aligned, address *entry, const char *name) {
|
||||
#if COMPILER2_OR_JVMCI
|
||||
if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) {
|
||||
return generate_disjoint_copy_avx3_masked(stub_id, entry);
|
||||
return generate_disjoint_copy_avx3_masked(entry, "jshort_disjoint_arraycopy_avx3", 1,
|
||||
aligned, false, false);
|
||||
}
|
||||
#endif
|
||||
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address start = __ pc();
|
||||
DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT;
|
||||
|
||||
@@ -1662,41 +1607,9 @@ __ BIND(L_exit);
|
||||
}
|
||||
|
||||
|
||||
address StubGenerator::generate_fill(StubGenStubId stub_id) {
BasicType t;
bool aligned;

switch (stub_id) {
case jbyte_fill_id:
t = T_BYTE;
aligned = false;
break;
case jshort_fill_id:
t = T_SHORT;
aligned = false;
break;
case jint_fill_id:
t = T_INT;
aligned = false;
break;
case arrayof_jbyte_fill_id:
t = T_BYTE;
aligned = true;
break;
case arrayof_jshort_fill_id:
t = T_SHORT;
aligned = true;
break;
case arrayof_jint_fill_id:
t = T_INT;
aligned = true;
break;
default:
ShouldNotReachHere();
}

address StubGenerator::generate_fill(BasicType t, bool aligned, const char *name) {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address start = __ pc();
|
||||
|
||||
BLOCK_COMMENT("Entry:");
|
||||
@@ -1723,8 +1636,9 @@ address StubGenerator::generate_fill(StubGenStubId stub_id) {
|
||||
|
||||
|
||||
// Arguments:
|
||||
// entry - location for return of (post-push) entry
|
||||
// nooverlap_target - entry to branch to if no overlap detected
|
||||
// aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
|
||||
// ignored
|
||||
// name - stub name string
|
||||
//
|
||||
// Inputs:
|
||||
// c_rarg0 - source array address
|
||||
@@ -1736,18 +1650,16 @@ address StubGenerator::generate_fill(StubGenStubId stub_id) {
|
||||
// or qwords that span cache line boundaries will still be loaded
|
||||
// and stored atomically.
|
||||
//
|
||||
address StubGenerator::generate_conjoint_short_copy(address nooverlap_target, address *entry) {
|
||||
StubGenStubId stub_id = StubGenStubId::jshort_arraycopy_id;
|
||||
// aligned is always false -- x86_64 always uses the unaligned code
|
||||
const bool aligned = false;
|
||||
address StubGenerator::generate_conjoint_short_copy(bool aligned, address nooverlap_target,
|
||||
address *entry, const char *name) {
|
||||
#if COMPILER2_OR_JVMCI
|
||||
if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) {
|
||||
return generate_conjoint_copy_avx3_masked(stub_id, entry, nooverlap_target);
|
||||
return generate_conjoint_copy_avx3_masked(entry, "jshort_conjoint_arraycopy_avx3", 1,
|
||||
nooverlap_target, aligned, false, false);
|
||||
}
|
||||
#endif
|
||||
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address start = __ pc();
|
||||
DecoratorSet decorators = IN_HEAP | IS_ARRAY;
|
||||
|
||||
@@ -1826,9 +1738,10 @@ address StubGenerator::generate_conjoint_short_copy(address nooverlap_target, ad
|
||||
|
||||
|
||||
// Arguments:
|
||||
// stub_id - unique id for stub to generate
|
||||
// entry - location for return of (post-push) entry
|
||||
// is_oop - true => oop array, so generate store check code
|
||||
// aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
|
||||
// ignored
|
||||
// is_oop - true => oop array, so generate store check code
|
||||
// name - stub name string
|
||||
//
|
||||
// Inputs:
|
||||
// c_rarg0 - source array address
|
||||
@@ -1843,39 +1756,18 @@ address StubGenerator::generate_conjoint_short_copy(address nooverlap_target, ad
|
||||
// disjoint_int_copy_entry is set to the no-overlap entry point
|
||||
// used by generate_conjoint_int_oop_copy().
|
||||
//
|
||||
address StubGenerator::generate_disjoint_int_oop_copy(StubGenStubId stub_id, address* entry) {
// aligned is always false -- x86_64 always uses the unaligned code
const bool aligned = false;
bool is_oop;
bool dest_uninitialized;
switch (stub_id) {
case StubGenStubId::jint_disjoint_arraycopy_id:
is_oop = false;
dest_uninitialized = false;
break;
case StubGenStubId::oop_disjoint_arraycopy_id:
assert(UseCompressedOops, "inconsistent oop copy size!");
is_oop = true;
dest_uninitialized = false;
break;
case StubGenStubId::oop_disjoint_arraycopy_uninit_id:
assert(UseCompressedOops, "inconsistent oop copy size!");
is_oop = true;
dest_uninitialized = true;
break;
default:
ShouldNotReachHere();
}

address StubGenerator::generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry,
|
||||
const char *name, bool dest_uninitialized) {
|
||||
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
|
||||
#if COMPILER2_OR_JVMCI
|
||||
if ((!is_oop || bs->supports_avx3_masked_arraycopy()) && VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) {
|
||||
return generate_disjoint_copy_avx3_masked(stub_id, entry);
|
||||
return generate_disjoint_copy_avx3_masked(entry, "jint_disjoint_arraycopy_avx3", 2,
|
||||
aligned, is_oop, dest_uninitialized);
|
||||
}
|
||||
#endif
|
||||
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address start = __ pc();
|
||||
|
||||
Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit;
|
||||
@@ -1961,9 +1853,10 @@ __ BIND(L_exit);
|
||||
|
||||
|
||||
// Arguments:
|
||||
// entry - location for return of (post-push) entry
|
||||
// nooverlap_target - entry to branch to if no overlap detected
|
||||
// aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
|
||||
// ignored
|
||||
// is_oop - true => oop array, so generate store check code
|
||||
// name - stub name string
|
||||
//
|
||||
// Inputs:
|
||||
// c_rarg0 - source array address
|
||||
@@ -1974,39 +1867,18 @@ __ BIND(L_exit);
|
||||
// the hardware handle it. The two dwords within qwords that span
|
||||
// cache line boundaries will still be loaded and stored atomically.
|
||||
//
|
||||
address StubGenerator::generate_conjoint_int_oop_copy(StubGenStubId stub_id, address nooverlap_target, address *entry) {
// aligned is always false -- x86_64 always uses the unaligned code
const bool aligned = false;
bool is_oop;
bool dest_uninitialized;
switch (stub_id) {
case StubGenStubId::jint_arraycopy_id:
is_oop = false;
dest_uninitialized = false;
break;
case StubGenStubId::oop_arraycopy_id:
assert(UseCompressedOops, "inconsistent oop copy size!");
is_oop = true;
dest_uninitialized = false;
break;
case StubGenStubId::oop_arraycopy_uninit_id:
assert(UseCompressedOops, "inconsistent oop copy size!");
is_oop = true;
dest_uninitialized = true;
break;
default:
ShouldNotReachHere();
}

address StubGenerator::generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target,
|
||||
address *entry, const char *name,
|
||||
bool dest_uninitialized) {
|
||||
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
|
||||
#if COMPILER2_OR_JVMCI
|
||||
if ((!is_oop || bs->supports_avx3_masked_arraycopy()) && VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) {
|
||||
return generate_conjoint_copy_avx3_masked(stub_id, entry, nooverlap_target);
|
||||
return generate_conjoint_copy_avx3_masked(entry, "jint_conjoint_arraycopy_avx3", 2,
|
||||
nooverlap_target, aligned, is_oop, dest_uninitialized);
|
||||
}
|
||||
#endif
|
||||
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address start = __ pc();
|
||||
|
||||
Label L_copy_bytes, L_copy_8_bytes, L_exit;
|
||||
@@ -2096,7 +1968,10 @@ __ BIND(L_exit);
|
||||
|
||||
|
||||
// Arguments:
|
||||
// entry - location for return of (post-push) entry
|
||||
// aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
|
||||
// ignored
|
||||
// is_oop - true => oop array, so generate store check code
|
||||
// name - stub name string
|
||||
//
|
||||
// Inputs:
|
||||
// c_rarg0 - source array address
|
||||
@@ -2107,39 +1982,17 @@ __ BIND(L_exit);
|
||||
// disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
|
||||
// no-overlap entry point used by generate_conjoint_long_oop_copy().
|
||||
//
|
||||
address StubGenerator::generate_disjoint_long_oop_copy(StubGenStubId stub_id, address *entry) {
// aligned is always false -- x86_64 always uses the unaligned code
const bool aligned = false;
bool is_oop;
bool dest_uninitialized;
switch (stub_id) {
case StubGenStubId::jlong_disjoint_arraycopy_id:
is_oop = false;
dest_uninitialized = false;
break;
case StubGenStubId::oop_disjoint_arraycopy_id:
assert(!UseCompressedOops, "inconsistent oop copy size!");
is_oop = true;
dest_uninitialized = false;
break;
case StubGenStubId::oop_disjoint_arraycopy_uninit_id:
assert(!UseCompressedOops, "inconsistent oop copy size!");
is_oop = true;
dest_uninitialized = true;
break;
default:
ShouldNotReachHere();
}

address StubGenerator::generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry,
|
||||
const char *name, bool dest_uninitialized) {
|
||||
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
|
||||
#if COMPILER2_OR_JVMCI
|
||||
if ((!is_oop || bs->supports_avx3_masked_arraycopy()) && VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) {
|
||||
return generate_disjoint_copy_avx3_masked(stub_id, entry);
|
||||
return generate_disjoint_copy_avx3_masked(entry, "jlong_disjoint_arraycopy_avx3", 3,
|
||||
aligned, is_oop, dest_uninitialized);
|
||||
}
|
||||
#endif
|
||||
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address start = __ pc();
|
||||
|
||||
Label L_copy_bytes, L_copy_8_bytes, L_exit;
|
||||
@@ -2231,48 +2084,28 @@ address StubGenerator::generate_disjoint_long_oop_copy(StubGenStubId stub_id, ad
|
||||
|
||||
|
||||
// Arguments:
|
||||
// entry - location for return of (post-push) entry
|
||||
// nooverlap_target - entry to branch to if no overlap detected
|
||||
// aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
|
||||
// ignored
|
||||
// is_oop - true => oop array, so generate store check code
|
||||
// name - stub name string
|
||||
//
|
||||
// Inputs:
|
||||
// c_rarg0 - source array address
|
||||
// c_rarg1 - destination array address
|
||||
// c_rarg2 - element count, treated as ssize_t, can be zero
|
||||
//
|
||||
address StubGenerator::generate_conjoint_long_oop_copy(StubGenStubId stub_id, address nooverlap_target, address *entry) {
// aligned is always false -- x86_64 always uses the unaligned code
const bool aligned = false;
bool is_oop;
bool dest_uninitialized;
switch (stub_id) {
case StubGenStubId::jlong_arraycopy_id:
is_oop = false;
dest_uninitialized = false;
break;
case StubGenStubId::oop_arraycopy_id:
assert(!UseCompressedOops, "inconsistent oop copy size!");
is_oop = true;
dest_uninitialized = false;
break;
case StubGenStubId::oop_arraycopy_uninit_id:
assert(!UseCompressedOops, "inconsistent oop copy size!");
is_oop = true;
dest_uninitialized = true;
break;
default:
ShouldNotReachHere();
}

address StubGenerator::generate_conjoint_long_oop_copy(bool aligned, bool is_oop, address nooverlap_target,
|
||||
address *entry, const char *name,
|
||||
bool dest_uninitialized) {
|
||||
BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
|
||||
#if COMPILER2_OR_JVMCI
|
||||
if ((!is_oop || bs->supports_avx3_masked_arraycopy()) && VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) {
|
||||
return generate_conjoint_copy_avx3_masked(stub_id, entry, nooverlap_target);
|
||||
return generate_conjoint_copy_avx3_masked(entry, "jlong_conjoint_arraycopy_avx3", 3,
|
||||
nooverlap_target, aligned, is_oop, dest_uninitialized);
|
||||
}
|
||||
#endif
|
||||
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address start = __ pc();
|
||||
|
||||
Label L_copy_bytes, L_copy_8_bytes, L_exit;
|
||||
@@ -2391,19 +2224,7 @@ void StubGenerator::generate_type_check(Register sub_klass,
|
||||
// rax == 0 - success
|
||||
// rax == -1^K - failure, where K is partial transfer count
|
||||
//
|
||||
address StubGenerator::generate_checkcast_copy(StubGenStubId stub_id, address *entry) {

bool dest_uninitialized;
switch (stub_id) {
case StubGenStubId::checkcast_arraycopy_id:
dest_uninitialized = false;
break;
case StubGenStubId::checkcast_arraycopy_uninit_id:
dest_uninitialized = true;
break;
default:
ShouldNotReachHere();
}
address StubGenerator::generate_checkcast_copy(const char *name, address *entry, bool dest_uninitialized) {
|
||||
|
||||
Label L_load_element, L_store_element, L_do_card_marks, L_done;
|
||||
|
||||
@@ -2433,7 +2254,7 @@ address StubGenerator::generate_checkcast_copy(StubGenStubId stub_id, address *e
|
||||
// checked.
|
||||
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address start = __ pc();
|
||||
|
||||
__ enter(); // required for proper stackwalking of RuntimeStub frame
|
||||
@@ -2609,7 +2430,8 @@ address StubGenerator::generate_checkcast_copy(StubGenStubId stub_id, address *e
|
||||
// Examines the alignment of the operands and dispatches
|
||||
// to a long, int, short, or byte copy loop.
|
||||
//
|
||||
address StubGenerator::generate_unsafe_copy(address byte_copy_entry, address short_copy_entry,
|
||||
address StubGenerator::generate_unsafe_copy(const char *name,
|
||||
address byte_copy_entry, address short_copy_entry,
|
||||
address int_copy_entry, address long_copy_entry) {
|
||||
|
||||
Label L_long_aligned, L_int_aligned, L_short_aligned;
|
||||
@@ -2623,8 +2445,7 @@ address StubGenerator::generate_unsafe_copy(address byte_copy_entry, address sho
|
||||
const Register bits = rax; // test copy of low bits
|
||||
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::unsafe_arraycopy_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address start = __ pc();
|
||||
|
||||
__ enter(); // required for proper stackwalking of RuntimeStub frame
|
||||
@@ -2757,10 +2578,10 @@ static void do_setmemory_atomic_loop(USM_TYPE type, Register dest,
|
||||
// Examines the alignment of the operands and dispatches
|
||||
// to an int, short, or byte fill loop.
|
||||
//
|
||||
address StubGenerator::generate_unsafe_setmemory(address unsafe_byte_fill) {
|
||||
address StubGenerator::generate_unsafe_setmemory(const char *name,
|
||||
address unsafe_byte_fill) {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::unsafe_setmemory_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address start = __ pc();
|
||||
__ enter(); // required for proper stackwalking of RuntimeStub frame
|
||||
|
||||
@@ -2903,7 +2724,8 @@ void StubGenerator::arraycopy_range_checks(Register src, // source array oop
|
||||
// rax == 0 - success
|
||||
// rax == -1^K - failure, where K is partial transfer count
|
||||
//
|
||||
address StubGenerator::generate_generic_copy(address byte_copy_entry, address short_copy_entry,
|
||||
address StubGenerator::generate_generic_copy(const char *name,
|
||||
address byte_copy_entry, address short_copy_entry,
|
||||
address int_copy_entry, address oop_copy_entry,
|
||||
address long_copy_entry, address checkcast_copy_entry) {
|
||||
|
||||
@@ -2929,8 +2751,7 @@ address StubGenerator::generate_generic_copy(address byte_copy_entry, address sh
|
||||
if (advance < 0) advance += modulus;
|
||||
if (advance > 0) __ nop(advance);
|
||||
}
|
||||
StubGenStubId stub_id = StubGenStubId::generic_arraycopy_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
|
||||
// Short-hop target to L_failed. Makes for denser prologue code.
|
||||
__ BIND(L_failed_0);
|
||||
|
||||
@@ -112,8 +112,7 @@ void StubGenerator::generate_chacha_stubs() {
|
||||
/* The 2-block AVX/AVX2-enabled ChaCha20 block function implementation */
|
||||
address StubGenerator::generate_chacha20Block_avx() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::chacha20Block_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "chacha20Block");
|
||||
address start = __ pc();
|
||||
|
||||
Label L_twoRounds;
|
||||
@@ -301,8 +300,7 @@ address StubGenerator::generate_chacha20Block_avx() {
|
||||
/* The 4-block AVX512-enabled ChaCha20 block function implementation */
|
||||
address StubGenerator::generate_chacha20Block_avx512() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::chacha20Block_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "chacha20Block");
|
||||
address start = __ pc();
|
||||
|
||||
Label L_twoRounds;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2024, Intel Corporation. All rights reserved.
|
||||
* Copyright (c) 2016, 2021, Intel Corporation. All rights reserved.
|
||||
* Intel Math Library (LIBM) Source Code
|
||||
*
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
@@ -173,8 +173,7 @@
|
||||
#define __ _masm->
|
||||
|
||||
address StubGenerator::generate_libmCos() {
|
||||
StubGenStubId stub_id = StubGenStubId::dcos_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "libmCos");
|
||||
address start = __ pc();
|
||||
|
||||
Label L_2TAG_PACKET_0_0_1, L_2TAG_PACKET_1_0_1, L_2TAG_PACKET_2_0_1, L_2TAG_PACKET_3_0_1;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2024, Intel Corporation. All rights reserved.
|
||||
* Copyright (c) 2016, 2021, Intel Corporation. All rights reserved.
|
||||
* Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
|
||||
* Intel Math Library (LIBM) Source Code
|
||||
*
|
||||
@@ -165,8 +165,7 @@ ATTRIBUTE_ALIGNED(4) static const juint _INF[] =
|
||||
#define __ _masm->
|
||||
|
||||
address StubGenerator::generate_libmExp() {
|
||||
StubGenStubId stub_id = StubGenStubId::dexp_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "libmExp");
|
||||
address start = __ pc();
|
||||
|
||||
Label L_2TAG_PACKET_0_0_2, L_2TAG_PACKET_1_0_2, L_2TAG_PACKET_2_0_2, L_2TAG_PACKET_3_0_2;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2023, 2024, Intel Corporation. All rights reserved.
|
||||
* Copyright (c) 2023, Intel Corporation. All rights reserved.
|
||||
* Intel Math Library (LIBM) Source Code
|
||||
*
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
@@ -73,8 +73,7 @@ ATTRIBUTE_ALIGNED(32) static const uint64_t CONST_e307[] = {
|
||||
|
||||
address StubGenerator::generate_libmFmod() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubGenStubId stub_id = StubGenStubId::fmod_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "libmFmod");
|
||||
address start = __ pc();
|
||||
__ enter(); // required for proper stackwalking of RuntimeStub frame
|
||||
|
||||
|
||||
@@ -82,8 +82,7 @@ void StubGenerator::generate_ghash_stubs() {
|
||||
address StubGenerator::generate_ghash_processBlocks() {
|
||||
__ align(CodeEntryAlignment);
|
||||
Label L_ghash_loop, L_exit;
|
||||
StubGenStubId stub_id = StubGenStubId::ghash_processBlocks_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
|
||||
address start = __ pc();
|
||||
|
||||
const Register state = c_rarg0;
|
||||
@@ -219,8 +218,7 @@ address StubGenerator::generate_ghash_processBlocks() {
|
||||
address StubGenerator::generate_avx_ghash_processBlocks() {
|
||||
__ align(CodeEntryAlignment);
|
||||
|
||||
StubGenStubId stub_id = StubGenStubId::ghash_processBlocks_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
|
||||
address start = __ pc();
|
||||
|
||||
// arguments
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.