Mirror of https://github.com/JetBrains/JetBrainsRuntime.git
Synced 2026-01-22 16:30:53 +01:00

Compare commits: lbourges/W ... bookmark3 (1507 commits)
[Commit list (columns: Author, SHA1, Date): 1507 commits in this comparison, from 1594a68190 through 471d63c91f. Only the abbreviated SHA1 values survived extraction; the author, date, and commit message columns were not captured.]
.github/README.md (vendored): 2 changed lines
@@ -24,7 +24,7 @@ can be found on the [releases page](https://github.com/JetBrains/JetBrainsRuntim

| IDE Version | Latest JBR | Date Released |
|-------------|---------------------------------------------------------------------------------------------------------|---------------|
| 2025.1 | [21.0.5-b792.48](https://github.com/JetBrains/JetBrainsRuntime/releases/tag/jbr-release-21.0.5b792.48) | 20-Jan-2025 |
| 2025.1 | [21.0.6-b872.80](https://github.com/JetBrains/JetBrainsRuntime/releases/tag/jbr-release-21.0.6b872.80) | 03-Feb-2025 |
| 2024.3 | [21.0.5-b631.28](https://github.com/JetBrains/JetBrainsRuntime/releases/tag/jbr-release-21.0.5b631.28) | 26-Nov-2024 |
| 2024.2 | [21.0.4-b509.30](https://github.com/JetBrains/JetBrainsRuntime/releases/tag/jbr-release-21.0.5b509.30) | 26-Nov-2024 |
| 2024.1 | [21.0.2-b346.3](https://github.com/JetBrains/JetBrainsRuntime/releases/tag/jbr-release-21.0.2b346.3) | 30-Jan-2024 |
Makefile: 11 changed lines
@@ -1,5 +1,5 @@
#
# Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

@@ -24,8 +24,9 @@
#

###
### This file is just a very small wrapper needed to run the real make/Init.gmk.
### It also performs some sanity checks on make.
### This file is just a very small wrapper which will include make/PreInit.gmk,
### where the real work is done. This wrapper also performs some sanity checks
### on make that must be done before we can include another file.
###

# The shell code below will be executed on /usr/bin/make on Solaris, but not in GNU Make.

@@ -60,5 +61,5 @@ else
endif
topdir := $(strip $(patsubst %/, %, $(dir $(makefile_path))))

# ... and then we can include the real makefile
include $(topdir)/make/Init.gmk
# ... and then we can include the real makefile to bootstrap the build
include $(topdir)/make/PreInit.gmk
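For context, a minimal sketch of how this wrapper is normally driven; the target name is an example, and the wrapper itself only forwards into make/PreInit.gmk as the new comment above describes.

```sh
# Illustrative invocation; 'images' is an example target.
bash configure   # creates a configuration (spec) under the build directory
make images      # the top-level Makefile includes make/PreInit.gmk and delegates
```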
@@ -127,7 +127,7 @@ if [ "$VERBOSE" = true ] ; then
echo "Will generate IDEA project files in \"$IDEA_OUTPUT\" for project \"$TOPLEVEL_DIR\""
fi

cd $TOP ; make idea-gen-config MAKEOVERRIDES= TOPLEVEL_DIR="$TOPLEVEL_DIR" \
cd $TOP ; make idea-gen-config ALLOW=TOPLEVEL_DIR,IDEA_OUTPUT_PARENT,IDEA_OUTPUT,MODULES TOPLEVEL_DIR="$TOPLEVEL_DIR" \
IDEA_OUTPUT_PARENT="$IDEA_OUTPUT_PARENT" IDEA_OUTPUT="$IDEA_OUTPUT" MODULES="$*" $CONF_ARG || exit 1
cd $SCRIPT_DIR
@@ -217,10 +217,10 @@ file as the first include line. Declarations needed by other files
should be put in the .hpp file, and not in the .inline.hpp file. This
rule exists to resolve problems with circular dependencies between
.inline.hpp files.</p></li>
<li><p>All .cpp files include precompiled.hpp as the first include
line.</p></li>
<li><p>precompiled.hpp is just a build time optimization, so don't rely
on it to resolve include problems.</p></li>
<li><p>Some build configurations use precompiled headers to speed up the
build times. The precompiled headers are included in the precompiled.hpp
file. Note that precompiled.hpp is just a build time optimization, so
don't rely on it to resolve include problems.</p></li>
<li><p>Keep the include lines alphabetically sorted.</p></li>
<li><p>Put conditional inclusions (<code>#if ...</code>) at the end of
the include list.</p></li>
@@ -150,10 +150,10 @@ the first include line. Declarations needed by other files should be put
in the .hpp file, and not in the .inline.hpp file. This rule exists to
resolve problems with circular dependencies between .inline.hpp files.

* All .cpp files include precompiled.hpp as the first include line.

* precompiled.hpp is just a build time optimization, so don't rely on
it to resolve include problems.
* Some build configurations use precompiled headers to speed up the
build times. The precompiled headers are included in the precompiled.hpp
file. Note that precompiled.hpp is just a build time optimization, so
don't rely on it to resolve include problems.

* Keep the include lines alphabetically sorted.
@@ -55,8 +55,10 @@ done

log "Signing jmod files"
JMODS_DIR="$APPLICATION_PATH/Contents/Home/jmods"
JMOD_EXE="$APPLICATION_PATH/Contents/Home/bin/jmod"
JMOD_EXE="$BOOT_JDK/bin/jmod"
if [ -d "$JMODS_DIR" ]; then
log "processing jmods"

for jmod_file in "$JMODS_DIR"/*.jmod; do
log "Processing $jmod_file"

@@ -64,16 +66,16 @@ if [ -d "$JMODS_DIR" ]; then
rm -rf "$TMP_DIR"
mkdir "$TMP_DIR"

log "Unzipping $jmod_file"
log "Unzipping $jmod_file"
$JMOD_EXE extract --dir "$TMP_DIR" "$jmod_file" >/dev/null
log "Removing $jmod_file"
rm -f "$jmod_file"

log "Signing dylibs in $TMP_DIR"
find "$TMP_DIR" \
-type f \( -name "*.dylib" -o -name "*.so" -o -perm +111 -o -name jarsigner -o -name jnativescan -o -name jdeps -o -name jpackageapplauncher -o -name jspawnhelper -o -name jar -o -name javap -o -name jdeprscan -o -name jfr -o -name rmiregistry -o -name java -o -name jhsdb -o -name jstatd -o -name jstatd -o -name jpackage -o -name keytool -o -name jmod -o -name jlink -o -name jimage -o -name jstack -o -name jcmd -o -name jps -o -name jmap -o -name jstat -o -name jinfo -o -name jshell -o -name jwebserver -o -name javac -o -name serialver -o -name jrunscript -o -name jdb -o -name jconsole -o -name javadoc \) \
-exec sh -c '"$1" --timestamp -v -s "$2" --options=runtime --force --entitlements "$3" "$4" || exit 1' sh "$SIGN_UTILITY" "$JB_DEVELOPER_CERT" "$SCRIPT_DIR/entitlements.xml" {} \;

log "Removing $jmod_file"
rm -f "$jmod_file"
cmd="$JMOD_EXE create --class-path $TMP_DIR/classes"

# Check each directory and add to the command if it exists
@@ -84,6 +86,8 @@ if [ -d "$JMODS_DIR" ]; then
[ -d "$TMP_DIR/legal" ] && cmd="$cmd --legal-notices $TMP_DIR/legal"
[ -d "$TMP_DIR/man" ] && cmd="$cmd --man-pages $TMP_DIR/man"

log "Creating jmod file"
log "$cmd"
# Add the output file
cmd="$cmd $jmod_file"

@@ -93,6 +97,41 @@ if [ -d "$JMODS_DIR" ]; then
log "Removing $TMP_DIR"
rm -rf "$TMP_DIR"
done

log "Repack java.base.jmod with new hashes of modules"
hash_modules=$($JMOD_EXE describe $JMODS_DIR/java.base.jmod | grep hashes | awk '{print $2}' | tr '\n' '|' | sed s/\|$//) || exit $?

TMP_DIR="$JMODS_DIR/tmp"
rm -rf "$TMP_DIR"
mkdir "$TMP_DIR"

jmod_file="$JMODS_DIR/java.base.jmod"
log "Unzipping $jmod_file"
$JMOD_EXE extract --dir "$TMP_DIR" "$jmod_file" >/dev/null

log "Removing java.base.jmod"
rm -f "$jmod_file"

cmd="$JMOD_EXE create --class-path $TMP_DIR/classes --hash-modules \"$hash_modules\" --module-path $JMODS_DIR"

# Check each directory and add to the command if it exists
[ -d "$TMP_DIR/bin" ] && cmd="$cmd --cmds $TMP_DIR/bin"
[ -d "$TMP_DIR/conf" ] && cmd="$cmd --config $TMP_DIR/conf"
[ -d "$TMP_DIR/lib" ] && cmd="$cmd --libs $TMP_DIR/lib"
[ -d "$TMP_DIR/include" ] && cmd="$cmd --header-files $TMP_DIR/include"
[ -d "$TMP_DIR/legal" ] && cmd="$cmd --legal-notices $TMP_DIR/legal"
[ -d "$TMP_DIR/man" ] && cmd="$cmd --man-pages $TMP_DIR/man"

log "Creating jmod file"
log "$cmd"
# Add the output file
cmd="$cmd $jmod_file"

# Execute the command
eval $cmd

log "Removing $TMP_DIR"
rm -rf "$TMP_DIR"
else
echo "Directory '$JMODS_DIR' does not exist. Skipping signing of jmod files."
fi
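For readers skimming the hunks above, the per-jmod round trip they implement boils down to the sketch below. The module name and paths are placeholders, the signing step assumes the sign utility is codesign, and the real script additionally rebuilds java.base.jmod with --hash-modules as shown in the last hunk.

```sh
# Sketch of the extract / sign / re-create cycle for one jmod (illustrative).
JMOD_EXE="$BOOT_JDK/bin/jmod"
jmod_file="$JMODS_DIR/jdk.example.jmod"   # hypothetical module name
TMP_DIR=$(mktemp -d)

"$JMOD_EXE" extract --dir "$TMP_DIR" "$jmod_file" >/dev/null
rm -f "$jmod_file"

# Sign the native libraries; assumes the sign utility is codesign.
find "$TMP_DIR" -name '*.dylib' \
  -exec codesign --timestamp --options=runtime --force -s "$JB_DEVELOPER_CERT" {} \;

# Re-create the jmod from the signed contents.
cmd="$JMOD_EXE create --class-path $TMP_DIR/classes"
[ -d "$TMP_DIR/lib" ] && cmd="$cmd --libs $TMP_DIR/lib"
eval "$cmd $jmod_file"

rm -rf "$TMP_DIR"
```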
@@ -1,5 +1,5 @@
#
# Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

@@ -108,6 +108,7 @@ help:
$(info $(_) MICRO="OPT1=x;OPT2=y" # Control the MICRO test harness, use 'make test-only MICRO=help' to list)
$(info $(_) TEST_OPTS="OPT1=x;..." # Generic control of all test harnesses)
$(info $(_) TEST_VM_OPTS="ARG ..." # Same as setting TEST_OPTS to VM_OPTIONS="ARG ...")
$(info $(_) ALLOW="FOO,BAR" # Do not warn that FOO and BAR are non-control variables)
$(info )
$(if $(all_confs), $(info Available configurations in $(build_dir):) $(foreach var,$(all_confs),$(info * $(var))), \
$(info No configurations were found in $(build_dir).) $(info Run 'bash configure' to create a configuration.))
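The help entries above correspond to invocations along these lines; the targets and option values are illustrative examples, not commands taken from the repository.

```sh
make test-only MICRO=help                    # list the options of the MICRO harness
make test TEST_VM_OPTS="-Xmx2g -verbose:gc"  # forwarded to all harnesses as VM_OPTIONS
make images ALLOW="FOO,BAR" FOO=1 BAR=2      # FOO and BAR no longer trigger the warning
```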
make/Init.gmk: 312 changed lines
@@ -1,5 +1,5 @@
#
# Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it

@@ -24,9 +24,11 @@
#

################################################################################
# This is the bootstrapping part of the build. This file is included from the
# top level Makefile, and is responsible for launching the Main.gmk file with
# the proper make and the proper make arguments.
# Init.gmk sits between PreInit.gmk and Main.gmk when bootstrapping the build.
# It is called from PreInit.gmk, and its main responsibility is to launch
# Main.gmk with the proper make and the proper make arguments.
# PreMain.gmk has provided us with a proper SPEC. This allows us to use the
# value of $(MAKE) for all further make calls.
################################################################################

# This must be the first rule

@@ -37,249 +39,68 @@ default:
# serially, regardless of -j.
.NOTPARALLEL:

ifeq ($(HAS_SPEC), )
##############################################################################
# This is the default mode. We have not been recursively called with a SPEC.
##############################################################################
include $(SPEC)

# Include our helper functions.
include $(topdir)/make/InitSupport.gmk
include $(TOPDIR)/make/common/MakeBase.gmk

# Here are "global" targets, i.e. targets that can be executed without having
# a configuration. This will define ALL_GLOBAL_TARGETS.
include $(topdir)/make/Global.gmk
# Our helper functions.
include $(TOPDIR)/make/InitSupport.gmk
include $(TOPDIR)/make/common/LogUtils.gmk

# Targets provided by Init.gmk.
ALL_INIT_TARGETS := print-modules print-targets print-configuration \
print-tests reconfigure pre-compare-build post-compare-build
# Parse COMPARE_BUILD (for makefile development)
$(eval $(call ParseCompareBuild))

# CALLED_TARGETS is the list of targets that the user provided,
# or "default" if unspecified.
CALLED_TARGETS := $(if $(MAKECMDGOALS), $(MAKECMDGOALS), default)
# Setup reproducible build environment
$(eval $(call SetupReproducibleBuild))

# Extract non-global targets that require a spec file.
CALLED_SPEC_TARGETS := $(filter-out $(ALL_GLOBAL_TARGETS), $(CALLED_TARGETS))

# If we have only global targets, or if we are called with -qp (assuming an
# external part, e.g. bash completion, is trying to understand our targets),
# we will skip SPEC location and the sanity checks.
ifeq ($(CALLED_SPEC_TARGETS), )
ONLY_GLOBAL_TARGETS := true
endif
ifeq ($(findstring p, $(MAKEFLAGS))$(findstring q, $(MAKEFLAGS)), pq)
ONLY_GLOBAL_TARGETS := true
endif

ifeq ($(ONLY_GLOBAL_TARGETS), true)
############################################################################
# We have only global targets, or are called with -pq.
############################################################################

ifeq ($(wildcard $(SPEC)), )
# If we have no SPEC provided, we will just make a "best effort" target list.
# First try to grab any available pre-existing main-targets.gmk.
main_targets_file := $(firstword $(wildcard $(build_dir)/*/make-support/main-targets.gmk))
ifneq ($(main_targets_file), )
# Extract the SPEC that corresponds to this main-targets.gmk file.
SPEC := $(patsubst %/make-support/main-targets.gmk, %/spec.gmk, $(main_targets_file))
else
# None found, pick an arbitrary SPEC for which to generate a file
SPEC := $(firstword $(all_spec_files))
endif
endif

ifneq ($(wildcard $(SPEC)), )
$(eval $(call DefineMainTargets, LAZY, $(SPEC)))
else
# If we have no configurations we can not provide any main targets.
ALL_MAIN_TARGETS :=
endif

ALL_TARGETS := $(sort $(ALL_GLOBAL_TARGETS) $(ALL_MAIN_TARGETS) $(ALL_INIT_TARGETS))

# Just list all our targets.
$(ALL_TARGETS):

.PHONY: $(ALL_TARGETS)

else
############################################################################
# This is the normal case, we have been called from the command line by the
# user and we need to call ourself back with a proper SPEC.
# We have at least one non-global target, so we need to find a spec file.
############################################################################

# Basic checks on environment and command line.
$(eval $(call CheckControlVariables))
$(eval $(call CheckDeprecatedEnvironment))
$(eval $(call CheckInvalidMakeFlags))

# Check that CONF_CHECK is valid.
$(eval $(call ParseConfCheckOption))

# Check that the LOG given is valid, and set LOG_LEVEL, LOG_NOFILE, MAKE_LOG_VARS and MAKE_LOG_FLAGS.
# If no LOG= was given on command line, but we have a non-standard default
# value, use that instead and re-parse log level.
ifeq ($(LOG), )
ifneq ($(DEFAULT_LOG), )
override LOG := $(DEFAULT_LOG)
$(eval $(call ParseLogLevel))

# After this SPECS contain 1..N spec files (otherwise ParseConfAndSpec fails).
$(eval $(call ParseConfAndSpec))

# Extract main targets from Main.gmk using the spec(s) provided. In theory,
# with multiple specs, we should find the intersection of targets provided
# by all specs, but we approximate this by an arbitrary spec from the list.
# This will setup ALL_MAIN_TARGETS.
$(eval $(call DefineMainTargets, FORCE, $(firstword $(SPECS))))

# Separate called targets depending on type.
INIT_TARGETS := $(filter $(ALL_INIT_TARGETS), $(CALLED_SPEC_TARGETS))
MAIN_TARGETS := $(filter $(ALL_MAIN_TARGETS), $(CALLED_SPEC_TARGETS))
SEQUENTIAL_TARGETS := $(filter dist-clean clean%, $(MAIN_TARGETS))
PARALLEL_TARGETS := $(filter-out $(SEQUENTIAL_TARGETS), $(MAIN_TARGETS))

# The spec files depend on the autoconf source code. This check makes sure
# the configuration is up to date after changes to configure.
$(SPECS): $(wildcard $(topdir)/make/autoconf/*) \
$(if $(CUSTOM_CONFIG_DIR), $(wildcard $(CUSTOM_CONFIG_DIR)/*)) \
$(addprefix $(topdir)/make/conf/, version-numbers.conf branding.conf) \
$(if $(CUSTOM_CONF_DIR), $(wildcard $(addprefix $(CUSTOM_CONF_DIR)/, \
version-numbers.conf branding.conf)))
ifeq ($(CONF_CHECK), fail)
@echo Error: The configuration is not up to date for \
"'$(lastword $(subst /, , $(dir $@)))'."
$(call PrintConfCheckFailed)
@exit 2
else ifeq ($(CONF_CHECK), auto)
@echo Note: The configuration is not up to date for \
"'$(lastword $(subst /, , $(dir $@)))'."
@( cd $(topdir) && \
$(MAKE) $(MFLAGS) $(MAKE_LOG_FLAGS) -r -R -f $(topdir)/make/Init.gmk \
SPEC=$@ HAS_SPEC=true ACTUAL_TOPDIR=$(topdir) \
reconfigure )
else ifeq ($(CONF_CHECK), ignore)
# Do nothing
endif
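The CONF_CHECK handling above is driven from the make command line; illustrative invocations (the target name is an example) might look like this.

```sh
make images CONF_CHECK=fail     # abort if the spec is older than the configure sources
make images CONF_CHECK=auto     # re-run configure automatically, as in the 'auto' branch
make images CONF_CHECK=ignore   # skip the staleness check entirely
```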
# Do not let make delete spec files even if aborted while doing a reconfigure
.PRECIOUS: $(SPECS)

# Unless reconfigure is explicitly called, let all main targets depend on
# the spec files to be up to date.
ifeq ($(findstring reconfigure, $(INIT_TARGETS)), )
$(MAIN_TARGETS): $(SPECS)
endif

make-info:
ifneq ($(findstring $(LOG_LEVEL), info debug trace), )
$(info Running make as '$(strip $(MAKE) $(MFLAGS) \
$(COMMAND_LINE_VARIABLES) $(MAKECMDGOALS))')
endif

MAKE_INIT_WITH_SPEC_ARGUMENTS := ACTUAL_TOPDIR=$(topdir) \
USER_MAKE_VARS="$(USER_MAKE_VARS)" MAKE_LOG_FLAGS=$(MAKE_LOG_FLAGS) \
$(MAKE_LOG_VARS) \
INIT_TARGETS="$(INIT_TARGETS)" \
SEQUENTIAL_TARGETS="$(SEQUENTIAL_TARGETS)" \
PARALLEL_TARGETS="$(PARALLEL_TARGETS)"

# Now the init and main targets will be called, once for each SPEC. The
# recipe will be run once for every target specified, but we only want to
# execute the recipe a single time, hence the TARGET_DONE with a dummy
# command if true.
# The COMPARE_BUILD part implements special support for makefile development.
$(ALL_INIT_TARGETS) $(ALL_MAIN_TARGETS): make-info
@$(if $(TARGET_DONE), \
true \
, \
( cd $(topdir) && \
$(foreach spec, $(SPECS), \
$(MAKE) $(MFLAGS) $(MAKE_LOG_FLAGS) -r -R -j 1 -f $(topdir)/make/Init.gmk \
SPEC=$(spec) HAS_SPEC=true $(MAKE_INIT_WITH_SPEC_ARGUMENTS) \
main && \
$(if $(and $(COMPARE_BUILD), $(PARALLEL_TARGETS)), \
$(MAKE) $(MFLAGS) $(MAKE_LOG_FLAGS) -r -R -f $(topdir)/make/Init.gmk \
SPEC=$(spec) HAS_SPEC=true ACTUAL_TOPDIR=$(topdir) \
COMPARE_BUILD="$(COMPARE_BUILD)" pre-compare-build && \
$(MAKE) $(MFLAGS) $(MAKE_LOG_FLAGS) -r -R -j 1 -f $(topdir)/make/Init.gmk \
SPEC=$(spec) HAS_SPEC=true $(MAKE_INIT_WITH_SPEC_ARGUMENTS) \
COMPARE_BUILD="$(COMPARE_BUILD):NODRYRUN=true" main && \
$(MAKE) $(MFLAGS) $(MAKE_LOG_FLAGS) -r -R -f $(topdir)/make/Init.gmk \
SPEC=$(spec) HAS_SPEC=true ACTUAL_TOPDIR=$(topdir) \
COMPARE_BUILD="$(COMPARE_BUILD):NODRYRUN=true" post-compare-build && \
) \
) true ) \
$(eval TARGET_DONE=true) \
)

.PHONY: $(ALL_MAIN_TARGETS) $(ALL_INIT_TARGETS)

endif # $(ONLY_GLOBAL_TARGETS)!=true

else # HAS_SPEC=true

##############################################################################
# Now we have a spec. This part provides the "main" target that acts as a
# trampoline to call the Main.gmk with the value of $(MAKE) found in the spec
# file.
##############################################################################

include $(SPEC)

# Our helper functions.
include $(TOPDIR)/make/InitSupport.gmk

# Parse COMPARE_BUILD (for makefile development)
$(eval $(call ParseCompareBuild))

# Setup reproducible build environment
$(eval $(call SetupReproducibleBuild))

# If no LOG= was given on command line, but we have a non-standard default
# value, use that instead and re-parse log level.
ifeq ($(LOG), )
ifneq ($(DEFAULT_LOG), )
override LOG := $(DEFAULT_LOG)
$(eval $(call ParseLogLevel))
endif
endif
endif

ifeq ($(LOG_NOFILE), true)
# Disable build log if LOG=[level,]nofile was given
override BUILD_LOG_PIPE :=
override BUILD_LOG_PIPE_SIMPLE :=
endif
ifeq ($(LOG_NOFILE), true)
# Disable build log if LOG=[level,]nofile was given
override BUILD_LOG_PIPE :=
override BUILD_LOG_PIPE_SIMPLE :=
endif
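The LOG handling above accepts values such as the following on the make command line; the levels come from the info/debug/trace check earlier in this diff, and the target is an example.

```sh
make images LOG=debug          # more verbose console output
make images LOG=info,nofile    # info level, and skip writing build.log
```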
ifeq ($(filter dist-clean, $(SEQUENTIAL_TARGETS)), dist-clean)
# We can't have a log file if we're about to remove it.
override BUILD_LOG_PIPE :=
override BUILD_LOG_PIPE_SIMPLE :=
endif
ifeq ($(filter dist-clean, $(SEQUENTIAL_TARGETS)), dist-clean)
# We can't have a log file if we're about to remove it.
override BUILD_LOG_PIPE :=
override BUILD_LOG_PIPE_SIMPLE :=
endif

ifeq ($(OUTPUT_SYNC_SUPPORTED), true)
OUTPUT_SYNC_FLAG := -O$(OUTPUT_SYNC)
endif
ifeq ($(OUTPUT_SYNC_SUPPORTED), true)
OUTPUT_SYNC_FLAG := -O$(OUTPUT_SYNC)
endif

##############################################################################
# Init targets
##############################################################################
##############################################################################
# Init targets. These are handled fully, here and now.
##############################################################################

print-modules:
print-modules:
( cd $(TOPDIR) && \
$(MAKE) $(MAKE_ARGS) -j 1 -f make/Main.gmk $(USER_MAKE_VARS) \
NO_RECIPES=true print-modules )

print-targets:
print-targets:
( cd $(TOPDIR) && \
$(MAKE) $(MAKE_ARGS) -j 1 -f make/Main.gmk $(USER_MAKE_VARS) \
NO_RECIPES=true print-targets )

print-tests:
print-tests:
( cd $(TOPDIR) && \
$(MAKE) $(MAKE_ARGS) -j 1 -f make/Main.gmk $(USER_MAKE_VARS) \
NO_RECIPES=true print-tests )

print-configuration:
$(ECHO) $(CONFIGURE_COMMAND_LINE)
print-configuration:
$(ECHO) $(CONFIGURE_COMMAND_LINE)

reconfigure:
reconfigure:
ifneq ($(REAL_CONFIGURE_COMMAND_EXEC_FULL), )
$(ECHO) "Re-running configure using original command line '$(REAL_CONFIGURE_COMMAND_EXEC_SHORT) $(REAL_CONFIGURE_COMMAND_LINE)'"
$(eval RECONFIGURE_COMMAND := $(REAL_CONFIGURE_COMMAND_EXEC_FULL) $(REAL_CONFIGURE_COMMAND_LINE))
@@ -295,25 +116,27 @@ else # HAS_SPEC=true
CUSTOM_CONFIG_DIR="$(CUSTOM_CONFIG_DIR)" \
$(RECONFIGURE_COMMAND) )
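These init targets are meant to be called directly; typical invocations look like the following, where the configuration name is an example.

```sh
make print-targets                    # list the available main targets
make print-configuration              # echo the original 'configure' command line
make reconfigure CONF=linux-x86_64    # re-run configure for a chosen configuration
```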
##############################################################################
|
||||
# The main target, for delegating into Main.gmk
|
||||
##############################################################################
|
||||
.PHONY: print-modules print-targets print-tests print-configuration reconfigure
|
||||
|
||||
MAIN_TARGETS := $(SEQUENTIAL_TARGETS) $(PARALLEL_TARGETS) $(COMPARE_BUILD_MAKE)
|
||||
# If building the default target, add what they are to the description.
|
||||
DESCRIPTION_TARGETS := $(strip $(MAIN_TARGETS))
|
||||
ifeq ($(DESCRIPTION_TARGETS), default)
|
||||
DESCRIPTION_TARGETS += ($(DEFAULT_MAKE_TARGET))
|
||||
endif
|
||||
TARGET_DESCRIPTION := target$(if $(word 2, $(MAIN_TARGETS)),s) \
|
||||
'$(strip $(DESCRIPTION_TARGETS))' in configuration '$(CONF_NAME)'
|
||||
##############################################################################
|
||||
# The main target. This will delegate all other targets into Main.gmk.
|
||||
##############################################################################
|
||||
|
||||
# MAKEOVERRIDES is automatically set and propagated by Make to sub-Make calls.
|
||||
# We need to clear it of the init-specific variables. The user-specified
|
||||
# variables are explicitly propagated using $(USER_MAKE_VARS).
|
||||
main: MAKEOVERRIDES :=
|
||||
MAIN_TARGETS := $(SEQUENTIAL_TARGETS) $(PARALLEL_TARGETS) $(COMPARE_BUILD_MAKE)
|
||||
# If building the default target, add what they are to the description.
|
||||
DESCRIPTION_TARGETS := $(strip $(MAIN_TARGETS))
|
||||
ifeq ($(DESCRIPTION_TARGETS), default)
|
||||
DESCRIPTION_TARGETS += ($(DEFAULT_MAKE_TARGET))
|
||||
endif
|
||||
TARGET_DESCRIPTION := target$(if $(word 2, $(MAIN_TARGETS)),s) \
|
||||
'$(strip $(DESCRIPTION_TARGETS))' in configuration '$(CONF_NAME)'
|
||||
|
||||
main: $(INIT_TARGETS)
|
||||
# MAKEOVERRIDES is automatically set and propagated by Make to sub-Make calls.
|
||||
# We need to clear it of the init-specific variables. The user-specified
|
||||
# variables are explicitly propagated using $(USER_MAKE_VARS).
|
||||
main: MAKEOVERRIDES :=
|
||||
|
||||
main: $(INIT_TARGETS)
|
||||
ifneq ($(SEQUENTIAL_TARGETS)$(PARALLEL_TARGETS), )
|
||||
$(call RotateLogFiles)
|
||||
$(PRINTF) "Building $(TARGET_DESCRIPTION)\n" $(BUILD_LOG_PIPE_SIMPLE)
|
||||
@@ -333,7 +156,7 @@ else # HAS_SPEC=true
|
||||
# treat it as NOT using jobs at all.
|
||||
( cd $(TOPDIR) && \
|
||||
$(NICE) $(MAKE) $(MAKE_ARGS) $(OUTPUT_SYNC_FLAG) \
|
||||
$(if $(JOBS), -j $(JOBS)) \
|
||||
$(if $(JOBS), -j $(JOBS)) \
|
||||
-f make/Main.gmk $(USER_MAKE_VARS) \
|
||||
$(PARALLEL_TARGETS) $(COMPARE_BUILD_MAKE) $(BUILD_LOG_PIPE) || \
|
||||
( exitcode=$$? && \
|
||||
@@ -353,7 +176,7 @@ else # HAS_SPEC=true
|
||||
$(call ReportProfileTimes)
|
||||
endif
|
||||
|
||||
on-failure:
|
||||
on-failure:
|
||||
$(call CleanupJavacServer)
|
||||
$(call StopGlobalTimer)
|
||||
$(call ReportBuildTimes)
|
||||
@@ -365,15 +188,14 @@ else # HAS_SPEC=true
|
||||
$(call CleanupCompareBuild)
|
||||
endif
|
||||
|
||||
# Support targets for COMPARE_BUILD, used for makefile development
|
||||
pre-compare-build:
|
||||
# Support targets for COMPARE_BUILD, used for makefile development
|
||||
pre-compare-build:
|
||||
$(call WaitForJavacServerFinish)
|
||||
$(call PrepareCompareBuild)
|
||||
|
||||
post-compare-build:
$(call WaitForJavacServerFinish)
|
||||
$(call CleanupCompareBuild)
|
||||
$(call CompareBuildDoComparison)
|
||||
|
||||
.PHONY: print-targets print-modules reconfigure main on-failure
|
||||
endif
|
||||
.PHONY: main on-failure pre-compare-build post-compare-build
|
||||
|
||||
@@ -25,389 +25,108 @@
|
||||
|
||||
################################################################################
|
||||
# This file contains helper functions for Init.gmk.
|
||||
# It is divided in two parts, depending on if a SPEC is present or not
|
||||
# (HAS_SPEC is true or not).
|
||||
################################################################################
|
||||
|
||||
ifndef _INITSUPPORT_GMK
|
||||
_INITSUPPORT_GMK := 1
|
||||
# Define basic logging setup
|
||||
BUILD_LOG := $(OUTPUTDIR)/build.log
|
||||
BUILD_PROFILE_LOG := $(OUTPUTDIR)/build-profile.log
|
||||
|
||||
ifeq ($(HAS_SPEC), )
|
||||
BUILD_LOG_PIPE := > >($(TEE) -a $(BUILD_LOG)) 2> >($(TEE) -a $(BUILD_LOG) >&2) && wait
|
||||
# Use this for simple echo/printf commands that are never expected to print
|
||||
# to stderr.
|
||||
BUILD_LOG_PIPE_SIMPLE := | $(TEE) -a $(BUILD_LOG)
|
||||
|
||||
# COMMA is defined in spec.gmk, but that is not included yet
|
||||
COMMA := ,
|
||||
ifneq ($(CUSTOM_ROOT), )
|
||||
topdir = $(CUSTOM_ROOT)
|
||||
else
|
||||
topdir = $(TOPDIR)
|
||||
endif
|
||||
|
||||
# Include the corresponding closed file, if present.
|
||||
ifneq ($(CUSTOM_MAKE_DIR), )
|
||||
-include $(CUSTOM_MAKE_DIR)/InitSupport.gmk
|
||||
# Setup the build environment to match the requested specification on
|
||||
# level of reproducible builds
|
||||
define SetupReproducibleBuild
|
||||
ifeq ($$(SOURCE_DATE), updated)
|
||||
# For static values of SOURCE_DATE (not "updated"), these are set in spec.gmk
|
||||
export SOURCE_DATE_EPOCH := $$(shell $$(DATE) +"%s")
|
||||
export SOURCE_DATE_ISO_8601 := $$(call EpochToISO8601, $$(SOURCE_DATE_EPOCH))
|
||||
endif
|
||||
endef
|
||||
|
||||
##############################################################################
|
||||
# Helper functions for the initial part of Init.gmk, before the spec file is
|
||||
# loaded. Most of these functions provide parsing and setting up make options
|
||||
# from the command-line.
|
||||
##############################################################################
|
||||
# Parse COMPARE_BUILD into COMPARE_BUILD_*
|
||||
# Syntax: COMPARE_BUILD=CONF=<configure options>:PATCH=<patch file>:
|
||||
# MAKE=<make targets>:COMP_OPTS=<compare script options>:
|
||||
# COMP_DIR=<compare script base dir>|<default>:
|
||||
# FAIL=<bool>
|
||||
# If neither CONF nor PATCH is given, assume <default> means CONF if it
|
||||
# begins with "--", otherwise assume it means PATCH.
|
||||
# MAKE and COMP_OPTS can only be used with CONF and/or PATCH specified.
|
||||
# If any value contains "+", it will be replaced by space.
|
||||
# FAIL can be set to false to have the return value of compare be ignored.
|
||||
define ParseCompareBuild
|
||||
ifneq ($$(COMPARE_BUILD), )
|
||||
COMPARE_BUILD_OUTPUTDIR := $(topdir)/build/compare-build/$(CONF_NAME)
|
||||
COMPARE_BUILD_FAIL := true
|
||||
|
||||
# Make control variables, handled by Init.gmk
|
||||
INIT_CONTROL_VARIABLES += LOG CONF CONF_NAME SPEC JOBS TEST_JOBS CONF_CHECK \
|
||||
COMPARE_BUILD JTREG GTEST MICRO TEST_OPTS TEST_VM_OPTS TEST_DEPS
|
||||
|
||||
# All known make control variables
|
||||
MAKE_CONTROL_VARIABLES := $(INIT_CONTROL_VARIABLES) TEST JDK_FILTER SPEC_FILTER
|
||||
|
||||
# Define a simple reverse function.
|
||||
# Should maybe move to MakeBase.gmk, but we can't include that file now.
|
||||
reverse = \
|
||||
$(if $(strip $(1)), $(call reverse, $(wordlist 2, $(words $(1)), $(1)))) \
|
||||
$(firstword $(1))
|
||||
|
||||
# The variable MAKEOVERRIDES contains variable assignments from the command
|
||||
# line, but in reverse order to what the user entered.
|
||||
# The '§' <=> '\ 'dance is needed to keep values with space in them connected.
|
||||
COMMAND_LINE_VARIABLES := $(subst §,\ , $(call reverse, $(subst \ ,§,$(MAKEOVERRIDES))))
|
||||
|
||||
# A list like FOO="val1" BAR="val2" containing all user-supplied make
|
||||
# variables that we should propagate.
|
||||
# The '§' <=> '\ 'dance is needed to keep values with space in them connected.
|
||||
USER_MAKE_VARS := $(subst §,\ , $(filter-out $(addsuffix =%, $(INIT_CONTROL_VARIABLES)), \
|
||||
$(subst \ ,§,$(MAKEOVERRIDES))))
|
||||
|
||||
# Setup information about available configurations, if any.
|
||||
ifneq ($(CUSTOM_ROOT), )
|
||||
build_dir = $(CUSTOM_ROOT)/build
|
||||
else
|
||||
build_dir = $(topdir)/build
|
||||
endif
|
||||
all_spec_files = $(wildcard $(build_dir)/*/spec.gmk)
|
||||
# Extract the configuration names from the path
|
||||
all_confs = $(patsubst %/spec.gmk, %, $(patsubst $(build_dir)/%, %, $(all_spec_files)))
|
||||
|
||||
# Check for unknown command-line variables
|
||||
define CheckControlVariables
|
||||
command_line_variables := $$(strip $$(foreach var, \
|
||||
$$(subst \ ,_,$$(MAKEOVERRIDES)), \
|
||||
$$(firstword $$(subst =, , $$(var)))))
|
||||
unknown_command_line_variables := $$(strip \
|
||||
$$(filter-out $$(MAKE_CONTROL_VARIABLES), $$(command_line_variables)))
|
||||
ifneq ($$(unknown_command_line_variables), )
|
||||
$$(info Note: Command line contains non-control variables:)
|
||||
$$(foreach var, $$(unknown_command_line_variables), $$(info * $$(var)=$$($$(var))))
|
||||
$$(info Make sure it is not mistyped, and that you intend to override this variable.)
|
||||
$$(info 'make help' will list known control variables.)
|
||||
$$(info )
|
||||
endif
|
||||
endef
|
||||
|
||||
# Check for deprecated ALT_ variables
|
||||
define CheckDeprecatedEnvironment
|
||||
defined_alt_variables := $$(filter ALT_%, $$(.VARIABLES))
|
||||
ifneq ($$(defined_alt_variables), )
|
||||
$$(info Warning: You have the following ALT_ variables set:)
|
||||
$$(foreach var, $$(defined_alt_variables), $$(info * $$(var)=$$($$(var))))
|
||||
$$(info ALT_ variables are deprecated, and may result in a failed build.)
|
||||
$$(info Please clean your environment.)
|
||||
$$(info )
|
||||
endif
|
||||
endef
|
||||
|
||||
# Check for invalid make flags like -j
|
||||
define CheckInvalidMakeFlags
|
||||
# This is a trick to get this rule to execute before any other rules
|
||||
# MAKEFLAGS only indicate -j if read in a recipe (!)
|
||||
$$(topdir)/make/Init.gmk: .FORCE
|
||||
$$(if $$(findstring --jobserver, $$(MAKEFLAGS)), \
|
||||
$$(info Error: 'make -jN' is not supported, use 'make JOBS=N') \
|
||||
$$(error Cannot continue) \
|
||||
)
|
||||
.FORCE:
|
||||
.PHONY: .FORCE
|
||||
endef
|
||||
|
||||
# Check that the CONF_CHECK option is valid and set up handling
|
||||
define ParseConfCheckOption
|
||||
ifeq ($$(CONF_CHECK), )
|
||||
# Default behavior is fail
|
||||
CONF_CHECK := fail
|
||||
else ifneq ($$(filter-out auto fail ignore, $$(CONF_CHECK)), )
|
||||
$$(info Error: CONF_CHECK must be one of: auto, fail or ignore.)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
endef
|
||||
|
||||
define ParseConfAndSpec
|
||||
ifneq ($$(origin SPEC), undefined)
|
||||
# We have been given a SPEC, check that it works out properly
|
||||
ifneq ($$(origin CONF), undefined)
|
||||
# We also have a CONF argument. We can't have both.
|
||||
$$(info Error: Cannot use CONF=$$(CONF) and SPEC=$$(SPEC) at the same time. Choose one.)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
ifneq ($$(origin CONF_NAME), undefined)
|
||||
# We also have a CONF_NAME argument. We can't have both.
|
||||
$$(info Error: Cannot use CONF_NAME=$$(CONF_NAME) and SPEC=$$(SPEC) at the same time. Choose one.)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
ifeq ($$(wildcard $$(SPEC)), )
|
||||
$$(info Error: Cannot locate spec.gmk, given by SPEC=$$(SPEC).)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
ifeq ($$(filter /%, $$(SPEC)), )
|
||||
# If given with relative path, make it absolute
|
||||
SPECS := $$(CURDIR)/$$(strip $$(SPEC))
|
||||
else
|
||||
SPECS := $$(SPEC)
|
||||
endif
|
||||
|
||||
# For now, unset this SPEC variable.
|
||||
override SPEC :=
|
||||
ifneq ($$(findstring :, $$(COMPARE_BUILD)), )
|
||||
$$(foreach part, $$(subst :, , $$(COMPARE_BUILD)), \
|
||||
$$(if $$(filter PATCH=%, $$(part)), \
|
||||
$$(eval COMPARE_BUILD_PATCH = $$(strip $$(patsubst PATCH=%, %, $$(part)))) \
|
||||
) \
|
||||
$$(if $$(filter CONF=%, $$(part)), \
|
||||
$$(eval COMPARE_BUILD_CONF = $$(strip $$(subst +, , $$(patsubst CONF=%, %, $$(part))))) \
|
||||
) \
|
||||
$$(if $$(filter MAKE=%, $$(part)), \
|
||||
$$(eval COMPARE_BUILD_MAKE = $$(strip $$(subst +, , $$(patsubst MAKE=%, %, $$(part))))) \
|
||||
) \
|
||||
$$(if $$(filter COMP_OPTS=%, $$(part)), \
|
||||
$$(eval COMPARE_BUILD_COMP_OPTS = $$(strip $$(subst +, , $$(patsubst COMP_OPTS=%, %, $$(part))))) \
|
||||
) \
|
||||
$$(if $$(filter COMP_DIR=%, $$(part)), \
|
||||
$$(eval COMPARE_BUILD_COMP_DIR = $$(strip $$(subst +, , $$(patsubst COMP_DIR=%, %, $$(part))))) \
|
||||
) \
|
||||
$$(if $$(filter FAIL=%, $$(part)), \
|
||||
$$(eval COMPARE_BUILD_FAIL = $$(strip $$(subst +, , $$(patsubst FAIL=%, %, $$(part))))) \
|
||||
) \
|
||||
$$(if $$(filter NODRYRUN=%, $$(part)), \
|
||||
$$(eval COMPARE_BUILD_NODRYRUN = $$(strip $$(subst +, , $$(patsubst NODRYRUN=%, %, $$(part))))) \
|
||||
) \
|
||||
)
|
||||
else
|
||||
# Use spec.gmk files in the build output directory
|
||||
ifeq ($$(all_spec_files), )
|
||||
ifneq ($(CUSTOM_ROOT), )
|
||||
$$(info Error: No configurations found for $$(CUSTOM_ROOT).)
|
||||
else
|
||||
$$(info Error: No configurations found for $$(topdir).)
|
||||
endif
|
||||
$$(info Please run 'bash configure' to create a configuration.)
|
||||
$$(info )
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
|
||||
ifneq ($$(origin CONF_NAME), undefined)
|
||||
ifneq ($$(origin CONF), undefined)
|
||||
# We also have a CONF argument. We can't have both.
|
||||
$$(info Error: Cannot use CONF=$$(CONF) and CONF_NAME=$$(CONF_NAME) at the same time. Choose one.)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
matching_conf := $$(strip $$(filter $$(CONF_NAME), $$(all_confs)))
|
||||
ifeq ($$(matching_conf), )
|
||||
$$(info Error: No configurations found matching CONF_NAME=$$(CONF_NAME).)
|
||||
$$(info Available configurations in $$(build_dir):)
|
||||
$$(foreach var, $$(all_confs), $$(info * $$(var)))
|
||||
$$(error Cannot continue)
|
||||
else ifneq ($$(words $$(matching_conf)), 1)
|
||||
$$(info Error: Matching more than one configuration CONF_NAME=$$(CONF_NAME).)
|
||||
$$(info Available configurations in $$(build_dir):)
|
||||
$$(foreach var, $$(all_confs), $$(info * $$(var)))
|
||||
$$(error Cannot continue)
|
||||
else
|
||||
$$(info Building configuration '$$(matching_conf)' (matching CONF_NAME=$$(CONF_NAME)))
|
||||
endif
|
||||
# Create a SPEC definition. This will contain the path to exactly one spec file.
|
||||
SPECS := $$(build_dir)/$$(matching_conf)/spec.gmk
|
||||
else ifneq ($$(origin CONF), undefined)
|
||||
# User has given a CONF= argument.
|
||||
ifeq ($$(CONF), )
|
||||
# If given CONF=, match all configurations
|
||||
matching_confs := $$(strip $$(all_confs))
|
||||
else
|
||||
# Otherwise select those that contain the given CONF string
|
||||
ifeq ($$(patsubst !%,,$$(CONF)), )
|
||||
# A CONF starting with ! means we should negate the search term
|
||||
matching_confs := $$(strip $$(foreach var, $$(all_confs), \
|
||||
$$(if $$(findstring $$(subst !,,$$(CONF)), $$(var)), ,$$(var))))
|
||||
else
|
||||
matching_confs := $$(strip $$(foreach var, $$(all_confs), \
|
||||
$$(if $$(findstring $$(CONF), $$(var)), $$(var))))
|
||||
endif
|
||||
ifneq ($$(filter $$(CONF), $$(matching_confs)), )
|
||||
ifneq ($$(word 2, $$(matching_confs)), )
|
||||
# Don't repeat this output on make restarts caused by including
|
||||
# generated files.
|
||||
ifeq ($$(MAKE_RESTARTS), )
|
||||
$$(info Using exact match for CONF=$$(CONF) (other matches are possible))
|
||||
endif
|
||||
endif
|
||||
# If we found an exact match, use that
|
||||
matching_confs := $$(CONF)
|
||||
endif
|
||||
endif
|
||||
ifeq ($$(matching_confs), )
|
||||
$$(info Error: No configurations found matching CONF=$$(CONF).)
|
||||
$$(info Available configurations in $$(build_dir):)
|
||||
$$(foreach var, $$(all_confs), $$(info * $$(var)))
|
||||
$$(error Cannot continue)
|
||||
else
|
||||
# Don't repeat this output on make restarts caused by including
|
||||
# generated files.
|
||||
ifeq ($$(MAKE_RESTARTS), )
|
||||
ifeq ($$(words $$(matching_confs)), 1)
|
||||
ifneq ($$(findstring $$(LOG_LEVEL), info debug trace), )
|
||||
$$(info Building configuration '$$(matching_confs)' (matching CONF=$$(CONF)))
|
||||
endif
|
||||
else
|
||||
$$(info Building these configurations (matching CONF=$$(CONF)):)
|
||||
$$(foreach var, $$(matching_confs), $$(info * $$(var)))
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
|
||||
# Create a SPEC definition. This will contain the path to one or more spec.gmk files.
|
||||
SPECS := $$(addsuffix /spec.gmk, $$(addprefix $$(build_dir)/, $$(matching_confs)))
|
||||
# Separate handling for single field case, to allow for spaces in values.
|
||||
ifneq ($$(filter PATCH=%, $$(COMPARE_BUILD)), )
|
||||
COMPARE_BUILD_PATCH = $$(strip $$(patsubst PATCH=%, %, $$(COMPARE_BUILD)))
|
||||
else ifneq ($$(filter CONF=%, $$(COMPARE_BUILD)), )
|
||||
COMPARE_BUILD_CONF = $$(strip $$(subst +, , $$(patsubst CONF=%, %, $$(COMPARE_BUILD))))
|
||||
else ifneq ($$(filter --%, $$(COMPARE_BUILD)), )
|
||||
# Assume CONF if value begins with --
|
||||
COMPARE_BUILD_CONF = $$(strip $$(subst +, , $$(COMPARE_BUILD)))
|
||||
else
|
||||
# No CONF or SPEC given, check the available configurations
|
||||
ifneq ($$(words $$(all_spec_files)), 1)
|
||||
$$(info Error: No CONF given, but more than one configuration found.)
|
||||
$$(info Available configurations in $$(build_dir):)
|
||||
$$(foreach var, $$(all_confs), $$(info * $$(var)))
|
||||
$$(info Please retry building with CONF=<config pattern> (or SPEC=<spec file>).)
|
||||
$$(info )
|
||||
$$(error Cannot continue)
|
||||
# Otherwise assume patch file
|
||||
COMPARE_BUILD_PATCH = $$(strip $$(COMPARE_BUILD))
|
||||
endif
|
||||
endif
|
||||
ifneq ($$(COMPARE_BUILD_PATCH), )
|
||||
ifneq ($$(wildcard $$(topdir)/$$(COMPARE_BUILD_PATCH)), )
|
||||
# Assume relative path, if file exists
|
||||
COMPARE_BUILD_PATCH := $$(wildcard $$(topdir)/$$(COMPARE_BUILD_PATCH))
|
||||
else ifeq ($$(wildcard $$(COMPARE_BUILD_PATCH)), )
|
||||
$$(error Patch file $$(COMPARE_BUILD_PATCH) does not exist)
|
||||
endif
|
||||
ifneq ($$(COMPARE_BUILD_NODRYRUN), true)
|
||||
PATCH_DRY_RUN := $$(shell cd $$(topdir) && $$(PATCH) --dry-run -p1 < $$(COMPARE_BUILD_PATCH) > /dev/null 2>&1 || $$(ECHO) FAILED)
|
||||
ifeq ($$(PATCH_DRY_RUN), FAILED)
|
||||
$$(error Patch file $$(COMPARE_BUILD_PATCH) does not apply cleanly)
|
||||
endif
|
||||
|
||||
# We found exactly one configuration, use it
|
||||
SPECS := $$(strip $$(all_spec_files))
|
||||
endif
|
||||
endif
|
||||
endef
|
||||
|
||||
# Extract main targets from Main.gmk using the spec provided in $2.
|
||||
#
|
||||
# Param 1: FORCE = force generation of main-targets.gmk or LAZY = do not force.
|
||||
# Param 2: The SPEC file to use.
|
||||
define DefineMainTargets
|
||||
|
||||
# We will start by making sure the main-targets.gmk file is removed, if
|
||||
# make has not been restarted. By the -include, we will trigger the
|
||||
# rule for generating the file (which is never there since we removed it),
|
||||
# thus generating it fresh, and make will restart, incrementing the restart
|
||||
# count.
|
||||
main_targets_file := $$(dir $(strip $2))make-support/main-targets.gmk
|
||||
|
||||
ifeq ($$(MAKE_RESTARTS), )
|
||||
# Only do this if make has not been restarted, and if we do not force it.
|
||||
ifeq ($(strip $1), FORCE)
|
||||
$$(shell rm -f $$(main_targets_file))
|
||||
endif
|
||||
ifneq ($$(COMPARE_BUILD_FAIL), true)
|
||||
COMPARE_BUILD_IGNORE_RESULT := || true
|
||||
endif
|
||||
|
||||
$$(main_targets_file):
|
||||
@( cd $$(topdir) && \
|
||||
$$(MAKE) $$(MAKE_LOG_FLAGS) -r -R -f $$(topdir)/make/Main.gmk \
|
||||
-I $$(topdir)/make/common SPEC=$(strip $2) NO_RECIPES=true \
|
||||
$$(MAKE_LOG_VARS) \
|
||||
create-main-targets-include )
|
||||
|
||||
# Now include main-targets.gmk. This will define ALL_MAIN_TARGETS.
|
||||
-include $$(main_targets_file)
|
||||
endef
|
||||
|
||||
define PrintConfCheckFailed
|
||||
@echo ' '
|
||||
@echo "Please rerun configure! Easiest way to do this is by running"
|
||||
@echo "'make reconfigure'."
|
||||
@echo "This behavior may also be changed using CONF_CHECK=<ignore|auto>."
|
||||
@echo ' '
|
||||
endef
|
||||
|
||||
else # $(HAS_SPEC)=true
|
||||
##############################################################################
|
||||
# Helper functions for the 'main' target. These functions assume a single,
|
||||
# proper and existing SPEC is included.
|
||||
##############################################################################
|
||||
|
||||
include $(TOPDIR)/make/common/MakeBase.gmk
|
||||
|
||||
# Define basic logging setup
|
||||
BUILD_LOG := $(OUTPUTDIR)/build.log
|
||||
BUILD_PROFILE_LOG := $(OUTPUTDIR)/build-profile.log
|
||||
|
||||
BUILD_LOG_PIPE := > >($(TEE) -a $(BUILD_LOG)) 2> >($(TEE) -a $(BUILD_LOG) >&2) && wait
|
||||
# Use this for simple echo/printf commands that are never expected to print
|
||||
# to stderr.
|
||||
BUILD_LOG_PIPE_SIMPLE := | $(TEE) -a $(BUILD_LOG)
|
||||
|
||||
ifneq ($(CUSTOM_ROOT), )
|
||||
topdir = $(CUSTOM_ROOT)
|
||||
else
|
||||
topdir = $(TOPDIR)
|
||||
endif
|
||||
endef
|
||||
|
||||
# Setup the build environment to match the requested specification on
|
||||
# level of reproducible builds
|
||||
define SetupReproducibleBuild
|
||||
ifeq ($$(SOURCE_DATE), updated)
|
||||
# For static values of SOURCE_DATE (not "updated"), these are set in spec.gmk
|
||||
export SOURCE_DATE_EPOCH := $$(shell $$(DATE) +"%s")
|
||||
export SOURCE_DATE_ISO_8601 := $$(call EpochToISO8601, $$(SOURCE_DATE_EPOCH))
|
||||
endif
|
||||
endef
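A minimal usage sketch (the target name below is just an example, not taken from this change): passing SOURCE_DATE=updated on the make command line makes this macro stamp the build with the time of the current run,

    make SOURCE_DATE=updated images

while any other SOURCE_DATE value keeps the SOURCE_DATE_EPOCH fixed by spec.gmk at configure time, which is what reproducible builds rely on.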
|
||||
|
||||
# Parse COMPARE_BUILD into COMPARE_BUILD_*
|
||||
# Syntax: COMPARE_BUILD=CONF=<configure options>:PATCH=<patch file>:
|
||||
# MAKE=<make targets>:COMP_OPTS=<compare script options>:
|
||||
# COMP_DIR=<compare script base dir>|<default>:
|
||||
# FAIL=<bool>
|
||||
# If neither CONF nor PATCH is given, assume <default> means CONF if it
|
||||
# begins with "--", otherwise assume it means PATCH.
|
||||
# MAKE and COMP_OPTS can only be used with CONF and/or PATCH specified.
|
||||
# If any value contains "+", it will be replaced by space.
|
||||
# FAIL can be set to false to have the return value of compare be ignored.
|
||||
define ParseCompareBuild
|
||||
ifneq ($$(COMPARE_BUILD), )
|
||||
COMPARE_BUILD_OUTPUTDIR := $(topdir)/build/compare-build/$(CONF_NAME)
|
||||
COMPARE_BUILD_FAIL := true
|
||||
|
||||
ifneq ($$(findstring :, $$(COMPARE_BUILD)), )
|
||||
$$(foreach part, $$(subst :, , $$(COMPARE_BUILD)), \
|
||||
$$(if $$(filter PATCH=%, $$(part)), \
|
||||
$$(eval COMPARE_BUILD_PATCH = $$(strip $$(patsubst PATCH=%, %, $$(part)))) \
|
||||
) \
|
||||
$$(if $$(filter CONF=%, $$(part)), \
|
||||
$$(eval COMPARE_BUILD_CONF = $$(strip $$(subst +, , $$(patsubst CONF=%, %, $$(part))))) \
|
||||
) \
|
||||
$$(if $$(filter MAKE=%, $$(part)), \
|
||||
$$(eval COMPARE_BUILD_MAKE = $$(strip $$(subst +, , $$(patsubst MAKE=%, %, $$(part))))) \
|
||||
) \
|
||||
$$(if $$(filter COMP_OPTS=%, $$(part)), \
|
||||
$$(eval COMPARE_BUILD_COMP_OPTS = $$(strip $$(subst +, , $$(patsubst COMP_OPTS=%, %, $$(part))))) \
|
||||
) \
|
||||
$$(if $$(filter COMP_DIR=%, $$(part)), \
|
||||
$$(eval COMPARE_BUILD_COMP_DIR = $$(strip $$(subst +, , $$(patsubst COMP_DIR=%, %, $$(part))))) \
|
||||
) \
|
||||
$$(if $$(filter FAIL=%, $$(part)), \
|
||||
$$(eval COMPARE_BUILD_FAIL = $$(strip $$(subst +, , $$(patsubst FAIL=%, %, $$(part))))) \
|
||||
) \
|
||||
$$(if $$(filter NODRYRUN=%, $$(part)), \
|
||||
$$(eval COMPARE_BUILD_NODRYRUN = $$(strip $$(subst +, , $$(patsubst NODRYRUN=%, %, $$(part))))) \
|
||||
) \
|
||||
)
|
||||
else
|
||||
# Separate handling for single field case, to allow for spaces in values.
|
||||
ifneq ($$(filter PATCH=%, $$(COMPARE_BUILD)), )
|
||||
COMPARE_BUILD_PATCH = $$(strip $$(patsubst PATCH=%, %, $$(COMPARE_BUILD)))
|
||||
else ifneq ($$(filter CONF=%, $$(COMPARE_BUILD)), )
|
||||
COMPARE_BUILD_CONF = $$(strip $$(subst +, , $$(patsubst CONF=%, %, $$(COMPARE_BUILD))))
|
||||
else ifneq ($$(filter --%, $$(COMPARE_BUILD)), )
|
||||
# Assume CONF if value begins with --
|
||||
COMPARE_BUILD_CONF = $$(strip $$(subst +, , $$(COMPARE_BUILD)))
|
||||
else
|
||||
# Otherwise assume patch file
|
||||
COMPARE_BUILD_PATCH = $$(strip $$(COMPARE_BUILD))
|
||||
endif
|
||||
endif
|
||||
ifneq ($$(COMPARE_BUILD_PATCH), )
|
||||
ifneq ($$(wildcard $$(topdir)/$$(COMPARE_BUILD_PATCH)), )
|
||||
# Assume relative path, if file exists
|
||||
COMPARE_BUILD_PATCH := $$(wildcard $$(topdir)/$$(COMPARE_BUILD_PATCH))
|
||||
else ifeq ($$(wildcard $$(COMPARE_BUILD_PATCH)), )
|
||||
$$(error Patch file $$(COMPARE_BUILD_PATCH) does not exist)
|
||||
endif
|
||||
ifneq ($$(COMPARE_BUILD_NODRYRUN), true)
|
||||
PATCH_DRY_RUN := $$(shell cd $$(topdir) && $$(PATCH) --dry-run -p1 < $$(COMPARE_BUILD_PATCH) > /dev/null 2>&1 || $$(ECHO) FAILED)
|
||||
ifeq ($$(PATCH_DRY_RUN), FAILED)
|
||||
$$(error Patch file $$(COMPARE_BUILD_PATCH) does not apply cleanly)
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
ifneq ($$(COMPARE_BUILD_FAIL), true)
|
||||
COMPARE_BUILD_IGNORE_RESULT := || true
|
||||
endif
|
||||
endif
|
||||
endef
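Hedged examples of the COMPARE_BUILD syntax documented above (the patch name and configure flag are invented for illustration):

    make COMPARE_BUILD=PATCH=local-fix.patch images
    make COMPARE_BUILD=CONF=--with-debug-level=release:MAKE=hotspot+jdk:FAIL=false images

In the second form each '+' becomes a space, so the compare build runs the targets 'hotspot jdk', and FAIL=false makes the comparison result non-fatal.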
|
||||
|
||||
# Prepare for a comparison rebuild
|
||||
define PrepareCompareBuild
$(ECHO) "Preparing for comparison rebuild"
|
||||
# Apply patch, if any
|
||||
$(if $(COMPARE_BUILD_PATCH), cd $(topdir) && $(PATCH) -p1 < $(COMPARE_BUILD_PATCH))
|
||||
@@ -425,10 +144,10 @@ else # $(HAS_SPEC)=true
|
||||
# must be done after patching.
|
||||
( cd $(CONFIGURE_START_DIR) && PATH="$(ORIGINAL_PATH)" \
|
||||
$(BASH) $(topdir)/configure $(CONFIGURE_COMMAND_LINE) $(COMPARE_BUILD_CONF))
|
||||
endef
|
||||
# Cleanup after a compare build
|
||||
define CleanupCompareBuild
# If running with a COMPARE_BUILD patch, reverse-apply it, but continue
|
||||
# even if that fails (can happen with removed files).
|
||||
$(if $(COMPARE_BUILD_PATCH), cd $(topdir) && $(PATCH) -R -p1 < $(COMPARE_BUILD_PATCH) || true)
|
||||
@@ -437,10 +156,10 @@ else # $(HAS_SPEC)=true
|
||||
$(MV) $(OUTPUTDIR) $(COMPARE_BUILD_OUTPUTDIR)
|
||||
$(MV) $(topdir)/build/.compare-build-temp/$(CONF_NAME) $(OUTPUTDIR)
|
||||
$(RM) -r $(topdir)/build/.compare-build-temp
|
||||
endef
|
||||
# Do the actual comparison of two builds
|
||||
define CompareBuildDoComparison
# Compare first and second build. Ignore any error code from compare.sh.
|
||||
$(ECHO) "Comparing between comparison rebuild (this/new) and baseline (other/old)"
|
||||
$(if $(COMPARE_BUILD_COMP_DIR), \
|
||||
@@ -450,9 +169,9 @@ else # $(HAS_SPEC)=true
|
||||
+(cd $(COMPARE_BUILD_OUTPUTDIR) && ./compare.sh --diffs $(COMPARE_BUILD_COMP_OPTS) \
|
||||
-o $(OUTPUTDIR) $(COMPARE_BUILD_IGNORE_RESULT)) \
|
||||
)
|
||||
endef
|
||||
define PrintFailureReports
$(if $(filter none, $(LOG_REPORT)), , \
|
||||
$(RM) $(MAKESUPPORT_OUTPUTDIR)/failure-summary.log ; \
|
||||
$(if $(wildcard $(MAKESUPPORT_OUTPUTDIR)/failure-logs/*.log), \
|
||||
@@ -474,9 +193,9 @@ else # $(HAS_SPEC)=true
|
||||
) >> $(MAKESUPPORT_OUTPUTDIR)/failure-summary.log \
|
||||
) \
|
||||
)
|
||||
endef
|
||||
define PrintBuildLogFailures
$(if $(filter none, $(LOG_REPORT)), , \
|
||||
if $(GREP) -q "recipe for target .* failed" $(BUILD_LOG) 2> /dev/null; then \
|
||||
$(PRINTF) "\n=== Make failed targets repeated here ===\n" ; \
|
||||
@@ -489,96 +208,96 @@ else # $(HAS_SPEC)=true
|
||||
fi >> $(MAKESUPPORT_OUTPUTDIR)/failure-summary.log ; \
|
||||
$(CAT) $(MAKESUPPORT_OUTPUTDIR)/failure-summary.log \
|
||||
)
|
||||
endef
|
||||
define RotateLogFiles
$(RM) $(BUILD_LOG).old 2> /dev/null && \
|
||||
$(MV) $(BUILD_LOG) $(BUILD_LOG).old 2> /dev/null || true
|
||||
$(if $(findstring true, $(LOG_PROFILE_TIMES_FILE)), \
|
||||
$(RM) $(BUILD_PROFILE_LOG).old 2> /dev/null && \
|
||||
$(MV) $(BUILD_PROFILE_LOG) $(BUILD_PROFILE_LOG).old 2> /dev/null || true \
|
||||
)
|
||||
endef
|
||||
# Failure logs are only supported for "parallel" main targets, not the
|
||||
# (trivial) sequential make targets (such as clean and reconfigure),
|
||||
# since the failure-logs directory creation will conflict with clean.
|
||||
# We also make sure the javatmp directory exists, which is needed if a java
|
||||
# process (like javac) is using java.io.tmpdir.
|
||||
define PrepareFailureLogs
$(RM) -r $(MAKESUPPORT_OUTPUTDIR)/failure-logs 2> /dev/null && \
|
||||
$(MKDIR) -p $(MAKESUPPORT_OUTPUTDIR)/failure-logs
|
||||
$(MKDIR) -p $(JAVA_TMP_DIR)
|
||||
$(RM) $(MAKESUPPORT_OUTPUTDIR)/exit-with-error 2> /dev/null
|
||||
endef
|
||||
# Remove any javac server logs and port files. This
|
||||
# prevents a new make run from reusing the previous servers.
|
||||
define PrepareJavacServer
$(if $(JAVAC_SERVER_DIR), \
|
||||
$(RM) -r $(JAVAC_SERVER_DIR) 2> /dev/null && \
|
||||
$(MKDIR) -p $(JAVAC_SERVER_DIR) \
|
||||
)
|
||||
endef
|
||||
define CleanupJavacServer
[ -f $(JAVAC_SERVER_DIR)/server.port ] && $(ECHO) Stopping javac server && \
|
||||
$(TOUCH) $(JAVAC_SERVER_DIR)/server.port.stop; true
|
||||
endef
|
||||
ifeq ($(call isBuildOs, windows), true)
|
||||
# On windows we need to synchronize with the javac server to be able to
|
||||
# move or remove the build output directory. Since we have no proper
|
||||
# synchronization process, wait for a while and hope it helps. This is only
|
||||
# used by build comparisons.
define WaitForJavacServerFinish
|
||||
$(if $(JAVAC_SERVER_DIR), \
|
||||
sleep 5 \
|
||||
)
|
||||
endef
|
||||
else
|
||||
define WaitForJavacServerFinish
|
||||
endef
|
||||
endif
|
||||
##############################################################################
|
||||
# Functions for timers
|
||||
##############################################################################
|
||||
# Store the build times in this directory.
|
||||
BUILDTIMESDIR = $(OUTPUTDIR)/make-support/build-times
|
||||
# Record starting time for build of a sub repository.
|
||||
define RecordStartTime
$(DATE) '+%Y %m %d %H %M %S' | $(AWK) '{ print $$1,$$2,$$3,$$4,$$5,$$6,($$4*3600+$$5*60+$$6) }' > $(BUILDTIMESDIR)/build_time_start_$(strip $1) && \
|
||||
$(DATE) '+%Y-%m-%d %H:%M:%S' > $(BUILDTIMESDIR)/build_time_start_$(strip $1)_human_readable
|
||||
endef
|
||||
# Record ending time and calculate the difference and store it in an
# easy-to-read format. Handles builds that cross midnight. Expects
|
||||
# that a build will never take 24 hours or more.
|
||||
define RecordEndTime
$(DATE) '+%Y %m %d %H %M %S' | $(AWK) '{ print $$1,$$2,$$3,$$4,$$5,$$6,($$4*3600+$$5*60+$$6) }' > $(BUILDTIMESDIR)/build_time_end_$(strip $1)
|
||||
$(DATE) '+%Y-%m-%d %H:%M:%S' > $(BUILDTIMESDIR)/build_time_end_$(strip $1)_human_readable
|
||||
$(ECHO) `$(CAT) $(BUILDTIMESDIR)/build_time_start_$(strip $1)` `$(CAT) $(BUILDTIMESDIR)/build_time_end_$(strip $1)` $1 | \
|
||||
$(AWK) '{ F=$$7; T=$$14; if (F > T) { T+=3600*24 }; D=T-F; H=int(D/3600); \
|
||||
M=int((D-H*3600)/60); S=D-H*3600-M*60; printf("%02d:%02d:%02d %s\n",H,M,S,$$15); }' \
|
||||
> $(BUILDTIMESDIR)/build_time_diff_$(strip $1)
|
||||
endef
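A worked example of the midnight handling above, with invented times: a build starting at 23:50:00 gives F = 23*3600 + 50*60 = 85800 and one ending at 00:20:00 gives T = 1200; since F > T, the awk script adds a day so T becomes 87600, D = 1800 seconds, and the recorded difference is printed as 00:30:00.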
|
||||
define StartGlobalTimer
$(RM) -r $(BUILDTIMESDIR) 2> /dev/null && \
|
||||
$(MKDIR) -p $(BUILDTIMESDIR) && \
|
||||
$(call RecordStartTime,TOTAL)
|
||||
endef
|
||||
define StopGlobalTimer
$(call RecordEndTime,TOTAL)
|
||||
endef
|
||||
# Find all build_time_* files and print their contents in a list sorted
|
||||
# on the name of the sub repository.
|
||||
define ReportBuildTimes
$(PRINTF) $(LOG_INFO) -- \
|
||||
"----- Build times -------\nStart %s\nEnd %s\n%s\n%s\n-------------------------\n" \
|
||||
"`$(CAT) $(BUILDTIMESDIR)/build_time_start_TOTAL_human_readable`" \
|
||||
@@ -587,119 +306,15 @@ else # $(HAS_SPEC)=true
|
||||
$(XARGS) $(CAT) | $(SORT) -k 2`" \
|
||||
"`$(CAT) $(BUILDTIMESDIR)/build_time_diff_TOTAL`" \
|
||||
$(BUILD_LOG_PIPE_SIMPLE)
|
||||
endef
|
||||
|
||||
define ReportProfileTimes
|
||||
$(if $(findstring true, $(LOG_PROFILE_TIMES_LOG)), \
|
||||
[ ! -f $(BUILD_PROFILE_LOG) ] || \
|
||||
{ $(ECHO) Begin $(notdir $(BUILD_PROFILE_LOG)) && \
|
||||
$(CAT) $(BUILD_PROFILE_LOG) && \
|
||||
$(ECHO) End $(notdir $(BUILD_PROFILE_LOG)); \
|
||||
} \
|
||||
$(BUILD_LOG_PIPE_SIMPLE)
|
||||
)
|
||||
endef
|
||||
|
||||
endif # HAS_SPEC
|
||||
|
||||
# Look for a given option in the LOG variable, and if found, set a variable
|
||||
# and remove the option from the LOG variable
|
||||
# $1: The option to look for
|
||||
# $2: The variable to set to "true" if the option is found
|
||||
define ParseLogOption
|
||||
ifneq ($$(findstring $1, $$(LOG)), )
|
||||
override $2 := true
|
||||
# First try to remove ",<option>" if it exists, otherwise just remove "<option>"
|
||||
LOG_STRIPPED := $$(subst $1,, $$(subst $$(COMMA)$$(strip $1),, $$(LOG)))
|
||||
# We might have ended up with a leading comma. Remove it. Need override
|
||||
# since LOG is set from the command line.
|
||||
override LOG := $$(strip $$(patsubst $$(COMMA)%, %, $$(LOG_STRIPPED)))
|
||||
endif
|
||||
endef
|
||||
|
||||
# Look for a given option with an assignment in the LOG variable, and if found,
|
||||
# set a variable to that value and remove the option from the LOG variable
|
||||
# $1: The option to look for
|
||||
# $2: The variable to set to the value of the option, if found
|
||||
define ParseLogValue
|
||||
ifneq ($$(findstring $1=, $$(LOG)), )
|
||||
# Make words out of the comma-separated list and find the one with opt=val
|
||||
value := $$(strip $$(subst $$(strip $1)=,, $$(filter $$(strip $1)=%, $$(subst $$(COMMA), , $$(LOG)))))
|
||||
override $2 := $$(value)
|
||||
# First try to remove ",<option>" if it exists, otherwise just remove "<option>"
|
||||
LOG_STRIPPED := $$(subst $$(strip $1)=$$(value),, \
|
||||
$$(subst $$(COMMA)$$(strip $1)=$$(value),, $$(LOG)))
|
||||
# We might have ended up with a leading comma. Remove it. Need override
|
||||
# since LOG is set from the command line.
|
||||
override LOG := $$(strip $$(patsubst $$(COMMA)%, %, $$(LOG_STRIPPED)))
|
||||
endif
|
||||
endef
|
||||
|
||||
|
||||
define ParseLogLevel
|
||||
# Catch old-style VERBOSE= command lines.
|
||||
ifneq ($$(origin VERBOSE), undefined)
|
||||
$$(info Error: VERBOSE is deprecated. Use LOG=<warn|info|debug|trace> instead.)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
|
||||
# Setup logging according to LOG
|
||||
|
||||
# If "nofile" is present, do not log to a file
|
||||
$$(eval $$(call ParseLogOption, nofile, LOG_NOFILE))
|
||||
|
||||
# If "cmdline" is present, print all executes "important" command lines.
|
||||
$$(eval $$(call ParseLogOption, cmdlines, LOG_CMDLINES))
|
||||
|
||||
# If "report" is present, use non-standard reporting options at build failure.
|
||||
$$(eval $$(call ParseLogValue, report, LOG_REPORT))
|
||||
ifneq ($$(LOG_REPORT), )
|
||||
ifeq ($$(filter $$(LOG_REPORT), none all default), )
|
||||
$$(info Error: LOG=report has invalid value: $$(LOG_REPORT).)
|
||||
$$(info Valid values: LOG=report=<none>|<all>|<default>)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
endif
|
||||
|
||||
# If "profile-to-log" is present, write shell times in build log
|
||||
$$(eval $$(call ParseLogOption, profile-to-log, LOG_PROFILE_TIMES_LOG))
|
||||
|
||||
# If "profile" is present, write shell times in separate log file
|
||||
# IMPORTANT: $(ParseLogOption profile-to-log) should go first. Otherwise
|
||||
# parsing of 'LOG=debug,profile-to-log,nofile' ends up in the following error:
|
||||
# Error: LOG contains unknown option or log level: debug-to-log.
|
||||
$$(eval $$(call ParseLogOption, profile, LOG_PROFILE_TIMES_FILE))
|
||||
|
||||
# Treat LOG=profile-to-log as if it were LOG=profile,profile-to-log
|
||||
LOG_PROFILE_TIMES_FILE := $$(firstword $$(LOG_PROFILE_TIMES_FILE) $$(LOG_PROFILE_TIMES_LOG))
|
||||
|
||||
override LOG_LEVEL := $$(LOG)
|
||||
|
||||
ifeq ($$(LOG_LEVEL), )
|
||||
# Set LOG to "warn" as default if not set
|
||||
override LOG_LEVEL := warn
|
||||
endif
|
||||
|
||||
ifeq ($$(LOG_LEVEL), warn)
|
||||
override MAKE_LOG_FLAGS := -s
|
||||
else ifeq ($$(LOG_LEVEL), info)
|
||||
override MAKE_LOG_FLAGS := -s
|
||||
else ifeq ($$(LOG_LEVEL), debug)
|
||||
override MAKE_LOG_FLAGS :=
|
||||
else ifeq ($$(LOG_LEVEL), trace)
|
||||
override MAKE_LOG_FLAGS :=
|
||||
else
|
||||
$$(info Error: LOG contains unknown option or log level: $$(LOG).)
|
||||
$$(info LOG can be <level>[,<opt>[...]] where <opt> is nofile | cmdlines | profile | profile-to-log)
|
||||
$$(info and <level> is warn | info | debug | trace)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
endef
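Illustrative command lines that the parsing above accepts (target names are only examples):

    make LOG=info images
    make LOG=debug,cmdlines,profile-to-log hotspot
    make LOG=report=all,nofile docs

The options are stripped off one by one, and whatever word remains must be one of warn, info, debug or trace, with warn used when LOG is empty.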
|
||||
|
||||
MAKE_LOG_VARS = $(foreach v, \
|
||||
LOG_LEVEL LOG_NOFILE LOG_CMDLINES LOG_REPORT LOG_PROFILE_TIMES_LOG \
|
||||
LOG_PROFILE_TIMES_FILE, \
|
||||
$v=$($v) \
|
||||
)
|
||||
|
||||
endif # _INITSUPPORT_GMK
|
||||
|
||||
make/PreInit.gmk (new file, 215 lines)
@@ -0,0 +1,215 @@
|
||||
#
|
||||
# Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
# under the terms of the GNU General Public License version 2 only, as
|
||||
# published by the Free Software Foundation. Oracle designates this
|
||||
# particular file as subject to the "Classpath" exception as provided
|
||||
# by Oracle in the LICENSE file that accompanied this code.
|
||||
#
|
||||
# This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
# version 2 for more details (a copy is included in the LICENSE file that
|
||||
# accompanied this code).
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License version
|
||||
# 2 along with this work; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#
|
||||
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
# or visit www.oracle.com if you need additional information or have any
|
||||
# questions.
|
||||
#
|
||||
|
||||
################################################################################
|
||||
# This file is the earliest part of the build bootstrap process (not counting
|
||||
# Makefile that includes it). Its main responsibility is to figure out what
|
||||
# configuration to use and pick up the corresponding SPEC file. It will then
|
||||
# call Init.gmk with this SPEC for further bootstrapping.
|
||||
################################################################################
|
||||
|
||||
# This must be the first rule
|
||||
default:
|
||||
.PHONY: default
|
||||
|
||||
# Inclusion of this pseudo-target will cause make to execute this file
|
||||
# serially, regardless of -j.
|
||||
.NOTPARALLEL:
|
||||
|
||||
# Include our helper functions.
|
||||
include $(topdir)/make/PreInitSupport.gmk
|
||||
include $(topdir)/make/common/LogUtils.gmk
|
||||
|
||||
# Here are "global" targets, i.e. targets that can be executed without having
|
||||
# a configuration. This will define ALL_GLOBAL_TARGETS.
|
||||
include $(topdir)/make/Global.gmk
|
||||
|
||||
# CALLED_TARGETS is the list of targets that the user provided,
|
||||
# or "default" if unspecified.
|
||||
CALLED_TARGETS := $(if $(MAKECMDGOALS), $(MAKECMDGOALS), default)
|
||||
|
||||
# Extract non-global targets that require a spec file.
|
||||
CALLED_SPEC_TARGETS := $(filter-out $(ALL_GLOBAL_TARGETS), $(CALLED_TARGETS))
|
||||
|
||||
# If we have only global targets, or if we are called with -qp (assuming an
|
||||
# external party, e.g. bash completion, is trying to understand our targets),
|
||||
# we will skip SPEC location and the sanity checks.
|
||||
ifeq ($(CALLED_SPEC_TARGETS), )
|
||||
SKIP_SPEC := true
|
||||
endif
|
||||
ifeq ($(findstring p, $(MAKEFLAGS))$(findstring q, $(MAKEFLAGS)), pq)
|
||||
SKIP_SPEC := true
|
||||
endif
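An illustrative reading of the two checks above, assuming (as in the upstream Global.gmk) that help is a global target: 'make help' requests no spec-backed target and 'make -qp' (as used by bash completion) puts both q and p into MAKEFLAGS, so both cases set SKIP_SPEC; an ordinary 'make images' still goes through configuration lookup.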
|
||||
|
||||
ifneq ($(SKIP_SPEC), true)
|
||||
|
||||
############################################################################
|
||||
# This is the common case: we have been called from the command line by the
|
||||
# user with a target that should be delegated to Main.gmk, so we need to
|
||||
# figure out a proper SPEC and call Init.gmk with it.
|
||||
############################################################################
|
||||
|
||||
# Basic checks on environment and command line.
|
||||
$(eval $(call CheckControlVariables))
|
||||
$(eval $(call CheckDeprecatedEnvironment))
|
||||
$(eval $(call CheckInvalidMakeFlags))
|
||||
|
||||
# Check that CONF_CHECK is valid.
|
||||
$(eval $(call ParseConfCheckOption))
|
||||
|
||||
# Check that the LOG given is valid, and set LOG_LEVEL, LOG_NOFILE, MAKE_LOG_VARS and MAKE_LOG_FLAGS.
|
||||
$(eval $(call ParseLogLevel))
|
||||
|
||||
# After this SPECS contain 1..N spec files (otherwise ParseConfAndSpec fails).
|
||||
$(eval $(call ParseConfAndSpec))
|
||||
|
||||
# Extract main targets from Main.gmk using the spec(s) provided. In theory,
|
||||
# with multiple specs, we should find the intersection of targets provided
|
||||
# by all specs, but we approximate this by an arbitrary spec from the list.
|
||||
# This will setup ALL_MAIN_TARGETS.
|
||||
$(eval $(call DefineMainTargets, FORCE, $(firstword $(SPECS))))
|
||||
|
||||
# Targets provided by Init.gmk.
|
||||
ALL_INIT_TARGETS := print-modules print-targets print-configuration \
|
||||
print-tests reconfigure pre-compare-build post-compare-build
|
||||
|
||||
# Separate called targets depending on type.
|
||||
INIT_TARGETS := $(filter $(ALL_INIT_TARGETS), $(CALLED_SPEC_TARGETS))
|
||||
MAIN_TARGETS := $(filter $(ALL_MAIN_TARGETS), $(CALLED_SPEC_TARGETS))
|
||||
SEQUENTIAL_TARGETS := $(filter dist-clean clean%, $(MAIN_TARGETS))
|
||||
PARALLEL_TARGETS := $(filter-out $(SEQUENTIAL_TARGETS), $(MAIN_TARGETS))
|
||||
|
||||
# The spec files depend on the autoconf source code. This check makes sure
|
||||
# the configuration is up to date after changes to configure.
|
||||
$(SPECS): $(wildcard $(topdir)/make/autoconf/*) \
|
||||
$(if $(CUSTOM_CONFIG_DIR), $(wildcard $(CUSTOM_CONFIG_DIR)/*)) \
|
||||
$(addprefix $(topdir)/make/conf/, version-numbers.conf branding.conf) \
|
||||
$(if $(CUSTOM_CONF_DIR), $(wildcard $(addprefix $(CUSTOM_CONF_DIR)/, \
|
||||
version-numbers.conf branding.conf)))
|
||||
ifeq ($(CONF_CHECK), fail)
|
||||
@echo Error: The configuration is not up to date for \
|
||||
"'$(lastword $(subst /, , $(dir $@)))'."
|
||||
$(call PrintConfCheckFailed)
|
||||
@exit 2
|
||||
else ifeq ($(CONF_CHECK), auto)
|
||||
@echo Note: The configuration is not up to date for \
|
||||
"'$(lastword $(subst /, , $(dir $@)))'."
|
||||
@( cd $(topdir) && \
|
||||
$(MAKE) $(MFLAGS) $(MAKE_LOG_FLAGS) -r -R -f $(topdir)/make/Init.gmk \
|
||||
SPEC=$@ HAS_SPEC=true ACTUAL_TOPDIR=$(topdir) \
|
||||
reconfigure )
|
||||
else ifeq ($(CONF_CHECK), ignore)
|
||||
# Do nothing
|
||||
endif
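Hedged usage examples for the three CONF_CHECK modes handled above (target name illustrative):

    make images                     # default is CONF_CHECK=fail: stop when spec.gmk is older than the configure sources
    make CONF_CHECK=auto images     # rerun configure for the stale configuration, then continue building
    make CONF_CHECK=ignore images   # build anyway with the stale configuration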
|
||||
|
||||
# Do not let make delete spec files even if aborted while doing a reconfigure
|
||||
.PRECIOUS: $(SPECS)
|
||||
|
||||
# Unless reconfigure is explicitly called, let all main targets depend on
|
||||
# the spec files to be up to date.
|
||||
ifeq ($(findstring reconfigure, $(INIT_TARGETS)), )
|
||||
$(MAIN_TARGETS): $(SPECS)
|
||||
endif
|
||||
|
||||
make-info:
|
||||
ifneq ($(findstring $(LOG_LEVEL), info debug trace), )
|
||||
$(info Running make as '$(strip $(MAKE) $(MFLAGS) \
|
||||
$(COMMAND_LINE_VARIABLES) $(MAKECMDGOALS))')
|
||||
endif
|
||||
|
||||
MAKE_INIT_WITH_SPEC_ARGUMENTS := ACTUAL_TOPDIR=$(topdir) \
|
||||
USER_MAKE_VARS="$(USER_MAKE_VARS)" MAKE_LOG_FLAGS=$(MAKE_LOG_FLAGS) \
|
||||
$(MAKE_LOG_VARS) \
|
||||
INIT_TARGETS="$(INIT_TARGETS)" \
|
||||
SEQUENTIAL_TARGETS="$(SEQUENTIAL_TARGETS)" \
|
||||
PARALLEL_TARGETS="$(PARALLEL_TARGETS)"
|
||||
|
||||
# Now the init and main targets will be called, once for each SPEC. The
|
||||
# recipe will be run once for every target specified, but we only want to
|
||||
# execute the recipe a single time, hence the TARGET_DONE with a dummy
|
||||
# command if true.
|
||||
# The COMPARE_BUILD part implements special support for makefile development.
|
||||
$(ALL_INIT_TARGETS) $(ALL_MAIN_TARGETS): make-info
|
||||
@$(if $(TARGET_DONE), \
|
||||
true \
|
||||
, \
|
||||
( cd $(topdir) && \
|
||||
$(foreach spec, $(SPECS), \
|
||||
$(MAKE) $(MFLAGS) $(MAKE_LOG_FLAGS) -r -R -j 1 -f $(topdir)/make/Init.gmk \
|
||||
SPEC=$(spec) HAS_SPEC=true $(MAKE_INIT_WITH_SPEC_ARGUMENTS) \
|
||||
main && \
|
||||
$(if $(and $(COMPARE_BUILD), $(PARALLEL_TARGETS)), \
|
||||
$(MAKE) $(MFLAGS) $(MAKE_LOG_FLAGS) -r -R -f $(topdir)/make/Init.gmk \
|
||||
SPEC=$(spec) HAS_SPEC=true ACTUAL_TOPDIR=$(topdir) \
|
||||
COMPARE_BUILD="$(COMPARE_BUILD)" pre-compare-build && \
|
||||
$(MAKE) $(MFLAGS) $(MAKE_LOG_FLAGS) -r -R -j 1 -f $(topdir)/make/Init.gmk \
|
||||
SPEC=$(spec) HAS_SPEC=true $(MAKE_INIT_WITH_SPEC_ARGUMENTS) \
|
||||
COMPARE_BUILD="$(COMPARE_BUILD):NODRYRUN=true" main && \
|
||||
$(MAKE) $(MFLAGS) $(MAKE_LOG_FLAGS) -r -R -f $(topdir)/make/Init.gmk \
|
||||
SPEC=$(spec) HAS_SPEC=true ACTUAL_TOPDIR=$(topdir) \
|
||||
COMPARE_BUILD="$(COMPARE_BUILD):NODRYRUN=true" post-compare-build && \
|
||||
) \
|
||||
) true ) \
|
||||
$(eval TARGET_DONE=true) \
|
||||
)
|
||||
|
||||
.PHONY: $(ALL_MAIN_TARGETS) $(ALL_INIT_TARGETS)
|
||||
|
||||
else # SKIP_SPEC=true
|
||||
|
||||
############################################################################
|
||||
# We have only global targets, or are called with -pq (from command
|
||||
# completion). In this case we might not even have a configuration at all, but
|
||||
# still need to handle the situation gracefully even if there is no SPEC file.
|
||||
############################################################################
|
||||
|
||||
ifeq ($(wildcard $(SPEC)), )
|
||||
# If we have no SPEC provided, we will just make a "best effort" target list.
|
||||
# First try to grab any available pre-existing main-targets.gmk.
|
||||
main_targets_file := $(firstword $(wildcard $(build_dir)/*/make-support/main-targets.gmk))
|
||||
ifneq ($(main_targets_file), )
|
||||
# Extract the SPEC that corresponds to this main-targets.gmk file.
|
||||
SPEC := $(patsubst %/make-support/main-targets.gmk, %/spec.gmk, $(main_targets_file))
|
||||
else
|
||||
# None found, pick an arbitrary SPEC for which to generate a file
|
||||
SPEC := $(firstword $(all_spec_files))
|
||||
endif
|
||||
endif
|
||||
|
||||
ifneq ($(wildcard $(SPEC)), )
|
||||
$(eval $(call DefineMainTargets, LAZY, $(SPEC)))
|
||||
else
|
||||
# If we have no configurations we can not provide any main targets.
|
||||
ALL_MAIN_TARGETS :=
|
||||
endif
|
||||
|
||||
ALL_TARGETS := $(sort $(ALL_GLOBAL_TARGETS) $(ALL_MAIN_TARGETS) $(ALL_INIT_TARGETS))
|
||||
|
||||
# Just list all our targets.
|
||||
$(ALL_TARGETS):
|
||||
|
||||
.PHONY: $(ALL_TARGETS)
|
||||
|
||||
endif # $(SKIP_SPEC)!=true
|
||||
make/PreInitSupport.gmk (new file, 297 lines)
@@ -0,0 +1,297 @@
|
||||
#
|
||||
# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
# under the terms of the GNU General Public License version 2 only, as
|
||||
# published by the Free Software Foundation. Oracle designates this
|
||||
# particular file as subject to the "Classpath" exception as provided
|
||||
# by Oracle in the LICENSE file that accompanied this code.
|
||||
#
|
||||
# This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
# version 2 for more details (a copy is included in the LICENSE file that
|
||||
# accompanied this code).
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License version
|
||||
# 2 along with this work; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#
|
||||
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
# or visit www.oracle.com if you need additional information or have any
|
||||
# questions.
|
||||
#
|
||||
|
||||
##############################################################################
|
||||
# Helper functions for PreInit.gmk, the initial part of initialization before
|
||||
# the SPEC file is loaded. Most of these functions provide parsing and setting
|
||||
# up make options from the command-line.
|
||||
##############################################################################
|
||||
|
||||
# COMMA is defined in spec.gmk, but that is not included yet
|
||||
COMMA := ,
|
||||
|
||||
# Include the corresponding closed file, if present.
|
||||
ifneq ($(CUSTOM_MAKE_DIR), )
|
||||
-include $(CUSTOM_MAKE_DIR)/InitSupport.gmk
|
||||
endif
|
||||
|
||||
# Essential control variables that are handled by PreInit.gmk or Init.gmk
|
||||
INIT_CONTROL_VARIABLES := LOG CONF CONF_NAME SPEC JOBS CONF_CHECK ALLOW \
|
||||
COMPARE_BUILD
|
||||
|
||||
# All known make control variables; these are handled in other makefiles
|
||||
MAKE_CONTROL_VARIABLES += JDK_FILTER SPEC_FILTER \
|
||||
TEST TEST_JOBS JTREG GTEST MICRO TEST_OPTS TEST_VM_OPTS TEST_DEPS
|
||||
|
||||
ALL_CONTROL_VARIABLES := $(INIT_CONTROL_VARIABLES) $(MAKE_CONTROL_VARIABLES)
|
||||
|
||||
# Define a simple reverse function.
|
||||
# Should maybe move to MakeBase.gmk, but we can't include that file now.
|
||||
reverse = \
|
||||
$(if $(strip $(1)), $(call reverse, $(wordlist 2, $(words $(1)), $(1)))) \
|
||||
$(firstword $(1))
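A small illustration of the helper (not part of the change): with the definition above,

    $(info [$(call reverse, a b c d)])

prints something like '[ d c b a ]'; the word order is reversed, with extra whitespace that callers tolerate or strip.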
|
||||
|
||||
# The variable MAKEOVERRIDES contains variable assignments from the command
|
||||
# line, but in reverse order to what the user entered.
|
||||
# The '§' <=> '\ 'dance is needed to keep values with space in them connected.
|
||||
COMMAND_LINE_VARIABLES := $(subst §,\ , $(call reverse, $(subst \ ,§,$(MAKEOVERRIDES))))
|
||||
|
||||
# A list like FOO="val1" BAR="val2" containing all user-supplied make
|
||||
# variables that we should propagate.
|
||||
# The '§' <=> '\ 'dance is needed to keep values with space in them connected.
|
||||
# This explicit propagation is needed to avoid problems with characters that needs
|
||||
# escaping.
|
||||
USER_MAKE_VARS := $(subst §,\ , $(filter-out $(addsuffix =%, $(ALL_CONTROL_VARIABLES)), \
|
||||
$(subst \ ,§,$(MAKEOVERRIDES))))
|
||||
|
||||
# Setup information about available configurations, if any.
|
||||
ifneq ($(CUSTOM_ROOT), )
|
||||
build_dir = $(CUSTOM_ROOT)/build
|
||||
else
|
||||
build_dir = $(topdir)/build
|
||||
endif
|
||||
all_spec_files = $(wildcard $(build_dir)/*/spec.gmk)
|
||||
# Extract the configuration names from the path
|
||||
all_confs = $(patsubst %/spec.gmk, %, $(patsubst $(build_dir)/%, %, $(all_spec_files)))
|
||||
|
||||
# Check for unknown command-line variables
|
||||
define CheckControlVariables
|
||||
command_line_variables := $$(strip $$(foreach var, \
|
||||
$$(subst \ ,_,$$(MAKEOVERRIDES)), \
|
||||
$$(firstword $$(subst =, , $$(var)))))
|
||||
allowed_command_line_variables := $$(strip $$(subst $$(COMMA), , $$(ALLOW)))
|
||||
unknown_command_line_variables := $$(strip \
|
||||
$$(filter-out $$(ALL_CONTROL_VARIABLES) $$(allowed_command_line_variables), \
|
||||
$$(command_line_variables)))
|
||||
ifneq ($$(unknown_command_line_variables), )
|
||||
$$(info Note: Command line contains non-control variables:)
|
||||
$$(foreach var, $$(unknown_command_line_variables), $$(info * $$(var)=$$($$(var))))
|
||||
$$(info Make sure it is not mistyped, and that you intend to override this variable.)
|
||||
$$(info 'make help' will list known control variables.)
|
||||
$$(info )
|
||||
endif
|
||||
endef
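A hedged example of the ALLOW escape hatch used above (the variable names are invented): a command line such as

    make ALLOW=MY_FLAG,MY_OTHER_FLAG MY_FLAG=1 MY_OTHER_FLAG=2 images

turns the comma-separated ALLOW list into extra accepted names, so the note about non-control variables is not printed for them.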
|
||||
|
||||
# Check for deprecated ALT_ variables
|
||||
define CheckDeprecatedEnvironment
|
||||
defined_alt_variables := $$(filter ALT_%, $$(.VARIABLES))
|
||||
ifneq ($$(defined_alt_variables), )
|
||||
$$(info Warning: You have the following ALT_ variables set:)
|
||||
$$(foreach var, $$(defined_alt_variables), $$(info * $$(var)=$$($$(var))))
|
||||
$$(info ALT_ variables are deprecated, and may result in a failed build.)
|
||||
$$(info Please clean your environment.)
|
||||
$$(info )
|
||||
endif
|
||||
endef
|
||||
|
||||
# Check for invalid make flags like -j
|
||||
define CheckInvalidMakeFlags
|
||||
# This is a trick to get this rule to execute before any other rules
|
||||
# MAKEFLAGS only indicate -j if read in a recipe (!)
|
||||
$$(topdir)/make/PreInit.gmk: .FORCE
|
||||
$$(if $$(findstring --jobserver, $$(MAKEFLAGS)), \
|
||||
$$(info Error: 'make -jN' is not supported, use 'make JOBS=N') \
|
||||
$$(error Cannot continue) \
|
||||
)
|
||||
.FORCE:
|
||||
.PHONY: .FORCE
|
||||
endef
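The intended effect, sketched with example invocations:

    make -j8 images     # rejected with: Error: 'make -jN' is not supported, use 'make JOBS=N'
    make JOBS=8 images  # accepted; Init.gmk later forwards JOBS as '-j 8' to the Main.gmk invocation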
|
||||
|
||||
# Check that the CONF_CHECK option is valid and set up handling
|
||||
define ParseConfCheckOption
|
||||
ifeq ($$(CONF_CHECK), )
|
||||
# Default behavior is fail
|
||||
CONF_CHECK := fail
|
||||
else ifneq ($$(filter-out auto fail ignore, $$(CONF_CHECK)), )
|
||||
$$(info Error: CONF_CHECK must be one of: auto, fail or ignore.)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
endef
|
||||
|
||||
define ParseConfAndSpec
|
||||
ifneq ($$(origin SPEC), undefined)
|
||||
# We have been given a SPEC, check that it works out properly
|
||||
ifneq ($$(origin CONF), undefined)
|
||||
# We also have a CONF argument. We can't have both.
|
||||
$$(info Error: Cannot use CONF=$$(CONF) and SPEC=$$(SPEC) at the same time. Choose one.)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
ifneq ($$(origin CONF_NAME), undefined)
|
||||
# We also have a CONF_NAME argument. We can't have both.
|
||||
$$(info Error: Cannot use CONF_NAME=$$(CONF_NAME) and SPEC=$$(SPEC) at the same time. Choose one.)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
ifeq ($$(wildcard $$(SPEC)), )
|
||||
$$(info Error: Cannot locate spec.gmk, given by SPEC=$$(SPEC).)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
ifeq ($$(filter /%, $$(SPEC)), )
|
||||
# If given with relative path, make it absolute
|
||||
SPECS := $$(CURDIR)/$$(strip $$(SPEC))
|
||||
else
|
||||
SPECS := $$(SPEC)
|
||||
endif
|
||||
|
||||
# For now, unset this SPEC variable.
|
||||
override SPEC :=
|
||||
else
|
||||
# Use spec.gmk files in the build output directory
|
||||
ifeq ($$(all_spec_files), )
|
||||
ifneq ($(CUSTOM_ROOT), )
|
||||
$$(info Error: No configurations found for $$(CUSTOM_ROOT).)
|
||||
else
|
||||
$$(info Error: No configurations found for $$(topdir).)
|
||||
endif
|
||||
$$(info Please run 'bash configure' to create a configuration.)
|
||||
$$(info )
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
|
||||
ifneq ($$(origin CONF_NAME), undefined)
|
||||
ifneq ($$(origin CONF), undefined)
|
||||
# We also have a CONF argument. We can't have both.
|
||||
$$(info Error: Cannot use CONF=$$(CONF) and CONF_NAME=$$(CONF_NAME) at the same time. Choose one.)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
matching_conf := $$(strip $$(filter $$(CONF_NAME), $$(all_confs)))
|
||||
ifeq ($$(matching_conf), )
|
||||
$$(info Error: No configurations found matching CONF_NAME=$$(CONF_NAME).)
|
||||
$$(info Available configurations in $$(build_dir):)
|
||||
$$(foreach var, $$(all_confs), $$(info * $$(var)))
|
||||
$$(error Cannot continue)
|
||||
else ifneq ($$(words $$(matching_conf)), 1)
|
||||
$$(info Error: Matching more than one configuration CONF_NAME=$$(CONF_NAME).)
|
||||
$$(info Available configurations in $$(build_dir):)
|
||||
$$(foreach var, $$(all_confs), $$(info * $$(var)))
|
||||
$$(error Cannot continue)
|
||||
else
|
||||
$$(info Building configuration '$$(matching_conf)' (matching CONF_NAME=$$(CONF_NAME)))
|
||||
endif
|
||||
# Create a SPEC definition. This will contain the path to exactly one spec file.
|
||||
SPECS := $$(build_dir)/$$(matching_conf)/spec.gmk
|
||||
else ifneq ($$(origin CONF), undefined)
|
||||
# User has given a CONF= argument.
|
||||
ifeq ($$(CONF), )
|
||||
# If given CONF=, match all configurations
|
||||
matching_confs := $$(strip $$(all_confs))
|
||||
else
|
||||
# Otherwise select those that contain the given CONF string
|
||||
ifeq ($$(patsubst !%,,$$(CONF)), )
|
||||
# A CONF starting with ! means we should negate the search term
|
||||
matching_confs := $$(strip $$(foreach var, $$(all_confs), \
|
||||
$$(if $$(findstring $$(subst !,,$$(CONF)), $$(var)), ,$$(var))))
|
||||
else
|
||||
matching_confs := $$(strip $$(foreach var, $$(all_confs), \
|
||||
$$(if $$(findstring $$(CONF), $$(var)), $$(var))))
|
||||
endif
|
||||
ifneq ($$(filter $$(CONF), $$(matching_confs)), )
|
||||
ifneq ($$(word 2, $$(matching_confs)), )
|
||||
# Don't repeat this output on make restarts caused by including
|
||||
# generated files.
|
||||
ifeq ($$(MAKE_RESTARTS), )
|
||||
$$(info Using exact match for CONF=$$(CONF) (other matches are possible))
|
||||
endif
|
||||
endif
|
||||
# If we found an exact match, use that
|
||||
matching_confs := $$(CONF)
|
||||
endif
|
||||
endif
|
||||
ifeq ($$(matching_confs), )
|
||||
$$(info Error: No configurations found matching CONF=$$(CONF).)
|
||||
$$(info Available configurations in $$(build_dir):)
|
||||
$$(foreach var, $$(all_confs), $$(info * $$(var)))
|
||||
$$(error Cannot continue)
|
||||
else
|
||||
# Don't repeat this output on make restarts caused by including
|
||||
# generated files.
|
||||
ifeq ($$(MAKE_RESTARTS), )
|
||||
ifeq ($$(words $$(matching_confs)), 1)
|
||||
ifneq ($$(findstring $$(LOG_LEVEL), info debug trace), )
|
||||
$$(info Building configuration '$$(matching_confs)' (matching CONF=$$(CONF)))
|
||||
endif
|
||||
else
|
||||
$$(info Building these configurations (matching CONF=$$(CONF)):)
|
||||
$$(foreach var, $$(matching_confs), $$(info * $$(var)))
|
||||
endif
|
||||
endif
|
||||
endif
|
||||
|
||||
# Create a SPEC definition. This will contain the path to one or more spec.gmk files.
|
||||
SPECS := $$(addsuffix /spec.gmk, $$(addprefix $$(build_dir)/, $$(matching_confs)))
|
||||
else
|
||||
# No CONF or SPEC given, check the available configurations
|
||||
ifneq ($$(words $$(all_spec_files)), 1)
|
||||
$$(info Error: No CONF given, but more than one configuration found.)
|
||||
$$(info Available configurations in $$(build_dir):)
|
||||
$$(foreach var, $$(all_confs), $$(info * $$(var)))
|
||||
$$(info Please retry building with CONF=<config pattern> (or SPEC=<spec file>).)
|
||||
$$(info )
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
|
||||
# We found exactly one configuration, use it
|
||||
SPECS := $$(strip $$(all_spec_files))
|
||||
endif
|
||||
endif
|
||||
endef
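# A usage sketch for the selection logic above (the configuration name and the
# 'images' target are only examples):
#
#   make CONF=release images          # substring match against $(build_dir) configurations
#   make CONF=!debug images           # a leading '!' negates the match
#   make SPEC=build/linux-x86_64-server-release/spec.gmk images
#
# An exact CONF match wins even when the substring would match several
# configurations.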
|
||||
|
||||
# Extract main targets from Main.gmk using the spec provided in $2.
|
||||
#
|
||||
# Param 1: FORCE = force generation of main-targets.gmk or LAZY = do not force.
|
||||
# Param 2: The SPEC file to use.
|
||||
define DefineMainTargets
|
||||
|
||||
# We will start by making sure the main-targets.gmk file is removed, if
|
||||
# make has not been restarted. By the -include, we will trigger the
|
||||
# rule for generating the file (which is never there since we removed it),
|
||||
# thus generating it fresh, and make will restart, incrementing the restart
|
||||
# count.
|
||||
main_targets_file := $$(dir $(strip $2))make-support/main-targets.gmk
|
||||
|
||||
ifeq ($$(MAKE_RESTARTS), )
|
||||
# Only remove the file if make has not been restarted, and only when
# generation is forced.
|
||||
ifeq ($(strip $1), FORCE)
|
||||
$$(shell rm -f $$(main_targets_file))
|
||||
endif
|
||||
endif
|
||||
|
||||
$$(main_targets_file):
|
||||
@( cd $$(topdir) && \
|
||||
$$(MAKE) $$(MAKE_LOG_FLAGS) -r -R -f $$(topdir)/make/Main.gmk \
|
||||
-I $$(topdir)/make/common SPEC=$(strip $2) NO_RECIPES=true \
|
||||
$$(MAKE_LOG_VARS) \
|
||||
create-main-targets-include )
|
||||
|
||||
# Now include main-targets.gmk. This will define ALL_MAIN_TARGETS.
|
||||
-include $$(main_targets_file)
|
||||
endef
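# A call sketch, assuming SPECS has already been resolved by the logic above
# (the exact call sites live elsewhere in the Init machinery):
#
#   $(eval $(call DefineMainTargets, LAZY, $(firstword $(SPECS))))
#
# With FORCE as the first parameter, main-targets.gmk is deleted up front and
# regenerated on the make restart triggered by the -include.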
|
||||
|
||||
define PrintConfCheckFailed
|
||||
@echo ' '
|
||||
@echo "Please rerun configure! Easiest way to do this is by running"
|
||||
@echo "'make reconfigure'."
|
||||
@echo "This behavior may also be changed using CONF_CHECK=<ignore|auto>."
|
||||
@echo ' '
|
||||
endef
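# A sketch of the recovery steps this message points at (the 'images' target is
# only an example):
#
#   make reconfigure                  # re-run configure, typically with the stored arguments
#   make CONF_CHECK=ignore images     # or skip the configuration check this time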
|
||||
@@ -78,6 +78,9 @@ $(eval $(call IncludeCustomExtension, RunTests.gmk))
|
||||
|
||||
# This is the JDK that we will test
|
||||
JDK_UNDER_TEST := $(JDK_IMAGE_DIR)
|
||||
# The JDK used to compile jtreg test code. By default it is the same as
|
||||
# JDK_UNDER_TEST.
|
||||
JDK_FOR_COMPILE := $(JDK_IMAGE_DIR)
|
||||
|
||||
TEST_RESULTS_DIR := $(OUTPUTDIR)/test-results
|
||||
TEST_SUPPORT_DIR := $(OUTPUTDIR)/test-support
|
||||
@@ -979,6 +982,7 @@ define SetupRunJtregTestBody
|
||||
$$(JTREG_JAVA) $$($1_JTREG_LAUNCHER_OPTIONS) \
|
||||
-Dprogram=jtreg -jar $$(JT_HOME)/lib/jtreg.jar \
|
||||
$$($1_JTREG_BASIC_OPTIONS) \
|
||||
-compilejdk:$$(JDK_FOR_COMPILE) \
|
||||
-testjdk:$$(JDK_UNDER_TEST) \
|
||||
-dir:$$(JTREG_TOPDIR) \
|
||||
-reportDir:$$($1_TEST_RESULTS_DIR) \
|
||||
|
||||
@@ -24,7 +24,7 @@
|
||||
#
|
||||
|
||||
################################################################################
|
||||
# Initial bootstrapping, copied and stripped down from Makefile and Init.gmk
|
||||
# Initial bootstrapping, copied and stripped down from Makefile and PreInit.gmk.
|
||||
################################################################################
|
||||
|
||||
# In Cygwin, the MAKE variable gets prepended with the current directory if the
|
||||
@@ -136,7 +136,8 @@ $(eval $(call SetupVariable,JIB_JAR,OPTIONAL))
|
||||
# wrapper. This is required so we can include MakeBase which is needed for
|
||||
# CreateNewSpec.
|
||||
HAS_SPEC :=
|
||||
include $(TOPDIR)/make/InitSupport.gmk
|
||||
include $(TOPDIR)/make/PreInitSupport.gmk
|
||||
include $(TOPDIR)/make/common/LogUtils.gmk
|
||||
|
||||
$(eval $(call CheckDeprecatedEnvironment))
|
||||
$(eval $(call CheckInvalidMakeFlags))
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
@@ -624,4 +624,10 @@ AC_DEFUN_ONCE([BASIC_POST_CONFIG_OUTPUT],
|
||||
|
||||
# Make the compare script executable
|
||||
$CHMOD +x $OUTPUTDIR/compare.sh
|
||||
|
||||
# Copy the linker wrapper script for clang on AIX and make it executable
|
||||
if test "x$TOOLCHAIN_TYPE" = xclang && test "x$OPENJDK_TARGET_OS" = xaix; then
|
||||
$CP -f "$TOPDIR/make/scripts/aix/ld.sh" "$OUTPUTDIR/ld.sh"
|
||||
$CHMOD +x "$OUTPUTDIR/ld.sh"
|
||||
fi
|
||||
])
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
@@ -235,9 +235,10 @@ AC_DEFUN([FLAGS_SETUP_WARNINGS],
|
||||
CFLAGS_WARNINGS_ARE_ERRORS="-Werror"
|
||||
|
||||
# Additional warnings that are not activated by -Wall and -Wextra
|
||||
WARNINGS_ENABLE_ADDITIONAL="-Wpointer-arith -Wreturn-type -Wsign-compare \
|
||||
-Wtrampolines -Wundef -Wunused-const-variable=1 -Wunused-function \
|
||||
-Wunused-result -Wunused-value -Wtype-limits -Wuninitialized"
|
||||
WARNINGS_ENABLE_ADDITIONAL="-Winvalid-pch -Wpointer-arith -Wreturn-type \
|
||||
-Wsign-compare -Wtrampolines -Wtype-limits -Wundef -Wuninitialized \
|
||||
-Wunused-const-variable=1 -Wunused-function -Wunused-result \
|
||||
-Wunused-value"
|
||||
WARNINGS_ENABLE_ADDITIONAL_CXX="-Woverloaded-virtual -Wreorder"
|
||||
WARNINGS_ENABLE_ALL_CFLAGS="-Wall -Wextra -Wformat=2 $WARNINGS_ENABLE_ADDITIONAL"
|
||||
WARNINGS_ENABLE_ALL_CXXFLAGS="$WARNINGS_ENABLE_ALL_CFLAGS $WARNINGS_ENABLE_ADDITIONAL_CXX"
|
||||
@@ -277,7 +278,7 @@ AC_DEFUN([FLAGS_SETUP_WARNINGS],
|
||||
AC_DEFUN([FLAGS_SETUP_QUALITY_CHECKS],
|
||||
[
|
||||
# bounds, memory and behavior checking options
|
||||
if test "x$TOOLCHAIN_TYPE" = xgcc; then
|
||||
if test "x$TOOLCHAIN_TYPE" = xgcc || test "x$TOOLCHAIN_TYPE" = xclang; then
|
||||
case $DEBUG_LEVEL in
|
||||
release )
|
||||
# no adjustment
|
||||
@@ -516,12 +517,6 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_HELPER],
|
||||
-fvisibility=hidden -fno-strict-aliasing -fno-omit-frame-pointer"
|
||||
fi
|
||||
|
||||
if test "x$TOOLCHAIN_TYPE" = xclang && test "x$OPENJDK_TARGET_OS" = xaix; then
|
||||
# clang compiler on aix needs -ffunction-sections
|
||||
TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -ffunction-sections -ftls-model -fno-math-errno -fstack-protector"
|
||||
TOOLCHAIN_CFLAGS_JDK="-ffunction-sections -fsigned-char -fstack-protector"
|
||||
fi
|
||||
|
||||
if test "x$TOOLCHAIN_TYPE" = xgcc; then
|
||||
TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -fstack-protector"
|
||||
TOOLCHAIN_CFLAGS_JDK="-fvisibility=hidden -pipe -fstack-protector"
|
||||
@@ -541,7 +536,7 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_HELPER],
|
||||
# Restrict the debug information created by Clang to avoid
|
||||
# too big object files and speed the build up a little bit
|
||||
# (see http://llvm.org/bugs/show_bug.cgi?id=7554)
|
||||
TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -flimit-debug-info"
|
||||
TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -flimit-debug-info -fstack-protector"
|
||||
|
||||
# In principle the stack alignment below is cpu- and ABI-dependent and
|
||||
# should agree with values of StackAlignmentInBytes in various
|
||||
@@ -559,7 +554,13 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_HELPER],
|
||||
TOOLCHAIN_CFLAGS_JDK="-pipe"
|
||||
TOOLCHAIN_CFLAGS_JDK_CONLY="-fno-strict-aliasing" # technically NOT for CXX
|
||||
fi
|
||||
TOOLCHAIN_CFLAGS_JDK="$TOOLCHAIN_CFLAGS_JDK -fvisibility=hidden"
|
||||
|
||||
if test "x$OPENJDK_TARGET_OS" = xaix; then
|
||||
TOOLCHAIN_CFLAGS_JVM="$TOOLCHAIN_CFLAGS_JVM -ffunction-sections -ftls-model -fno-math-errno"
|
||||
TOOLCHAIN_CFLAGS_JDK="-ffunction-sections -fsigned-char"
|
||||
fi
|
||||
|
||||
TOOLCHAIN_CFLAGS_JDK="$TOOLCHAIN_CFLAGS_JDK -fvisibility=hidden -fstack-protector"
|
||||
|
||||
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
|
||||
# The -utf-8 option sets source and execution character sets to UTF-8 to enable correct
|
||||
@@ -736,6 +737,11 @@ AC_DEFUN([FLAGS_SETUP_CFLAGS_CPU_DEP],
|
||||
# for all archs except arm and ppc, prevent gcc to omit frame pointer
|
||||
$1_CFLAGS_CPU_JDK="${$1_CFLAGS_CPU_JDK} -fno-omit-frame-pointer"
|
||||
fi
|
||||
if test "x$FLAGS_CPU" = xppc64le; then
|
||||
# Little endian machine uses ELFv2 ABI.
|
||||
# Use Power8, this is the first CPU to support PPC64 LE with ELFv2 ABI.
|
||||
$1_CFLAGS_CPU_JVM="${$1_CFLAGS_CPU_JVM} -DABI_ELFv2 -mcpu=power8 -mtune=power8"
|
||||
fi
|
||||
fi
|
||||
if test "x$OPENJDK_TARGET_OS" = xaix; then
|
||||
$1_CFLAGS_CPU="-mcpu=pwr8"
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
@@ -79,7 +79,7 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
|
||||
fi
|
||||
if test "x$OPENJDK_TARGET_OS" = xaix; then
|
||||
BASIC_LDFLAGS="-Wl,-b64 -Wl,-brtl -Wl,-bnorwexec -Wl,-bnolibpath -Wl,-bnoexpall \
|
||||
-Wl,-bernotok -Wl,-bdatapsize:64k -Wl,-btextpsize:64k -Wl,-bstackpsize:64k"
|
||||
-Wl,-bernotok -Wl,-bdatapsize:64k -Wl,-btextpsize:64k -Wl,-bstackpsize:64k -fuse-ld=$OUTPUTDIR/ld.sh"
|
||||
BASIC_LDFLAGS_JVM_ONLY="$BASIC_LDFLAGS_JVM_ONLY -Wl,-lC_r -Wl,-bbigtoc"
|
||||
fi
|
||||
|
||||
|
||||
make/common/LogUtils.gmk (new file, 127 lines)
@@ -0,0 +1,127 @@
|
||||
#
|
||||
# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
# under the terms of the GNU General Public License version 2 only, as
|
||||
# published by the Free Software Foundation. Oracle designates this
|
||||
# particular file as subject to the "Classpath" exception as provided
|
||||
# by Oracle in the LICENSE file that accompanied this code.
|
||||
#
|
||||
# This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
# version 2 for more details (a copy is included in the LICENSE file that
|
||||
# accompanied this code).
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License version
|
||||
# 2 along with this work; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#
|
||||
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
# or visit www.oracle.com if you need additional information or have any
|
||||
# questions.
|
||||
#
|
||||
|
||||
################################################################################
|
||||
# This file contains helper functions for logging.
|
||||
################################################################################
|
||||
|
||||
# Look for a given option in the LOG variable, and if found, set a variable
|
||||
# and remove the option from the LOG variable
|
||||
# $1: The option to look for
|
||||
# $2: The variable to set to "true" if the option is found
|
||||
define ParseLogOption
|
||||
ifneq ($$(findstring $1, $$(LOG)), )
|
||||
override $2 := true
|
||||
# First try to remove ",<option>" if it exists, otherwise just remove "<option>"
|
||||
LOG_STRIPPED := $$(subst $1,, $$(subst $$(COMMA)$$(strip $1),, $$(LOG)))
|
||||
# We might have ended up with a leading comma. Remove it. Need override
|
||||
# since LOG is set from the command line.
|
||||
override LOG := $$(strip $$(patsubst $$(COMMA)%, %, $$(LOG_STRIPPED)))
|
||||
endif
|
||||
endef
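# A worked sketch of the transformation above, assuming LOG=debug,nofile on the
# command line and $1 = nofile:
#
#   findstring: 'nofile' found        -> LOG_NOFILE := true
#   subst ',nofile' then 'nofile'     -> LOG_STRIPPED = debug
#   patsubst ',%'                     -> override LOG := debug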
|
||||
|
||||
# Look for a given option with an assignment in the LOG variable, and if found,
|
||||
# set a variable to that value and remove the option from the LOG variable
|
||||
# $1: The option to look for
|
||||
# $2: The variable to set to the value of the option, if found
|
||||
define ParseLogValue
|
||||
ifneq ($$(findstring $1=, $$(LOG)), )
|
||||
# Make words out of the comma-separated list and find the one with opt=val
|
||||
value := $$(strip $$(subst $$(strip $1)=,, $$(filter $$(strip $1)=%, $$(subst $$(COMMA), , $$(LOG)))))
|
||||
override $2 := $$(value)
|
||||
# First try to remove ",<option>" if it exists, otherwise just remove "<option>"
|
||||
LOG_STRIPPED := $$(subst $$(strip $1)=$$(value),, \
|
||||
$$(subst $$(COMMA)$$(strip $1)=$$(value),, $$(LOG)))
|
||||
# We might have ended up with a leading comma. Remove it. Need override
|
||||
# since LOG is set from the command line.
|
||||
override LOG := $$(strip $$(patsubst $$(COMMA)%, %, $$(LOG_STRIPPED)))
|
||||
endif
|
||||
endef
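# A worked sketch, assuming LOG=info,report=all on the command line and
# $1 = report:
#
#   filter 'report=%' over 'info report=all'  -> report=all
#   strip the 'report=' prefix                -> value = all, LOG_REPORT := all
#   subst ',report=all'                       -> LOG_STRIPPED = info
#   patsubst ',%'                             -> override LOG := info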
|
||||
|
||||
define ParseLogLevel
|
||||
# Catch old-style VERBOSE= command lines.
|
||||
ifneq ($$(origin VERBOSE), undefined)
|
||||
$$(info Error: VERBOSE is deprecated. Use LOG=<warn|info|debug|trace> instead.)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
|
||||
# Setup logging according to LOG
|
||||
|
||||
# If "nofile" is present, do not log to a file
|
||||
$$(eval $$(call ParseLogOption, nofile, LOG_NOFILE))
|
||||
|
||||
# If "cmdline" is present, print all executes "important" command lines.
|
||||
$$(eval $$(call ParseLogOption, cmdlines, LOG_CMDLINES))
|
||||
|
||||
# If "report" is present, use non-standard reporting options at build failure.
|
||||
$$(eval $$(call ParseLogValue, report, LOG_REPORT))
|
||||
ifneq ($$(LOG_REPORT), )
|
||||
ifeq ($$(filter $$(LOG_REPORT), none all default), )
|
||||
$$(info Error: LOG=report has invalid value: $$(LOG_REPORT).)
|
||||
$$(info Valid values: LOG=report=<none>|<all>|<default>)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
endif
|
||||
|
||||
# If "profile-to-log" is present, write shell times in build log
|
||||
$$(eval $$(call ParseLogOption, profile-to-log, LOG_PROFILE_TIMES_LOG))
|
||||
|
||||
# If "profile" is present, write shell times in separate log file
|
||||
# IMPORTANT: $(ParseLogOption profile-to-log) should go first. Otherwise
|
||||
# parsing of 'LOG=debug,profile-to-log,nofile' ends up in the following error:
|
||||
# Error: LOG contains unknown option or log level: debug-to-log.
|
||||
$$(eval $$(call ParseLogOption, profile, LOG_PROFILE_TIMES_FILE))
|
||||
|
||||
# Treat LOG=profile-to-log as if it were LOG=profile,profile-to-log
|
||||
LOG_PROFILE_TIMES_FILE := $$(firstword $$(LOG_PROFILE_TIMES_FILE) $$(LOG_PROFILE_TIMES_LOG))
|
||||
|
||||
override LOG_LEVEL := $$(LOG)
|
||||
|
||||
ifeq ($$(LOG_LEVEL), )
|
||||
# Set LOG to "warn" as default if not set
|
||||
override LOG_LEVEL := warn
|
||||
endif
|
||||
|
||||
ifeq ($$(LOG_LEVEL), warn)
|
||||
override MAKE_LOG_FLAGS := -s
|
||||
else ifeq ($$(LOG_LEVEL), info)
|
||||
override MAKE_LOG_FLAGS := -s
|
||||
else ifeq ($$(LOG_LEVEL), debug)
|
||||
override MAKE_LOG_FLAGS :=
|
||||
else ifeq ($$(LOG_LEVEL), trace)
|
||||
override MAKE_LOG_FLAGS :=
|
||||
else
|
||||
$$(info Error: LOG contains unknown option or log level: $$(LOG).)
|
||||
$$(info LOG can be <level>[,<opt>[...]] where <opt> is nofile | cmdlines | profile | profile-to-log)
|
||||
$$(info and <level> is warn | info | debug | trace)
|
||||
$$(error Cannot continue)
|
||||
endif
|
||||
endef
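# Example invocations accepted by the parsing above (build targets are only
# examples):
#
#   make LOG=debug images
#   make LOG=info,cmdlines,nofile hotspot
#
# Whatever remains in LOG after the options have been stripped must be one of
# warn, info, debug or trace, otherwise the "unknown option or log level"
# error is raised.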
|
||||
|
||||
MAKE_LOG_VARS = $(foreach v, \
|
||||
LOG_LEVEL LOG_NOFILE LOG_CMDLINES LOG_REPORT LOG_PROFILE_TIMES_LOG \
|
||||
LOG_PROFILE_TIMES_FILE, \
|
||||
$v=$($v) \
|
||||
)
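# A sketch of what MAKE_LOG_VARS expands to with LOG=debug,cmdlines, so sub-makes
# (for instance the one in DefineMainTargets) see the same logging setup:
#
#   LOG_LEVEL=debug LOG_NOFILE= LOG_CMDLINES=true LOG_REPORT= \
#   LOG_PROFILE_TIMES_LOG= LOG_PROFILE_TIMES_FILE=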
|
||||
@@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
# Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
@@ -92,7 +92,7 @@ SRC_SUBDIRS += share/classes
|
||||
|
||||
SPEC_SUBDIRS += share/specs
|
||||
|
||||
MAN_SUBDIRS += share/man
|
||||
MAN_SUBDIRS += share/man windows/man
|
||||
|
||||
# Find all module-info.java files for the current build target platform and
|
||||
# configuration.
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -1092,9 +1092,9 @@ var getJibProfilesDependencies = function (input, common) {
|
||||
windows_x64: "VS2022-17.6.5+1.0",
|
||||
linux_aarch64: "gcc13.2.0-OL7.6+1.0",
|
||||
linux_arm: "gcc8.2.0-Fedora27+1.0",
|
||||
linux_ppc64le: "gcc8.2.0-Fedora27+1.0",
|
||||
linux_s390x: "gcc8.2.0-Fedora27+1.0",
|
||||
linux_riscv64: "gcc11.3.0-Fedora_rawhide_68692+1.1"
|
||||
linux_ppc64le: "gcc13.2.0-Fedora_41+1.0",
|
||||
linux_s390x: "gcc13.2.0-Fedora_41+1.0",
|
||||
linux_riscv64: "gcc13.2.0-Fedora_41+1.0"
|
||||
};
|
||||
|
||||
var devkit_platform = (input.target_cpu == "x86"
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
# Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
@@ -63,18 +63,14 @@ ifeq ($(BASE_OS), OL)
|
||||
LINUX_VERSION := OL6.4
|
||||
endif
|
||||
else ifeq ($(BASE_OS), Fedora)
|
||||
DEFAULT_OS_VERSION := 41
|
||||
ifeq ($(BASE_OS_VERSION), )
|
||||
BASE_OS_VERSION := $(DEFAULT_OS_VERSION)
|
||||
endif
|
||||
ifeq ($(ARCH), riscv64)
|
||||
DEFAULT_OS_VERSION := rawhide/68692
|
||||
ifeq ($(BASE_OS_VERSION), )
|
||||
BASE_OS_VERSION := $(DEFAULT_OS_VERSION)
|
||||
endif
|
||||
BASE_URL := http://fedora.riscv.rocks/repos-dist/$(BASE_OS_VERSION)/$(ARCH)/Packages/
|
||||
BASE_URL := http://fedora.riscv.rocks/repos-dist/f$(BASE_OS_VERSION)/latest/$(ARCH)/Packages/
|
||||
else
|
||||
DEFAULT_OS_VERSION := 27
|
||||
LATEST_ARCHIVED_OS_VERSION := 35
|
||||
ifeq ($(BASE_OS_VERSION), )
|
||||
BASE_OS_VERSION := $(DEFAULT_OS_VERSION)
|
||||
endif
|
||||
ifeq ($(filter x86_64 armhfp, $(ARCH)), )
|
||||
FEDORA_TYPE := fedora-secondary
|
||||
else
|
||||
@@ -203,7 +199,7 @@ RPM_LIST := \
|
||||
glibc glibc-headers glibc-devel \
|
||||
cups-libs cups-devel \
|
||||
libX11 libX11-devel \
|
||||
xorg-x11-proto-devel \
|
||||
libxcb xorg-x11-proto-devel \
|
||||
alsa-lib alsa-lib-devel \
|
||||
libXext libXext-devel \
|
||||
libXtst libXtst-devel \
|
||||
@@ -441,8 +437,9 @@ $(gcc) \
|
||||
# wants.
|
||||
$(BUILDDIR)/$(binutils_ver)/Makefile : CONFIG += --enable-64-bit-bfd --libdir=$(PREFIX)/$(word 1,$(LIBDIRS))
|
||||
|
||||
ifneq ($(ARCH), riscv64)
|
||||
# gold is not available for riscv64 for some reason,
|
||||
ifeq ($(filter $(ARCH), s390x riscv64 ppc64le), )
|
||||
# gold compiles but cannot link properly on s390x with gcc 13.2 and Fedora 41
|
||||
# gold is not available for riscv64 and ppc64le,
|
||||
# and subsequent linking will fail if we try to enable it.
|
||||
LINKER_CONFIG := --enable-gold=default
|
||||
endif
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
# Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
@@ -125,9 +125,7 @@ else ifeq ($(call And, $(call isTargetOs, linux) $(call isTargetCpu, aarch64)),
|
||||
endif
|
||||
endif
|
||||
|
||||
ifeq ($(call isTargetOs, linux macosx windows), true)
|
||||
JVM_PRECOMPILED_HEADER := $(TOPDIR)/src/hotspot/share/precompiled/precompiled.hpp
|
||||
endif
|
||||
JVM_PRECOMPILED_HEADER := $(TOPDIR)/src/hotspot/share/precompiled/precompiled.hpp
|
||||
|
||||
ifeq ($(call isTargetCpu, x86), true)
|
||||
JVM_EXCLUDE_PATTERNS += x86_64
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
# Copyright (c) 2013, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
@@ -52,15 +52,23 @@ ifneq ($(FDLIBM_CFLAGS), )
|
||||
endif
|
||||
|
||||
ifeq ($(call isTargetOs, linux), true)
|
||||
BUILD_LIBJVM_sharedRuntimeTrig.cpp_CXXFLAGS := -DNO_PCH $(FDLIBM_CFLAGS) $(LIBJVM_FDLIBM_COPY_OPT_FLAG)
|
||||
BUILD_LIBJVM_sharedRuntimeTrans.cpp_CXXFLAGS := -DNO_PCH $(FDLIBM_CFLAGS) $(LIBJVM_FDLIBM_COPY_OPT_FLAG)
|
||||
BUILD_LIBJVM_sharedRuntimeTrig.cpp_CXXFLAGS := $(FDLIBM_CFLAGS) $(LIBJVM_FDLIBM_COPY_OPT_FLAG)
|
||||
BUILD_LIBJVM_sharedRuntimeTrans.cpp_CXXFLAGS := $(FDLIBM_CFLAGS) $(LIBJVM_FDLIBM_COPY_OPT_FLAG)
|
||||
|
||||
ifeq ($(TOOLCHAIN_TYPE), clang)
|
||||
JVM_PRECOMPILED_HEADER_EXCLUDE := \
|
||||
sharedRuntimeTrig.cpp \
|
||||
sharedRuntimeTrans.cpp \
|
||||
sharedRuntimeTrig.cpp \
|
||||
sharedRuntimeTrans.cpp \
|
||||
$(OPT_SPEED_SRC) \
|
||||
#
|
||||
#
|
||||
endif
|
||||
|
||||
ifeq ($(call isTargetCpu, ppc64le)+$(TOOLCHAIN_TYPE), true+gcc)
|
||||
JVM_PRECOMPILED_HEADER_EXCLUDE := \
|
||||
sharedRuntimeTrig.cpp \
|
||||
sharedRuntimeTrans.cpp \
|
||||
$(OPT_SPEED_SRC) \
|
||||
#
|
||||
endif
|
||||
|
||||
ifeq ($(call isTargetCpu, x86), true)
|
||||
@@ -110,11 +118,7 @@ else ifeq ($(call isTargetOs, macosx), true)
|
||||
endif
|
||||
|
||||
else ifeq ($(call isTargetOs, aix), true)
|
||||
ifeq ($(TOOLCHAIN_TYPE), clang)
|
||||
BUILD_LIBJVM_synchronizer.cpp_CXXFLAGS := -fno-inline
|
||||
else
|
||||
BUILD_LIBJVM_synchronizer.cpp_CXXFLAGS := -qnoinline
|
||||
endif
|
||||
BUILD_LIBJVM_synchronizer.cpp_CXXFLAGS := -fno-inline
|
||||
BUILD_LIBJVM_sharedRuntimeTrans.cpp_CXXFLAGS := $(CXX_O_FLAG_NONE)
|
||||
# Disable aggressive optimizations for functions in sharedRuntimeTrig.cpp
|
||||
# and sharedRuntimeTrans.cpp on ppc64.
|
||||
@@ -138,6 +142,13 @@ else ifeq ($(call isTargetOs, aix), true)
|
||||
# Disable ELF decoder on AIX (AIX uses XCOFF).
|
||||
JVM_EXCLUDE_PATTERNS += elf
|
||||
|
||||
JVM_PRECOMPILED_HEADER_EXCLUDE := \
|
||||
sharedRuntimeTrig.cpp \
|
||||
sharedRuntimeTrans.cpp \
|
||||
synchronizer.cpp \
|
||||
$(OPT_SPEED_SRC) \
|
||||
#
|
||||
|
||||
else ifeq ($(call isTargetOs, windows), true)
|
||||
JVM_PRECOMPILED_HEADER_EXCLUDE := \
|
||||
bytecodeInterpreter.cpp \
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
# Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
@@ -393,8 +393,6 @@ else
|
||||
LIBFONTMANAGER_JDK_LIBS += libfreetype
|
||||
endif
|
||||
|
||||
LIBFONTMANAGER_OPTIMIZATION := HIGHEST
|
||||
|
||||
ifneq ($(filter $(TOOLCHAIN_TYPE), gcc clang), )
|
||||
# gcc (and to an extent clang) is particularly bad at optimizing these files,
|
||||
# causing a massive spike in compile time. We don't care about these
|
||||
@@ -405,7 +403,6 @@ endif
|
||||
|
||||
ifeq ($(call isTargetOs, windows), true)
|
||||
LIBFONTMANAGER_EXCLUDE_FILES += X11FontScaler.c X11TextRenderer.c
|
||||
LIBFONTMANAGER_OPTIMIZATION := HIGHEST
|
||||
else ifeq ($(call isTargetOs, macosx), true)
|
||||
LIBFONTMANAGER_EXCLUDE_FILES += X11FontScaler.c X11TextRenderer.c \
|
||||
lcdglyph.c lcdglyphDW.cpp
|
||||
@@ -426,7 +423,7 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBFONTMANAGER, \
|
||||
AccelGlyphCache.c, \
|
||||
CFLAGS := $(LIBFONTMANAGER_CFLAGS), \
|
||||
CXXFLAGS := $(LIBFONTMANAGER_CFLAGS), \
|
||||
OPTIMIZATION := $(LIBFONTMANAGER_OPTIMIZATION), \
|
||||
OPTIMIZATION := HIGHEST, \
|
||||
CFLAGS_windows = -DCC_NOEX, \
|
||||
EXTRA_HEADER_DIRS := $(LIBFONTMANAGER_EXTRA_HEADER_DIRS), \
|
||||
EXTRA_SRC := $(LIBFONTMANAGER_EXTRA_SRC), \
|
||||
|
||||
make/scripts/aix/ld.sh (new file, 27 lines)
@@ -0,0 +1,27 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright (c) 2025 SAP SE. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
# under the terms of the GNU General Public License version 2 only, as
|
||||
# published by the Free Software Foundation. Oracle designates this
|
||||
# particular file as subject to the "Classpath" exception as provided
|
||||
# by Oracle in the LICENSE file that accompanied this code.
|
||||
#
|
||||
# This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
# version 2 for more details (a copy is included in the LICENSE file that
|
||||
# accompanied this code).
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License version
|
||||
# 2 along with this work; if not, write to the Free Software Foundation,
|
||||
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#
|
||||
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
# or visit www.oracle.com if you need additional information or have any
|
||||
# questions.
|
||||
#
|
||||
unset LIBPATH
|
||||
exec /usr/bin/ld "$@"
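# A sketch of how this wrapper is wired in, based on the configure changes above:
# BASIC_POST_CONFIG_OUTPUT copies it to $OUTPUTDIR/ld.sh, and the AIX/clang link
# line then selects it with:
#
#   -fuse-ld=$OUTPUTDIR/ld.sh
#
# Unsetting LIBPATH first presumably keeps the build environment's runtime
# library path from influencing the native /usr/bin/ld invocation.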
|
||||
@@ -1217,15 +1217,24 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
|
||||
void LIR_Assembler::type_profile_helper(Register mdo,
|
||||
ciMethodData *md, ciProfileData *data,
|
||||
Register recv, Label* update_done) {
|
||||
|
||||
// Given a profile data offset, generate an Address which points to
|
||||
// the corresponding slot in mdo->data().
|
||||
// Clobbers rscratch2.
|
||||
auto slot_at = [=](ByteSize offset) -> Address {
|
||||
return __ form_address(rscratch2, mdo,
|
||||
md->byte_offset_of_slot(data, offset),
|
||||
LogBytesPerWord);
|
||||
};
|
||||
|
||||
for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
|
||||
Label next_test;
|
||||
// See if the receiver is receiver[n].
|
||||
__ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
|
||||
__ ldr(rscratch1, Address(rscratch2));
|
||||
__ ldr(rscratch1, slot_at(ReceiverTypeData::receiver_offset(i)));
|
||||
__ cmp(recv, rscratch1);
|
||||
__ br(Assembler::NE, next_test);
|
||||
Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
|
||||
__ addptr(data_addr, DataLayout::counter_increment);
|
||||
__ addptr(slot_at(ReceiverTypeData::receiver_count_offset(i)),
|
||||
DataLayout::counter_increment);
|
||||
__ b(*update_done);
|
||||
__ bind(next_test);
|
||||
}
|
||||
@@ -1233,15 +1242,12 @@ void LIR_Assembler::type_profile_helper(Register mdo,
|
||||
// Didn't find receiver; find next empty slot and fill it in
|
||||
for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
|
||||
Label next_test;
|
||||
__ lea(rscratch2,
|
||||
Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
|
||||
Address recv_addr(rscratch2);
|
||||
Address recv_addr(slot_at(ReceiverTypeData::receiver_offset(i)));
|
||||
__ ldr(rscratch1, recv_addr);
|
||||
__ cbnz(rscratch1, next_test);
|
||||
__ str(recv, recv_addr);
|
||||
__ mov(rscratch1, DataLayout::counter_increment);
|
||||
__ lea(rscratch2, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))));
|
||||
__ str(rscratch1, Address(rscratch2));
|
||||
__ str(rscratch1, slot_at(ReceiverTypeData::receiver_count_offset(i)));
|
||||
__ b(*update_done);
|
||||
__ bind(next_test);
|
||||
}
|
||||
@@ -1413,8 +1419,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
|
||||
// Object is null; update MDO and exit
|
||||
Address data_addr
|
||||
= __ form_address(rscratch2, mdo,
|
||||
md->byte_offset_of_slot(data, DataLayout::flags_offset()),
|
||||
0);
|
||||
md->byte_offset_of_slot(data, DataLayout::flags_offset()), 0);
|
||||
__ ldrb(rscratch1, data_addr);
|
||||
__ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
|
||||
__ strb(rscratch1, data_addr);
|
||||
@@ -2565,10 +2570,12 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
|
||||
for (i = 0; i < VirtualCallData::row_limit(); i++) {
|
||||
ciKlass* receiver = vc_data->receiver(i);
|
||||
if (receiver == nullptr) {
|
||||
Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
|
||||
__ mov_metadata(rscratch1, known_klass->constant_encoding());
|
||||
__ lea(rscratch2, recv_addr);
|
||||
__ str(rscratch1, Address(rscratch2));
|
||||
Address recv_addr =
|
||||
__ form_address(rscratch2, mdo,
|
||||
md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)),
|
||||
LogBytesPerWord);
|
||||
__ str(rscratch1, recv_addr);
|
||||
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
|
||||
__ addptr(data_addr, DataLayout::counter_increment);
|
||||
return;
|
||||
|
||||
@@ -1173,7 +1173,10 @@ public:
|
||||
|
||||
// Arithmetics
|
||||
|
||||
// Clobber: rscratch1, rscratch2
|
||||
void addptr(const Address &dst, int32_t src);
|
||||
|
||||
// Clobber: rscratch1
|
||||
void cmpptr(Register src1, Address src2);
|
||||
|
||||
void cmpoop(Register obj1, Register obj2);
|
||||
|
||||
src/hotspot/cpu/aarch64/stubDeclarations_aarch64.hpp (new file, 140 lines)
@@ -0,0 +1,140 @@
|
||||
/*
|
||||
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2025, Red Hat, Inc. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_AARCH64_STUBDECLARATIONS_HPP
|
||||
#define CPU_AARCH64_STUBDECLARATIONS_HPP
|
||||
|
||||
#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(initial, 10000) \
|
||||
|
||||
|
||||
#define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(continuation, 2000) \
|
||||
|
||||
|
||||
#define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(compiler, 30000 ZGC_ONLY(+10000)) \
|
||||
do_stub(compiler, vector_iota_indices) \
|
||||
do_arch_entry(aarch64, compiler, vector_iota_indices, \
|
||||
vector_iota_indices, vector_iota_indices) \
|
||||
do_stub(compiler, large_array_equals) \
|
||||
do_arch_entry(aarch64, compiler, large_array_equals, \
|
||||
large_array_equals, large_array_equals) \
|
||||
do_stub(compiler, large_arrays_hashcode_boolean) \
|
||||
do_arch_entry(aarch64, compiler, large_arrays_hashcode_boolean, \
|
||||
large_arrays_hashcode_boolean, \
|
||||
large_arrays_hashcode_boolean) \
|
||||
do_stub(compiler, large_arrays_hashcode_byte) \
|
||||
do_arch_entry(aarch64, compiler, large_arrays_hashcode_byte, \
|
||||
large_arrays_hashcode_byte, \
|
||||
large_arrays_hashcode_byte) \
|
||||
do_stub(compiler, large_arrays_hashcode_char) \
|
||||
do_arch_entry(aarch64, compiler, large_arrays_hashcode_char, \
|
||||
large_arrays_hashcode_char, \
|
||||
large_arrays_hashcode_char) \
|
||||
do_stub(compiler, large_arrays_hashcode_short) \
|
||||
do_arch_entry(aarch64, compiler, large_arrays_hashcode_short, \
|
||||
large_arrays_hashcode_short, \
|
||||
large_arrays_hashcode_short) \
|
||||
do_stub(compiler, large_arrays_hashcode_int) \
|
||||
do_arch_entry(aarch64, compiler, large_arrays_hashcode_int, \
|
||||
large_arrays_hashcode_int, \
|
||||
large_arrays_hashcode_int) \
|
||||
do_stub(compiler, large_byte_array_inflate) \
|
||||
do_arch_entry(aarch64, compiler, large_byte_array_inflate, \
|
||||
large_byte_array_inflate, large_byte_array_inflate) \
|
||||
do_stub(compiler, count_positives) \
|
||||
do_arch_entry(aarch64, compiler, count_positives, count_positives, \
|
||||
count_positives) \
|
||||
do_stub(compiler, count_positives_long) \
|
||||
do_arch_entry(aarch64, compiler, count_positives_long, \
|
||||
count_positives_long, count_positives_long) \
|
||||
do_stub(compiler, compare_long_string_LL) \
|
||||
do_arch_entry(aarch64, compiler, compare_long_string_LL, \
|
||||
compare_long_string_LL, compare_long_string_LL) \
|
||||
do_stub(compiler, compare_long_string_UU) \
|
||||
do_arch_entry(aarch64, compiler, compare_long_string_UU, \
|
||||
compare_long_string_UU, compare_long_string_UU) \
|
||||
do_stub(compiler, compare_long_string_LU) \
|
||||
do_arch_entry(aarch64, compiler, compare_long_string_LU, \
|
||||
compare_long_string_LU, compare_long_string_LU) \
|
||||
do_stub(compiler, compare_long_string_UL) \
|
||||
do_arch_entry(aarch64, compiler, compare_long_string_UL, \
|
||||
compare_long_string_UL, compare_long_string_UL) \
|
||||
do_stub(compiler, string_indexof_linear_ll) \
|
||||
do_arch_entry(aarch64, compiler, string_indexof_linear_ll, \
|
||||
string_indexof_linear_ll, string_indexof_linear_ll) \
|
||||
do_stub(compiler, string_indexof_linear_uu) \
|
||||
do_arch_entry(aarch64, compiler, string_indexof_linear_uu, \
|
||||
string_indexof_linear_uu, string_indexof_linear_uu) \
|
||||
do_stub(compiler, string_indexof_linear_ul) \
|
||||
do_arch_entry(aarch64, compiler, string_indexof_linear_ul, \
|
||||
string_indexof_linear_ul, string_indexof_linear_ul) \
|
||||
/* this uses the entry for ghash_processBlocks */ \
|
||||
do_stub(compiler, ghash_processBlocks_wide) \
|
||||
|
||||
|
||||
#define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(final, 20000 ZGC_ONLY(+100000)) \
|
||||
do_stub(final, copy_byte_f) \
|
||||
do_arch_entry(aarch64, final, copy_byte_f, copy_byte_f, \
|
||||
copy_byte_f) \
|
||||
do_stub(final, copy_byte_b) \
|
||||
do_arch_entry(aarch64, final, copy_byte_b, copy_byte_b, \
|
||||
copy_byte_b) \
|
||||
do_stub(final, copy_oop_f) \
|
||||
do_arch_entry(aarch64, final, copy_oop_f, copy_oop_f, copy_oop_f) \
|
||||
do_stub(final, copy_oop_b) \
|
||||
do_arch_entry(aarch64, final, copy_oop_b, copy_oop_b, copy_oop_b) \
|
||||
do_stub(final, copy_oop_uninit_f) \
|
||||
do_arch_entry(aarch64, final, copy_oop_uninit_f, copy_oop_uninit_f, \
|
||||
copy_oop_uninit_f) \
|
||||
do_stub(final, copy_oop_uninit_b) \
|
||||
do_arch_entry(aarch64, final, copy_oop_uninit_b, copy_oop_uninit_b, \
|
||||
copy_oop_uninit_b) \
|
||||
do_stub(final, zero_blocks) \
|
||||
do_arch_entry(aarch64, final, zero_blocks, zero_blocks, \
|
||||
zero_blocks) \
|
||||
do_stub(final, spin_wait) \
|
||||
do_arch_entry_init(aarch64, final, spin_wait, spin_wait, \
|
||||
spin_wait, empty_spin_wait) \
|
||||
/* stub only -- entries are not stored in StubRoutines::aarch64 */ \
|
||||
/* n.b. these are not the same as the generic atomic stubs */ \
|
||||
do_stub(final, atomic_entry_points) \
|
||||
|
||||
|
||||
#endif // CPU_AARCH64_STUBDECLARATIONS_HPP
|
||||
File diff suppressed because it is too large
@@ -29,40 +29,22 @@
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
// Implementation of the platform-specific part of StubRoutines - for
|
||||
// a description of how to extend it, see the stubRoutines.hpp file.
|
||||
|
||||
address StubRoutines::aarch64::_get_previous_sp_entry = nullptr;
|
||||
|
||||
address StubRoutines::aarch64::_f2i_fixup = nullptr;
|
||||
address StubRoutines::aarch64::_f2l_fixup = nullptr;
|
||||
address StubRoutines::aarch64::_d2i_fixup = nullptr;
|
||||
address StubRoutines::aarch64::_d2l_fixup = nullptr;
|
||||
address StubRoutines::aarch64::_vector_iota_indices = nullptr;
|
||||
address StubRoutines::aarch64::_float_sign_mask = nullptr;
|
||||
address StubRoutines::aarch64::_float_sign_flip = nullptr;
|
||||
address StubRoutines::aarch64::_double_sign_mask = nullptr;
|
||||
address StubRoutines::aarch64::_double_sign_flip = nullptr;
|
||||
address StubRoutines::aarch64::_zero_blocks = nullptr;
|
||||
address StubRoutines::aarch64::_count_positives = nullptr;
|
||||
address StubRoutines::aarch64::_count_positives_long = nullptr;
|
||||
address StubRoutines::aarch64::_large_array_equals = nullptr;
|
||||
address StubRoutines::aarch64::_large_arrays_hashcode_boolean = nullptr;
|
||||
address StubRoutines::aarch64::_large_arrays_hashcode_byte = nullptr;
|
||||
address StubRoutines::aarch64::_large_arrays_hashcode_char = nullptr;
|
||||
address StubRoutines::aarch64::_large_arrays_hashcode_int = nullptr;
|
||||
address StubRoutines::aarch64::_large_arrays_hashcode_short = nullptr;
|
||||
address StubRoutines::aarch64::_compare_long_string_LL = nullptr;
|
||||
address StubRoutines::aarch64::_compare_long_string_UU = nullptr;
|
||||
address StubRoutines::aarch64::_compare_long_string_LU = nullptr;
|
||||
address StubRoutines::aarch64::_compare_long_string_UL = nullptr;
|
||||
address StubRoutines::aarch64::_string_indexof_linear_ll = nullptr;
|
||||
address StubRoutines::aarch64::_string_indexof_linear_uu = nullptr;
|
||||
address StubRoutines::aarch64::_string_indexof_linear_ul = nullptr;
|
||||
address StubRoutines::aarch64::_large_byte_array_inflate = nullptr;
|
||||
// function used as default for spin_wait stub
|
||||
|
||||
static void empty_spin_wait() { }
|
||||
address StubRoutines::aarch64::_spin_wait = CAST_FROM_FN_PTR(address, empty_spin_wait);
|
||||
|
||||
// define fields for arch-specific entries
|
||||
|
||||
#define DEFINE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \
|
||||
address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = nullptr;
|
||||
|
||||
#define DEFINE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
|
||||
address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function);
|
||||
|
||||
STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT)
|
||||
|
||||
#undef DEFINE_ARCH_ENTRY_INIT
|
||||
#undef DEFINE_ARCH_ENTRY
|
||||
|
||||
bool StubRoutines::aarch64::_completed = false;
|
||||
|
||||
|
||||
@@ -34,134 +34,66 @@ static bool returns_to_call_stub(address return_pc) {
|
||||
return return_pc == _call_stub_return_address;
|
||||
}
|
||||
|
||||
// emit enum used to size per-blob code buffers
|
||||
|
||||
#define DEFINE_BLOB_SIZE(blob_name, size) \
|
||||
_ ## blob_name ## _code_size = size,
|
||||
|
||||
enum platform_dependent_constants {
|
||||
// simply increase sizes if too small (assembler will crash if too small)
|
||||
_initial_stubs_code_size = 10000,
|
||||
_continuation_stubs_code_size = 2000,
|
||||
_compiler_stubs_code_size = 30000 ZGC_ONLY(+10000),
|
||||
_final_stubs_code_size = 20000 ZGC_ONLY(+100000)
|
||||
STUBGEN_ARCH_BLOBS_DO(DEFINE_BLOB_SIZE)
|
||||
};
|
||||
|
||||
#undef DEFINE_BLOB_SIZE
|
||||
|
||||
class aarch64 {
|
||||
friend class StubGenerator;
|
||||
#if INCLUDE_JVMCI
|
||||
friend class JVMCIVMStructs;
|
||||
#endif
|
||||
|
||||
private:
|
||||
static address _get_previous_sp_entry;
|
||||
// declare fields for arch-specific entries
|
||||
|
||||
static address _f2i_fixup;
|
||||
static address _f2l_fixup;
|
||||
static address _d2i_fixup;
|
||||
static address _d2l_fixup;
|
||||
#define DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \
|
||||
static address STUB_FIELD_NAME(field_name) ;
|
||||
|
||||
static address _vector_iota_indices;
|
||||
static address _float_sign_mask;
|
||||
static address _float_sign_flip;
|
||||
static address _double_sign_mask;
|
||||
static address _double_sign_flip;
|
||||
#define DECLARE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
|
||||
DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name)
|
||||
|
||||
static address _zero_blocks;
|
||||
private:
|
||||
STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT)
|
||||
|
||||
static address _large_array_equals;
|
||||
static address _large_arrays_hashcode_boolean;
|
||||
static address _large_arrays_hashcode_byte;
|
||||
static address _large_arrays_hashcode_char;
|
||||
static address _large_arrays_hashcode_int;
|
||||
static address _large_arrays_hashcode_short;
|
||||
static address _compare_long_string_LL;
|
||||
static address _compare_long_string_LU;
|
||||
static address _compare_long_string_UL;
|
||||
static address _compare_long_string_UU;
|
||||
static address _string_indexof_linear_ll;
|
||||
static address _string_indexof_linear_uu;
|
||||
static address _string_indexof_linear_ul;
|
||||
static address _large_byte_array_inflate;
|
||||
|
||||
static address _spin_wait;
|
||||
#undef DECLARE_ARCH_ENTRY_INIT
|
||||
#undef DECLARE_ARCH_ENTRY
|
||||
|
||||
static bool _completed;
|
||||
|
||||
public:
|
||||
|
||||
static address _count_positives;
|
||||
static address _count_positives_long;
|
||||
// declare getters for arch-specific entries
|
||||
|
||||
static address get_previous_sp_entry()
|
||||
{
|
||||
return _get_previous_sp_entry;
|
||||
}
|
||||
#define DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name) \
|
||||
static address getter_name() { return STUB_FIELD_NAME(field_name) ; }
|
||||
|
||||
static address f2i_fixup()
|
||||
{
|
||||
return _f2i_fixup;
|
||||
}
|
||||
#define DEFINE_ARCH_ENTRY_GETTER_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
|
||||
DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name)
|
||||
|
||||
static address f2l_fixup()
|
||||
{
|
||||
return _f2l_fixup;
|
||||
}
|
||||
STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT)
|
||||
|
||||
static address d2i_fixup()
|
||||
{
|
||||
return _d2i_fixup;
|
||||
}
|
||||
|
||||
static address d2l_fixup()
|
||||
{
|
||||
return _d2l_fixup;
|
||||
}
|
||||
|
||||
static address vector_iota_indices() {
|
||||
return _vector_iota_indices;
|
||||
}
|
||||
|
||||
static address float_sign_mask()
|
||||
{
|
||||
return _float_sign_mask;
|
||||
}
|
||||
|
||||
static address float_sign_flip()
|
||||
{
|
||||
return _float_sign_flip;
|
||||
}
|
||||
|
||||
static address double_sign_mask()
|
||||
{
|
||||
return _double_sign_mask;
|
||||
}
|
||||
|
||||
static address double_sign_flip()
|
||||
{
|
||||
return _double_sign_flip;
|
||||
}
|
||||
|
||||
static address zero_blocks() {
|
||||
return _zero_blocks;
|
||||
}
|
||||
|
||||
static address count_positives() {
|
||||
return _count_positives;
|
||||
}
|
||||
|
||||
static address count_positives_long() {
|
||||
return _count_positives_long;
|
||||
}
|
||||
|
||||
static address large_array_equals() {
|
||||
return _large_array_equals;
|
||||
}
|
||||
#undef DEFINE_ARCH_ENTRY_GETTER_INIT
|
||||
#undef DEFINE_ARCH_ENTRY_GETTER
|
||||
|
||||
static address large_arrays_hashcode(BasicType eltype) {
|
||||
switch (eltype) {
|
||||
case T_BOOLEAN:
|
||||
return _large_arrays_hashcode_boolean;
|
||||
return large_arrays_hashcode_boolean();
|
||||
case T_BYTE:
|
||||
return _large_arrays_hashcode_byte;
|
||||
return large_arrays_hashcode_byte();
|
||||
case T_CHAR:
|
||||
return _large_arrays_hashcode_char;
|
||||
return large_arrays_hashcode_char();
|
||||
case T_SHORT:
|
||||
return _large_arrays_hashcode_short;
|
||||
return large_arrays_hashcode_short();
|
||||
case T_INT:
|
||||
return _large_arrays_hashcode_int;
|
||||
return large_arrays_hashcode_int();
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
@@ -169,42 +101,6 @@ class aarch64 {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
static address compare_long_string_LL() {
|
||||
return _compare_long_string_LL;
|
||||
}
|
||||
|
||||
static address compare_long_string_LU() {
|
||||
return _compare_long_string_LU;
|
||||
}
|
||||
|
||||
static address compare_long_string_UL() {
|
||||
return _compare_long_string_UL;
|
||||
}
|
||||
|
||||
static address compare_long_string_UU() {
|
||||
return _compare_long_string_UU;
|
||||
}
|
||||
|
||||
static address string_indexof_linear_ul() {
|
||||
return _string_indexof_linear_ul;
|
||||
}
|
||||
|
||||
static address string_indexof_linear_ll() {
|
||||
return _string_indexof_linear_ll;
|
||||
}
|
||||
|
||||
static address string_indexof_linear_uu() {
|
||||
return _string_indexof_linear_uu;
|
||||
}
|
||||
|
||||
static address large_byte_array_inflate() {
|
||||
return _large_byte_array_inflate;
|
||||
}
|
||||
|
||||
static address spin_wait() {
|
||||
return _spin_wait;
|
||||
}
|
||||
|
||||
static bool complete() {
|
||||
return _completed;
|
||||
}
|
||||
|
||||
src/hotspot/cpu/arm/stubDeclarations_arm.hpp (new file, 68 lines)
@@ -0,0 +1,68 @@
|
||||
/*
|
||||
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2025, Red Hat, Inc. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_ARM_STUBDECLARATIONS_HPP
|
||||
#define CPU_ARM_STUBDECLARATIONS_HPP
|
||||
|
||||
#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(initial, 9000) \
|
||||
do_stub(initial, idiv_irem) \
|
||||
do_arch_entry(Arm, initial, idiv_irem, \
|
||||
idiv_irem_entry, idiv_irem_entry) \
|
||||
do_stub(initial, atomic_load_long) \
|
||||
do_arch_entry(Arm, initial, atomic_load_long, \
|
||||
atomic_load_long_entry, atomic_load_long_entry) \
|
||||
do_stub(initial, atomic_store_long) \
|
||||
do_arch_entry(Arm, initial, atomic_load_long, \
|
||||
atomic_store_long_entry, atomic_store_long_entry) \
|
||||
|
||||
#define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(continuation, 2000) \
|
||||
|
||||
|
||||
#define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(compiler, 22000) \
|
||||
do_stub(compiler, partial_subtype_check) \
|
||||
do_arch_entry(Arm, compiler, partial_subtype_check, \
|
||||
partial_subtype_check, partial_subtype_check) \
|
||||
|
||||
|
||||
#define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(final, 22000) \
|
||||
|
||||
|
||||
#endif // CPU_ARM_STUBDECLARATIONS_HPP
|
||||
@@ -172,7 +172,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
private:
|
||||
|
||||
address generate_call_stub(address& return_address) {
|
||||
StubCodeMark mark(this, "StubRoutines", "call_stub");
|
||||
StubGenStubId stub_id = StubGenStubId::call_stub_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
address start = __ pc();
|
||||
|
||||
|
||||
@@ -251,7 +252,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
// (in) Rexception_obj: exception oop
|
||||
address generate_catch_exception() {
|
||||
StubCodeMark mark(this, "StubRoutines", "catch_exception");
|
||||
StubGenStubId stub_id = StubGenStubId::catch_exception_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
address start = __ pc();
|
||||
|
||||
__ str(Rexception_obj, Address(Rthread, Thread::pending_exception_offset()));
|
||||
@@ -263,7 +265,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
// (in) Rexception_pc: return address
|
||||
address generate_forward_exception() {
|
||||
StubCodeMark mark(this, "StubRoutines", "forward exception");
|
||||
StubGenStubId stub_id = StubGenStubId::forward_exception_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
address start = __ pc();
|
||||
|
||||
__ mov(c_rarg0, Rthread);
|
||||
@@ -312,6 +315,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
Register tmp = LR;
|
||||
assert(dividend == remainder, "must be");
|
||||
|
||||
StubGenStubId stub_id = StubGenStubId::idiv_irem_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
address start = __ pc();
|
||||
|
||||
// Check for special cases: divisor <= 0 or dividend < 0
|
||||
@@ -453,7 +458,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
address generate_atomic_add() {
|
||||
address start;
|
||||
|
||||
StubCodeMark mark(this, "StubRoutines", "atomic_add");
|
||||
StubGenStubId stub_id = StubGenStubId::atomic_add_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
Label retry;
|
||||
start = __ pc();
|
||||
Register addval = R0;
|
||||
@@ -504,7 +510,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
address generate_atomic_xchg() {
|
||||
address start;
|
||||
|
||||
StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
|
||||
StubGenStubId stub_id = StubGenStubId::atomic_xchg_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
start = __ pc();
|
||||
Register newval = R0;
|
||||
Register dest = R1;
|
||||
@@ -554,7 +561,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
address generate_atomic_cmpxchg() {
|
||||
address start;
|
||||
|
||||
StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
|
||||
StubGenStubId stub_id = StubGenStubId::atomic_cmpxchg_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
start = __ pc();
|
||||
Register cmp = R0;
|
||||
Register newval = R1;
|
||||
@@ -592,7 +600,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
address generate_atomic_cmpxchg_long() {
|
||||
address start;
|
||||
|
||||
StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
|
||||
StubGenStubId stub_id = StubGenStubId::atomic_cmpxchg_long_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
start = __ pc();
|
||||
Register cmp_lo = R0;
|
||||
Register cmp_hi = R1;
|
||||
@@ -629,7 +638,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
address generate_atomic_load_long() {
|
||||
address start;
|
||||
|
||||
StubCodeMark mark(this, "StubRoutines", "atomic_load_long");
|
||||
StubGenStubId stub_id = StubGenStubId::atomic_load_long_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
start = __ pc();
|
||||
Register result_lo = R0;
|
||||
Register result_hi = R1;
|
||||
@@ -653,7 +663,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
address generate_atomic_store_long() {
|
||||
address start;
|
||||
|
||||
StubCodeMark mark(this, "StubRoutines", "atomic_store_long");
|
||||
StubGenStubId stub_id = StubGenStubId::atomic_store_long_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
start = __ pc();
|
||||
Register newval_lo = R0;
|
||||
Register newval_hi = R1;
|
||||
@@ -695,7 +706,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// raddr: LR, blown by call
|
||||
address generate_partial_subtype_check() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", "partial_subtype_check");
|
||||
StubGenStubId stub_id = StubGenStubId::partial_subtype_check_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
address start = __ pc();
|
||||
|
||||
// based on SPARC check_klass_subtype_[fast|slow]_path (without CompressedOops)
|
||||
@@ -784,7 +796,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// Non-destructive plausibility checks for oops
|
||||
|
||||
address generate_verify_oop() {
|
||||
StubCodeMark mark(this, "StubRoutines", "verify_oop");
|
||||
StubGenStubId stub_id = StubGenStubId::verify_oop_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
address start = __ pc();
|
||||
|
||||
// Incoming arguments:
|
||||
@@ -1985,6 +1998,23 @@ class StubGenerator: public StubCodeGenerator {
|
||||
return start_pc;
|
||||
}
|
||||
|
||||
/* Internal development flag */
|
||||
/* enabled by defining TEST_C2_GENERIC_ARRAYCOPY */
|
||||
|
||||
// With this flag, the C2 stubs are tested by generating calls to
|
||||
// generic_arraycopy instead of Runtime1::arraycopy
|
||||
|
||||
// Runtime1::arraycopy returns a status in R0 (0 if OK, else ~copied)
|
||||
// and the result is tested to see whether the arraycopy stub should
|
||||
// be called.
|
||||
|
||||
// When we test arraycopy this way, we must generate extra code in the
|
||||
// arraycopy methods callable from C2 generic_arraycopy to set the
|
||||
// status to 0 for those who always succeed (calling the slow path stub might
|
||||
// lead to errors since the copy has already been performed).
|
||||
|
||||
static const bool set_status;
|
||||
|
||||
//
|
||||
// Generate stub for primitive array copy. If "aligned" is true, the
|
||||
// "from" and "to" addresses are assumed to be heapword aligned.
|
||||
@@ -1997,9 +2027,109 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// to: R1
|
||||
// count: R2 treated as signed 32-bit int
|
||||
//
|
||||
address generate_primitive_copy(bool aligned, const char * name, bool status, int bytes_per_count, bool disjoint, address nooverlap_target = nullptr) {
|
||||
address generate_primitive_copy(StubGenStubId stub_id, address nooverlap_target = nullptr) {
|
||||
bool aligned;
|
||||
bool status;
|
||||
int bytes_per_count;
|
||||
bool disjoint;
|
||||
|
||||
switch (stub_id) {
case jbyte_disjoint_arraycopy_id:          aligned = false; status = true;       bytes_per_count = 1; disjoint = true;  break;
case jshort_disjoint_arraycopy_id:         aligned = false; status = true;       bytes_per_count = 2; disjoint = true;  break;
case jint_disjoint_arraycopy_id:           aligned = false; status = true;       bytes_per_count = 4; disjoint = true;  break;
case jlong_disjoint_arraycopy_id:          aligned = false; status = true;       bytes_per_count = 8; disjoint = true;  break;
case arrayof_jbyte_disjoint_arraycopy_id:  aligned = true;  status = set_status; bytes_per_count = 1; disjoint = true;  break;
case arrayof_jshort_disjoint_arraycopy_id: aligned = true;  status = set_status; bytes_per_count = 2; disjoint = true;  break;
case arrayof_jint_disjoint_arraycopy_id:   aligned = true;  status = set_status; bytes_per_count = 4; disjoint = true;  break;
case arrayof_jlong_disjoint_arraycopy_id:  aligned = false; status = set_status; bytes_per_count = 8; disjoint = true;  break;
case jbyte_arraycopy_id:                   aligned = false; status = true;       bytes_per_count = 1; disjoint = false; break;
case jshort_arraycopy_id:                  aligned = false; status = true;       bytes_per_count = 2; disjoint = false; break;
case jint_arraycopy_id:                    aligned = false; status = true;       bytes_per_count = 4; disjoint = false; break;
case jlong_arraycopy_id:                   aligned = false; status = true;       bytes_per_count = 8; disjoint = false; break;
case arrayof_jbyte_arraycopy_id:           aligned = true;  status = set_status; bytes_per_count = 1; disjoint = false; break;
case arrayof_jshort_arraycopy_id:          aligned = true;  status = set_status; bytes_per_count = 2; disjoint = false; break;
case arrayof_jint_arraycopy_id:            aligned = true;  status = set_status; bytes_per_count = 4; disjoint = false; break;
default:                                   ShouldNotReachHere();
}

__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
StubCodeMark mark(this, stub_id);
address start = __ pc();

const Register from = R0; // source array address
@@ -2171,9 +2301,38 @@ class StubGenerator: public StubCodeGenerator {
// to: R1
// count: R2 treated as signed 32-bit int
//
address generate_oop_copy(bool aligned, const char * name, bool status, bool disjoint, address nooverlap_target = nullptr) {
address generate_oop_copy(StubGenStubId stub_id, address nooverlap_target = nullptr) {
bool aligned;
bool status;
bool disjoint;

switch (stub_id) {
case oop_disjoint_arraycopy_id:         aligned = false; status = true;       disjoint = true;  break;
case arrayof_oop_disjoint_arraycopy_id: aligned = true;  status = set_status; disjoint = true;  break;
case oop_arraycopy_id:                  aligned = false; status = true;       disjoint = false; break;
case arrayof_oop_arraycopy_id:          aligned = true;  status = set_status; disjoint = false; break;
default:                                ShouldNotReachHere();
}

__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
StubCodeMark mark(this, stub_id);
address start = __ pc();

Register from = R0;
@@ -2308,7 +2467,7 @@ class StubGenerator: public StubCodeGenerator {
// Examines the alignment of the operands and dispatches
// to a long, int, short, or byte copy loop.
//
address generate_unsafe_copy(const char* name) {
address generate_unsafe_copy() {

const Register R0_from = R0; // source array address
const Register R1_to = R1; // destination array address
@@ -2317,7 +2476,8 @@ class StubGenerator: public StubCodeGenerator {
const Register R3_bits = R3; // test copy of low bits

__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
StubGenStubId stub_id = StubGenStubId::unsafe_arraycopy_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();
const Register tmp = Rtemp;

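The dispatch described by the comment above amounts to OR-ing the two addresses and the byte count together and testing the low bits of the result (that is what the "test copy of low bits" register R3_bits is used for). A self-contained C++ sketch of the same decision, with invented names rather than the stub's actual assembly:

#include <cstddef>
#include <cstdint>

// Picks the widest copy element that from, to and count are all aligned to.
// Illustrative only; the real stub performs this test in generated ARM code.
static int unsafe_copy_element_size(const void* from, const void* to, size_t count) {
  uintptr_t bits = (uintptr_t)from | (uintptr_t)to | (uintptr_t)count;
  if ((bits & 7) == 0) return 8;   // all 8-byte aligned -> long copy loop
  if ((bits & 3) == 0) return 4;   // all 4-byte aligned -> int copy loop
  if ((bits & 1) == 0) return 2;   // all 2-byte aligned -> short copy loop
  return 1;                        // otherwise -> byte copy loop
}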
@@ -2442,9 +2602,10 @@ class StubGenerator: public StubCodeGenerator {
// ckval: R4 (super_klass)
// ret: R0 zero for success; (-1^K) where K is partial transfer count (32-bit)
//
address generate_checkcast_copy(const char * name) {
address generate_checkcast_copy() {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
StubGenStubId stub_id = StubGenStubId::checkcast_arraycopy_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();

const Register from = R0; // source array address
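The `(-1^K)` return convention above means the stub hands back the bitwise complement of the number of elements copied before one failed the element type check. A small, self-contained example of decoding it (hypothetical caller code, not part of the diff):

#include <cstdio>

// ret == 0       : every element was copied
// ret == ~K (< 0): K elements were copied before a type check failed
static void report_checkcast_copy(int ret, int requested) {
  if (ret == 0) {
    std::printf("copied all %d elements\n", requested);
  } else {
    int copied = ~ret;   // e.g. ret == -4  =>  3 elements were copied
    std::printf("type check failed after %d of %d elements\n", copied, requested);
  }
}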
@@ -2595,7 +2756,7 @@ class StubGenerator: public StubCodeGenerator {
// R0 == 0 - success
// R0 < 0 - need to call System.arraycopy
//
address generate_generic_copy(const char *name) {
address generate_generic_copy() {
Label L_failed, L_objArray;

// Input registers
@@ -2611,7 +2772,8 @@ class StubGenerator: public StubCodeGenerator {
const Register R8_temp = R8;

__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
StubGenStubId stub_id = StubGenStubId::generic_arraycopy_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();

__ zap_high_non_significant_bits(R1);
@@ -2842,72 +3004,55 @@ class StubGenerator: public StubCodeGenerator {
// Note: the disjoint stubs must be generated first, some of
// the conjoint stubs use them.

bool status = false; // non-failing C2 stubs need not return a status in R0

#ifdef TEST_C2_GENERIC_ARRAYCOPY /* Internal development flag */
// With this flag, the C2 stubs are tested by generating calls to
// generic_arraycopy instead of Runtime1::arraycopy

// Runtime1::arraycopy returns a status in R0 (0 if OK, else ~copied)
// and the result is tested to see whether the arraycopy stub should
// be called.

// When we test arraycopy this way, we must generate extra code in the
// arraycopy methods callable from C2 generic_arraycopy to set the
// status to 0 for those that always succeed (calling the slow path stub might
// lead to errors since the copy has already been performed).

status = true; // generate a status compatible with C1 calls
#endif

address ucm_common_error_exit = generate_unsafecopy_common_error_exit();
UnsafeMemoryAccess::set_common_exit_stub_pc(ucm_common_error_exit);

// these always need a status in case they are called from generic_arraycopy
StubRoutines::_jbyte_disjoint_arraycopy  = generate_primitive_copy(false, "jbyte_disjoint_arraycopy", true, 1, true);
StubRoutines::_jshort_disjoint_arraycopy = generate_primitive_copy(false, "jshort_disjoint_arraycopy", true, 2, true);
StubRoutines::_jint_disjoint_arraycopy   = generate_primitive_copy(false, "jint_disjoint_arraycopy", true, 4, true);
StubRoutines::_jlong_disjoint_arraycopy  = generate_primitive_copy(false, "jlong_disjoint_arraycopy", true, 8, true);
StubRoutines::_oop_disjoint_arraycopy    = generate_oop_copy      (false, "oop_disjoint_arraycopy", true, true);
StubRoutines::_jbyte_disjoint_arraycopy  = generate_primitive_copy(StubGenStubId::jbyte_disjoint_arraycopy_id);
StubRoutines::_jshort_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::jshort_disjoint_arraycopy_id);
StubRoutines::_jint_disjoint_arraycopy   = generate_primitive_copy(StubGenStubId::jint_disjoint_arraycopy_id);
StubRoutines::_jlong_disjoint_arraycopy  = generate_primitive_copy(StubGenStubId::jlong_disjoint_arraycopy_id);
StubRoutines::_oop_disjoint_arraycopy    = generate_oop_copy      (StubGenStubId::oop_disjoint_arraycopy_id);

StubRoutines::_arrayof_jbyte_disjoint_arraycopy  = generate_primitive_copy(true, "arrayof_jbyte_disjoint_arraycopy", status, 1, true);
StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_primitive_copy(true, "arrayof_jshort_disjoint_arraycopy",status, 2, true);
StubRoutines::_arrayof_jint_disjoint_arraycopy   = generate_primitive_copy(true, "arrayof_jint_disjoint_arraycopy", status, 4, true);
StubRoutines::_arrayof_jlong_disjoint_arraycopy  = generate_primitive_copy(true, "arrayof_jlong_disjoint_arraycopy", status, 8, true);
StubRoutines::_arrayof_oop_disjoint_arraycopy    = generate_oop_copy      (true, "arrayof_oop_disjoint_arraycopy", status, true);
StubRoutines::_arrayof_jbyte_disjoint_arraycopy  = generate_primitive_copy(StubGenStubId::arrayof_jbyte_disjoint_arraycopy_id);
StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_primitive_copy(StubGenStubId::arrayof_jshort_disjoint_arraycopy_id);
StubRoutines::_arrayof_jint_disjoint_arraycopy   = generate_primitive_copy(StubGenStubId::arrayof_jint_disjoint_arraycopy_id);
StubRoutines::_arrayof_jlong_disjoint_arraycopy  = generate_primitive_copy(StubGenStubId::arrayof_jlong_disjoint_arraycopy_id);
StubRoutines::_arrayof_oop_disjoint_arraycopy    = generate_oop_copy      (StubGenStubId::arrayof_oop_disjoint_arraycopy_id);

// these always need a status in case they are called from generic_arraycopy
StubRoutines::_jbyte_arraycopy  = generate_primitive_copy(false, "jbyte_arraycopy", true, 1, false, StubRoutines::_jbyte_disjoint_arraycopy);
StubRoutines::_jshort_arraycopy = generate_primitive_copy(false, "jshort_arraycopy", true, 2, false, StubRoutines::_jshort_disjoint_arraycopy);
StubRoutines::_jint_arraycopy   = generate_primitive_copy(false, "jint_arraycopy", true, 4, false, StubRoutines::_jint_disjoint_arraycopy);
StubRoutines::_jlong_arraycopy  = generate_primitive_copy(false, "jlong_arraycopy", true, 8, false, StubRoutines::_jlong_disjoint_arraycopy);
StubRoutines::_oop_arraycopy    = generate_oop_copy      (false, "oop_arraycopy", true, false, StubRoutines::_oop_disjoint_arraycopy);
StubRoutines::_jbyte_arraycopy  = generate_primitive_copy(StubGenStubId::jbyte_arraycopy_id, StubRoutines::_jbyte_disjoint_arraycopy);
StubRoutines::_jshort_arraycopy = generate_primitive_copy(StubGenStubId::jshort_arraycopy_id, StubRoutines::_jshort_disjoint_arraycopy);
StubRoutines::_jint_arraycopy   = generate_primitive_copy(StubGenStubId::jint_arraycopy_id, StubRoutines::_jint_disjoint_arraycopy);
StubRoutines::_jlong_arraycopy  = generate_primitive_copy(StubGenStubId::jlong_arraycopy_id, StubRoutines::_jlong_disjoint_arraycopy);
StubRoutines::_oop_arraycopy    = generate_oop_copy      (StubGenStubId::oop_arraycopy_id, StubRoutines::_oop_disjoint_arraycopy);

StubRoutines::_arrayof_jbyte_arraycopy  = generate_primitive_copy(true, "arrayof_jbyte_arraycopy", status, 1, false, StubRoutines::_arrayof_jbyte_disjoint_arraycopy);
StubRoutines::_arrayof_jshort_arraycopy = generate_primitive_copy(true, "arrayof_jshort_arraycopy", status, 2, false, StubRoutines::_arrayof_jshort_disjoint_arraycopy);
StubRoutines::_arrayof_jbyte_arraycopy  = generate_primitive_copy(StubGenStubId::arrayof_jbyte_arraycopy_id, StubRoutines::_arrayof_jbyte_disjoint_arraycopy);
StubRoutines::_arrayof_jshort_arraycopy = generate_primitive_copy(StubGenStubId::arrayof_jshort_arraycopy_id, StubRoutines::_arrayof_jshort_disjoint_arraycopy);
#ifdef _LP64
// since sizeof(jint) < sizeof(HeapWord), there's a different flavor:
StubRoutines::_arrayof_jint_arraycopy = generate_primitive_copy(true, "arrayof_jint_arraycopy", status, 4, false, StubRoutines::_arrayof_jint_disjoint_arraycopy);
StubRoutines::_arrayof_jint_arraycopy = generate_primitive_copy(StubGenStubId::arrayof_jint_arraycopy_id, StubRoutines::_arrayof_jint_disjoint_arraycopy);
#else
StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy;
#endif
if (BytesPerHeapOop < HeapWordSize) {
StubRoutines::_arrayof_oop_arraycopy = generate_oop_copy (true, "arrayof_oop_arraycopy", status, false, StubRoutines::_arrayof_oop_disjoint_arraycopy);
StubRoutines::_arrayof_oop_arraycopy = generate_oop_copy (StubGenStubId::arrayof_oop_arraycopy_id, StubRoutines::_arrayof_oop_disjoint_arraycopy);
} else {
StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy;
}
StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy;

StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy");
StubRoutines::_unsafe_arraycopy    = generate_unsafe_copy("unsafe_arraycopy");
StubRoutines::_generic_arraycopy   = generate_generic_copy("generic_arraycopy");
StubRoutines::_checkcast_arraycopy = generate_checkcast_copy();
StubRoutines::_unsafe_arraycopy    = generate_unsafe_copy();
StubRoutines::_generic_arraycopy   = generate_generic_copy();


}
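The ordering note at the top of this hunk ("the disjoint stubs must be generated first") exists because each conjoint stub receives its disjoint counterpart as nooverlap_target and branches to it whenever a forward copy is safe. A hedged, self-contained C++ sketch of that shape, with invented names rather than the generated ARM code:

#include <cstddef>

// Illustration only: a conjoint copy falls back to the already-generated
// disjoint (forward) routine unless the destination overlaps the tail of
// the source, in which case it copies backward.
static void conjoint_copy_sketch(const char* from, char* to, size_t count,
                                 void (*nooverlap_target)(const char*, char*, size_t)) {
  if (to <= from || to >= from + count) {
    nooverlap_target(from, to, count);   // ranges do not force a backward copy
    return;
  }
  for (size_t i = count; i > 0; --i) {   // overlapping: copy from the end
    to[i - 1] = from[i - 1];
  }
}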

address generate_method_entry_barrier() {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "nmethod_entry_barrier");
StubGenStubId stub_id = StubGenStubId::method_entry_barrier_id;
StubCodeMark mark(this, stub_id);

Label deoptimize_label;

@@ -2960,22 +3105,22 @@ class StubGenerator: public StubCodeGenerator {
#undef __
#define __ masm->

address generate_cont_thaw(const char* label, Continuation::thaw_kind kind) {
address generate_cont_thaw(StubGenStubId stub_id) {
if (!Continuations::enabled()) return nullptr;
Unimplemented();
return nullptr;
}

address generate_cont_thaw() {
return generate_cont_thaw("Cont thaw", Continuation::thaw_top);
return generate_cont_thaw(StubGenStubId::cont_thaw_id);
}

address generate_cont_returnBarrier() {
return generate_cont_thaw("Cont thaw return barrier", Continuation::thaw_return_barrier);
return generate_cont_thaw(StubGenStubId::cont_returnBarrier_id);
}

address generate_cont_returnBarrier_exception() {
return generate_cont_thaw("Cont thaw return barrier exception", Continuation::thaw_return_barrier_exception);
return generate_cont_thaw(StubGenStubId::cont_returnBarrierExc_id);
}

//---------------------------------------------------------------------------
@@ -3007,8 +3152,8 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_atomic_xchg_entry = generate_atomic_xchg();
StubRoutines::_atomic_cmpxchg_entry = generate_atomic_cmpxchg();
StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
StubRoutines::_atomic_load_long_entry = generate_atomic_load_long();
StubRoutines::_atomic_store_long_entry = generate_atomic_store_long();
StubRoutines::Arm::_atomic_load_long_entry = generate_atomic_load_long();
StubRoutines::Arm::_atomic_store_long_entry = generate_atomic_store_long();

}

@@ -3058,27 +3203,36 @@ class StubGenerator: public StubCodeGenerator {
}

public:
StubGenerator(CodeBuffer* code, StubsKind kind) : StubCodeGenerator(code) {
switch(kind) {
case Initial_stubs:
StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) {
switch(blob_id) {
case initial_id:
generate_initial_stubs();
break;
case Continuation_stubs:
case continuation_id:
generate_continuation_stubs();
break;
case Compiler_stubs:
case compiler_id:
generate_compiler_stubs();
break;
case Final_stubs:
case final_id:
generate_final_stubs();
break;
default:
fatal("unexpected stubs kind: %d", kind);
fatal("unexpected blob id: %d", blob_id);
break;
};
}
}; // end class declaration

void StubGenerator_generate(CodeBuffer* code, StubCodeGenerator::StubsKind kind) {
StubGenerator g(code, kind);
void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id) {
StubGenerator g(code, blob_id);
}

// implementation of internal development flag

#ifdef TEST_C2_GENERIC_ARRAYCOPY
const bool StubGenerator::set_status = true; // generate a status compatible with C1 calls
#else
const bool StubGenerator::set_status = false; // non-failing C2 stubs need not return a status in R0
#endif

@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -119,7 +119,8 @@ void aes_init() {
|
||||
|
||||
address generate_aescrypt_encryptBlock() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", "aesencryptBlock");
|
||||
StubGenStubId stub_id = StubGenStubId::aescrypt_encryptBlock_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
|
||||
address start = __ pc();
|
||||
|
||||
@@ -316,7 +317,8 @@ address generate_aescrypt_encryptBlock() {
|
||||
|
||||
address generate_aescrypt_decryptBlock() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", "aesdecryptBlock");
|
||||
StubGenStubId stub_id = StubGenStubId::aescrypt_decryptBlock_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
|
||||
address start = __ pc();
|
||||
|
||||
@@ -536,7 +538,8 @@ address generate_cipherBlockChaining_encryptAESCrypt() {
|
||||
// [sp+4] Transposition Box reference
|
||||
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
|
||||
StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_encryptAESCrypt_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
|
||||
address start = __ pc();
|
||||
|
||||
@@ -601,7 +604,8 @@ address generate_cipherBlockChaining_encryptAESCrypt() {
|
||||
|
||||
address generate_cipherBlockChaining_decryptAESCrypt() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
|
||||
StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_decryptAESCrypt_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
|
||||
address start = __ pc();
|
||||
|
||||
|
||||
@@ -26,9 +26,13 @@
#include "runtime/frame.inline.hpp"
#include "runtime/stubRoutines.hpp"

address StubRoutines::Arm::_idiv_irem_entry = nullptr;
#define DEFINE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \
address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = nullptr;

address StubRoutines::Arm::_partial_subtype_check = nullptr;
#define DEFINE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function);

address StubRoutines::_atomic_load_long_entry = nullptr;
address StubRoutines::_atomic_store_long_entry = nullptr;
STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT)

#undef DEFINE_ARCH_ENTRY_INIT
#undef DEFINE_ARCH_ENTRY

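The DEFINE_ARCH_ENTRY / DEFINE_ARCH_ENTRY_INIT pair above is an X-macro pattern: one central list (STUBGEN_ARCH_ENTRIES_DO) is expanded once per use site to produce the field definitions. A self-contained toy version of the same idea, using invented names rather than the HotSpot macros:

#include <cstdio>

// One list macro drives every expansion site.
#define TOY_ENTRIES_DO(DO_PLAIN, DO_INIT) \
  DO_PLAIN(idiv_irem_entry)               \
  DO_INIT(partial_subtype_check, 0x1234)

// At the definition site the list becomes initialized variables.
#define TOY_DEFINE(name)             static void* name = nullptr;
#define TOY_DEFINE_INIT(name, value) static void* name = (void*)(value);
TOY_ENTRIES_DO(TOY_DEFINE, TOY_DEFINE_INIT)
#undef TOY_DEFINE_INIT
#undef TOY_DEFINE

int main() {
  std::printf("%p %p\n", idiv_irem_entry, partial_subtype_check);
  return 0;
}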
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -29,38 +29,53 @@
|
||||
// definition. See stubRoutines.hpp for a description on how to
|
||||
// extend it.
|
||||
|
||||
// emit enum used to size per-blob code buffers
|
||||
|
||||
#define DEFINE_BLOB_SIZE(blob_name, size) \
|
||||
_ ## blob_name ## _code_size = size,
|
||||
|
||||
enum platform_dependent_constants {
|
||||
// simply increase sizes if too small (assembler will crash if too small)
|
||||
_initial_stubs_code_size = 9000,
|
||||
_continuation_stubs_code_size = 2000,
|
||||
_compiler_stubs_code_size = 22000,
|
||||
_final_stubs_code_size = 22000
|
||||
STUBGEN_ARCH_BLOBS_DO(DEFINE_BLOB_SIZE)
|
||||
};
|
||||
|
||||
#undef DEFINE_BLOB_SIZE
|
||||
|
||||
public:
|
||||
static bool returns_to_call_stub(address return_pc) {
|
||||
return return_pc == _call_stub_return_address;
|
||||
}
|
||||
|
||||
class Arm {
|
||||
friend class StubGenerator;
|
||||
friend class VMStructs;
|
||||
|
||||
private:
|
||||
#define DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \
|
||||
static address STUB_FIELD_NAME(field_name) ;
|
||||
|
||||
static address _idiv_irem_entry;
|
||||
static address _partial_subtype_check;
|
||||
#define DECLARE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
|
||||
DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name)
|
||||
|
||||
public:
|
||||
private:
|
||||
STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT)
|
||||
|
||||
#undef DECLARE_ARCH_ENTRY_INIT
|
||||
#undef DECLARE_ARCH_ENTRY
|
||||
|
||||
public:
|
||||
|
||||
// declare getters for arch-specific entries
|
||||
|
||||
#define DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name) \
|
||||
static address getter_name() { return STUB_FIELD_NAME(field_name) ; }
|
||||
|
||||
#define DEFINE_ARCH_ENTRY_GETTER_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
|
||||
DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name)
|
||||
|
||||
STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT)
|
||||
|
||||
#undef DEFINE_ARCH_ENTRY_GETTER_INIT
|
||||
#undef DEFINE_ARCH_ENTRY_GETTER
|
||||
|
||||
static address idiv_irem_entry() { return _idiv_irem_entry; }
|
||||
static address partial_subtype_check() { return _partial_subtype_check; }
|
||||
};
|
||||
|
||||
static bool returns_to_call_stub(address return_pc) {
|
||||
return return_pc == _call_stub_return_address;
|
||||
}
|
||||
|
||||
static address _atomic_load_long_entry;
|
||||
static address _atomic_store_long_entry;
|
||||
|
||||
static address atomic_load_long_entry() { return _atomic_load_long_entry; }
|
||||
static address atomic_store_long_entry() { return _atomic_store_long_entry; }
|
||||
|
||||
|
||||
#endif // CPU_ARM_STUBROUTINES_ARM_HPP
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -562,20 +562,20 @@ void Assembler::test_asm() {
|
||||
li( R3, -4711);
|
||||
|
||||
// PPC 1, section 3.3.9, Fixed-Point Compare Instructions
|
||||
cmpi( CCR7, 0, R27, 4711);
|
||||
cmp( CCR0, 1, R14, R11);
|
||||
cmpli( CCR5, 1, R17, 45);
|
||||
cmpl( CCR3, 0, R9, R10);
|
||||
cmpi( CR7, 0, R27, 4711);
|
||||
cmp( CR0, 1, R14, R11);
|
||||
cmpli( CR5, 1, R17, 45);
|
||||
cmpl( CR3, 0, R9, R10);
|
||||
|
||||
cmpwi( CCR7, R27, 4711);
|
||||
cmpw( CCR0, R14, R11);
|
||||
cmplwi( CCR5, R17, 45);
|
||||
cmplw( CCR3, R9, R10);
|
||||
cmpwi( CR7, R27, 4711);
|
||||
cmpw( CR0, R14, R11);
|
||||
cmplwi( CR5, R17, 45);
|
||||
cmplw( CR3, R9, R10);
|
||||
|
||||
cmpdi( CCR7, R27, 4711);
|
||||
cmpd( CCR0, R14, R11);
|
||||
cmpldi( CCR5, R17, 45);
|
||||
cmpld( CCR3, R9, R10);
|
||||
cmpdi( CR7, R27, 4711);
|
||||
cmpd( CR0, R14, R11);
|
||||
cmpldi( CR5, R17, 45);
|
||||
cmpld( CR3, R9, R10);
|
||||
|
||||
// PPC 1, section 3.3.11, Fixed-Point Logical Instructions
|
||||
andi_( R4, R5, 0xff);
|
||||
@@ -715,23 +715,23 @@ void Assembler::test_asm() {
|
||||
bcctr( 4, 6, 0);
|
||||
bcctrl(4, 6, 0);
|
||||
|
||||
blt(CCR0, lbl2);
|
||||
bgt(CCR1, lbl2);
|
||||
beq(CCR2, lbl2);
|
||||
bso(CCR3, lbl2);
|
||||
bge(CCR4, lbl2);
|
||||
ble(CCR5, lbl2);
|
||||
bne(CCR6, lbl2);
|
||||
bns(CCR7, lbl2);
|
||||
blt(CR0, lbl2);
|
||||
bgt(CR1, lbl2);
|
||||
beq(CR2, lbl2);
|
||||
bso(CR3, lbl2);
|
||||
bge(CR4, lbl2);
|
||||
ble(CR5, lbl2);
|
||||
bne(CR6, lbl2);
|
||||
bns(CR7, lbl2);
|
||||
|
||||
bltl(CCR0, lbl2);
|
||||
bgtl(CCR1, lbl2);
|
||||
beql(CCR2, lbl2);
|
||||
bsol(CCR3, lbl2);
|
||||
bgel(CCR4, lbl2);
|
||||
blel(CCR5, lbl2);
|
||||
bnel(CCR6, lbl2);
|
||||
bnsl(CCR7, lbl2);
|
||||
bltl(CR0, lbl2);
|
||||
bgtl(CR1, lbl2);
|
||||
beql(CR2, lbl2);
|
||||
bsol(CR3, lbl2);
|
||||
bgel(CR4, lbl2);
|
||||
blel(CR5, lbl2);
|
||||
bnel(CR6, lbl2);
|
||||
bnsl(CR7, lbl2);
|
||||
blr();
|
||||
|
||||
sync();
|
||||
@@ -794,7 +794,7 @@ void Assembler::test_asm() {
|
||||
fcfid( F22, F23);
|
||||
|
||||
// PPC 1, section 4.6.7 Floating-Point Compare Instructions
|
||||
fcmpu( CCR7, F24, F25);
|
||||
fcmpu( CR7, F24, F25);
|
||||
|
||||
tty->print_cr("\ntest_asm disassembly (0x%lx 0x%lx):", p2i(code()->insts_begin()), p2i(code()->insts_end()));
|
||||
code()->decode();
|
||||
|
||||
@@ -294,6 +294,8 @@ class Assembler : public AbstractAssembler {
|
||||
CLRRWI_OPCODE = RLWINM_OPCODE,
|
||||
CLRLWI_OPCODE = RLWINM_OPCODE,
|
||||
|
||||
RLWNM_OPCODE = (23u << OPCODE_SHIFT),
|
||||
|
||||
RLWIMI_OPCODE = (20u << OPCODE_SHIFT),
|
||||
|
||||
SLW_OPCODE = (31u << OPCODE_SHIFT | 24u << 1),
|
||||
@@ -424,6 +426,9 @@ class Assembler : public AbstractAssembler {
|
||||
RLDIC_OPCODE = (30u << OPCODE_SHIFT | 2u << XO_27_29_SHIFT), // MD-FORM
|
||||
RLDIMI_OPCODE = (30u << OPCODE_SHIFT | 3u << XO_27_29_SHIFT), // MD-FORM
|
||||
|
||||
RLDCL_OPCODE = (30u << OPCODE_SHIFT | 8u << 1),
|
||||
RLDCR_OPCODE = (30u << OPCODE_SHIFT | 9u << 1),
|
||||
|
||||
SRADI_OPCODE = (31u << OPCODE_SHIFT | 413u << XO_21_29_SHIFT), // XS-FORM
|
||||
|
||||
SLD_OPCODE = (31u << OPCODE_SHIFT | 27u << 1), // X-FORM
|
||||
@@ -1696,6 +1701,14 @@ class Assembler : public AbstractAssembler {
|
||||
inline void insrdi( Register a, Register s, int n, int b);
|
||||
inline void insrwi( Register a, Register s, int n, int b);
|
||||
|
||||
// Rotate variable
|
||||
inline void rlwnm( Register a, Register s, Register b, int mb, int me);
|
||||
inline void rlwnm_(Register a, Register s, Register b, int mb, int me);
|
||||
inline void rldcl( Register a, Register s, Register b, int mb);
|
||||
inline void rldcl_(Register a, Register s, Register b, int mb);
|
||||
inline void rldcr( Register a, Register s, Register b, int me);
|
||||
inline void rldcr_(Register a, Register s, Register b, int me);
|
||||
|
||||
// PPC 1, section 3.3.2 Fixed-Point Load Instructions
|
||||
// 4 bytes
|
||||
inline void lwzx( Register d, Register s1, Register s2);
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -246,9 +246,9 @@ inline void Assembler::nop() { Assembler::ori(R0, R
|
||||
// NOP for FP and BR units (different versions to allow them to be in one group)
|
||||
inline void Assembler::fpnop0() { Assembler::fmr(F30, F30); }
|
||||
inline void Assembler::fpnop1() { Assembler::fmr(F31, F31); }
|
||||
inline void Assembler::brnop0() { Assembler::mcrf(CCR2, CCR2); }
|
||||
inline void Assembler::brnop1() { Assembler::mcrf(CCR3, CCR3); }
|
||||
inline void Assembler::brnop2() { Assembler::mcrf(CCR4, CCR4); }
|
||||
inline void Assembler::brnop0() { Assembler::mcrf(CR2, CR2); }
|
||||
inline void Assembler::brnop1() { Assembler::mcrf(CR3, CR3); }
|
||||
inline void Assembler::brnop2() { Assembler::mcrf(CR4, CR4); }
|
||||
|
||||
inline void Assembler::mr( Register d, Register s) { Assembler::orr(d, s, s); }
|
||||
inline void Assembler::ori_opt( Register d, int ui16) { if (ui16!=0) Assembler::ori( d, d, ui16); }
|
||||
@@ -303,7 +303,7 @@ inline void Assembler::clrlsldi_(Register a, Register s, int clrl6, int shl6) {
|
||||
inline void Assembler::extrdi( Register a, Register s, int n, int b){ Assembler::rldicl(a, s, b+n, 64-n); }
|
||||
// testbit with condition register.
|
||||
inline void Assembler::testbitdi(ConditionRegister cr, Register a, Register s, int ui6) {
|
||||
if (cr == CCR0) {
|
||||
if (cr == CR0) {
|
||||
Assembler::rldicr_(a, s, 63-ui6, 0);
|
||||
} else {
|
||||
Assembler::rldicr(a, s, 63-ui6, 0);
|
||||
@@ -336,6 +336,13 @@ inline void Assembler::rldimi_( Register a, Register s, int sh6, int mb6)
|
||||
inline void Assembler::insrdi( Register a, Register s, int n, int b) { Assembler::rldimi(a, s, 64-(b+n), b); }
|
||||
inline void Assembler::insrwi( Register a, Register s, int n, int b) { Assembler::rlwimi(a, s, 32-(b+n), b, b+n-1); }
|
||||
|
||||
inline void Assembler::rlwnm( Register a, Register s, Register b, int mb, int me) { emit_int32(RLWNM_OPCODE | rta(a) | rs(s) | rb(b) | mb2125(mb) | me2630(me) | rc(0)); }
|
||||
inline void Assembler::rlwnm_(Register a, Register s, Register b, int mb, int me) { emit_int32(RLWNM_OPCODE | rta(a) | rs(s) | rb(b) | mb2125(mb) | me2630(me) | rc(1)); }
|
||||
inline void Assembler::rldcl( Register a, Register s, Register b, int mb) { emit_int32(RLDCL_OPCODE | rta(a) | rs(s) | rb(b) | mb2126(mb) | rc(0)); }
|
||||
inline void Assembler::rldcl_( Register a, Register s, Register b, int mb) { emit_int32(RLDCL_OPCODE | rta(a) | rs(s) | rb(b) | mb2126(mb) | rc(1)); }
|
||||
inline void Assembler::rldcr( Register a, Register s, Register b, int me) { emit_int32(RLDCR_OPCODE | rta(a) | rs(s) | rb(b) | me2126(me) | rc(0)); }
|
||||
inline void Assembler::rldcr_( Register a, Register s, Register b, int me) { emit_int32(RLDCR_OPCODE | rta(a) | rs(s) | rb(b) | me2126(me) | rc(1)); }
|
||||
|
||||
// PPC 1, section 3.3.2 Fixed-Point Load Instructions
|
||||
inline void Assembler::lwzx( Register d, Register s1, Register s2) { emit_int32(LWZX_OPCODE | rt(d) | ra0mem(s1) | rb(s2));}
|
||||
inline void Assembler::lwz( Register d, Address &a) {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -367,9 +367,9 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
|
||||
__ mr(R0, _obj); // spill
|
||||
__ ld(_obj, java_lang_Class::klass_offset(), _obj);
|
||||
__ ld(_obj, in_bytes(InstanceKlass::init_thread_offset()), _obj);
|
||||
__ cmpd(CCR0, _obj, R16_thread);
|
||||
__ cmpd(CR0, _obj, R16_thread);
|
||||
__ mr(_obj, R0); // restore
|
||||
__ bne(CCR0, call_patch);
|
||||
__ bne(CR0, call_patch);
|
||||
|
||||
// Load_klass patches may execute the patched code before it's
|
||||
// copied back into place so we need to jump back into the main
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -48,7 +48,7 @@
|
||||
#define __ _masm->
|
||||
|
||||
|
||||
const ConditionRegister LIR_Assembler::BOOL_RESULT = CCR5;
|
||||
const ConditionRegister LIR_Assembler::BOOL_RESULT = CR5;
|
||||
|
||||
|
||||
bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
|
||||
@@ -156,8 +156,8 @@ void LIR_Assembler::osr_entry() {
|
||||
{
|
||||
Label L;
|
||||
__ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
|
||||
__ cmpdi(CCR0, R0, 0);
|
||||
__ bne(CCR0, L);
|
||||
__ cmpdi(CR0, R0, 0);
|
||||
__ bne(CR0, L);
|
||||
__ stop("locked object is null");
|
||||
__ bind(L);
|
||||
}
|
||||
@@ -410,11 +410,11 @@ void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right,
|
||||
|
||||
Label regular, done;
|
||||
if (is_int) {
|
||||
__ cmpwi(CCR0, Rdivisor, -1);
|
||||
__ cmpwi(CR0, Rdivisor, -1);
|
||||
} else {
|
||||
__ cmpdi(CCR0, Rdivisor, -1);
|
||||
__ cmpdi(CR0, Rdivisor, -1);
|
||||
}
|
||||
__ bne(CCR0, regular);
|
||||
__ bne(CR0, regular);
|
||||
if (code == lir_idiv) {
|
||||
__ neg(Rresult, Rdividend);
|
||||
__ b(done);
|
||||
@@ -597,14 +597,14 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
|
||||
Address addr = dst_in_memory ? frame_map()->address_for_slot(dst->double_stack_ix()) : Address();
|
||||
Label L;
|
||||
// Result must be 0 if value is NaN; test by comparing value to itself.
|
||||
__ fcmpu(CCR0, rsrc, rsrc);
|
||||
__ fcmpu(CR0, rsrc, rsrc);
|
||||
if (dst_in_memory) {
|
||||
__ li(R0, 0); // 0 in case of NAN
|
||||
__ std(R0, addr);
|
||||
} else {
|
||||
__ li(dst->as_register(), 0);
|
||||
}
|
||||
__ bso(CCR0, L);
|
||||
__ bso(CR0, L);
|
||||
__ fctiwz(rsrc, rsrc); // USE_KILL
|
||||
if (dst_in_memory) {
|
||||
__ stfd(rsrc, addr.disp(), addr.base());
|
||||
@@ -621,14 +621,14 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
|
||||
Address addr = dst_in_memory ? frame_map()->address_for_slot(dst->double_stack_ix()) : Address();
|
||||
Label L;
|
||||
// Result must be 0 if value is NaN; test by comparing value to itself.
|
||||
__ fcmpu(CCR0, rsrc, rsrc);
|
||||
__ fcmpu(CR0, rsrc, rsrc);
|
||||
if (dst_in_memory) {
|
||||
__ li(R0, 0); // 0 in case of NAN
|
||||
__ std(R0, addr);
|
||||
} else {
|
||||
__ li(dst->as_register_lo(), 0);
|
||||
}
|
||||
__ bso(CCR0, L);
|
||||
__ bso(CR0, L);
|
||||
__ fctidz(rsrc, rsrc); // USE_KILL
|
||||
if (dst_in_memory) {
|
||||
__ stfd(rsrc, addr.disp(), addr.base());
|
||||
@@ -1530,15 +1530,15 @@ void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Op
|
||||
if (code == lir_cmp_fd2i || code == lir_ucmp_fd2i) {
|
||||
bool is_unordered_less = (code == lir_ucmp_fd2i);
|
||||
if (left->is_single_fpu()) {
|
||||
__ fcmpu(CCR0, left->as_float_reg(), right->as_float_reg());
|
||||
__ fcmpu(CR0, left->as_float_reg(), right->as_float_reg());
|
||||
} else if (left->is_double_fpu()) {
|
||||
__ fcmpu(CCR0, left->as_double_reg(), right->as_double_reg());
|
||||
__ fcmpu(CR0, left->as_double_reg(), right->as_double_reg());
|
||||
} else {
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
__ set_cmpu3(Rdst, is_unordered_less); // is_unordered_less ? -1 : 1
|
||||
} else if (code == lir_cmp_l2i) {
|
||||
__ cmpd(CCR0, left->as_register_lo(), right->as_register_lo());
|
||||
__ cmpd(CR0, left->as_register_lo(), right->as_register_lo());
|
||||
__ set_cmp3(Rdst); // set result as follows: <: -1, =: 0, >: 1
|
||||
} else {
|
||||
ShouldNotReachHere();
|
||||
@@ -1893,8 +1893,8 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
__ add(src_pos, tmp, src_pos);
|
||||
__ add(dst_pos, tmp, dst_pos);
|
||||
|
||||
__ cmpwi(CCR0, R3_RET, 0);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::less), *stub->entry());
|
||||
__ cmpwi(CR0, R3_RET, 0);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CR0, Assembler::less), *stub->entry());
|
||||
__ bind(*stub->continuation());
|
||||
return;
|
||||
}
|
||||
@@ -1910,12 +1910,12 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
|
||||
// Use only one conditional branch for simple checks.
|
||||
if (simple_check_flag_set) {
|
||||
ConditionRegister combined_check = CCR1, tmp_check = CCR1;
|
||||
ConditionRegister combined_check = CR1, tmp_check = CR1;
|
||||
|
||||
// Make sure src and dst are non-null.
|
||||
if (flags & LIR_OpArrayCopy::src_null_check) {
|
||||
__ cmpdi(combined_check, src, 0);
|
||||
tmp_check = CCR0;
|
||||
tmp_check = CR0;
|
||||
}
|
||||
|
||||
if (flags & LIR_OpArrayCopy::dst_null_check) {
|
||||
@@ -1923,13 +1923,13 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
if (tmp_check != combined_check) {
|
||||
__ cror(combined_check, Assembler::equal, tmp_check, Assembler::equal);
|
||||
}
|
||||
tmp_check = CCR0;
|
||||
tmp_check = CR0;
|
||||
}
|
||||
|
||||
// Clear combined_check.eq if not already used.
|
||||
if (tmp_check == combined_check) {
|
||||
__ crandc(combined_check, Assembler::equal, combined_check, Assembler::equal);
|
||||
tmp_check = CCR0;
|
||||
tmp_check = CR0;
|
||||
}
|
||||
|
||||
if (flags & LIR_OpArrayCopy::src_pos_positive_check) {
|
||||
@@ -1960,15 +1960,15 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
|
||||
__ load_klass(tmp, dst);
|
||||
__ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
|
||||
__ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
|
||||
__ bge(CCR0, slow);
|
||||
__ cmpwi(CR0, tmp2, Klass::_lh_neutral_value);
|
||||
__ bge(CR0, slow);
|
||||
}
|
||||
|
||||
if (!(flags & LIR_OpArrayCopy::src_objarray)) {
|
||||
__ load_klass(tmp, src);
|
||||
__ lwz(tmp2, in_bytes(Klass::layout_helper_offset()), tmp);
|
||||
__ cmpwi(CCR0, tmp2, Klass::_lh_neutral_value);
|
||||
__ bge(CCR0, slow);
|
||||
__ cmpwi(CR0, tmp2, Klass::_lh_neutral_value);
|
||||
__ bge(CR0, slow);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1979,16 +1979,16 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
if (flags & LIR_OpArrayCopy::src_range_check) {
|
||||
__ lwz(tmp2, arrayOopDesc::length_offset_in_bytes(), src);
|
||||
__ add(tmp, length, src_pos);
|
||||
__ cmpld(CCR0, tmp2, tmp);
|
||||
__ ble(CCR0, slow);
|
||||
__ cmpld(CR0, tmp2, tmp);
|
||||
__ ble(CR0, slow);
|
||||
}
|
||||
|
||||
__ extsw(dst_pos, dst_pos);
|
||||
if (flags & LIR_OpArrayCopy::dst_range_check) {
|
||||
__ lwz(tmp2, arrayOopDesc::length_offset_in_bytes(), dst);
|
||||
__ add(tmp, length, dst_pos);
|
||||
__ cmpld(CCR0, tmp2, tmp);
|
||||
__ ble(CCR0, slow);
|
||||
__ cmpld(CR0, tmp2, tmp);
|
||||
__ ble(CR0, slow);
|
||||
}
|
||||
|
||||
int shift = shift_amount(basic_type);
|
||||
@@ -2003,8 +2003,8 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
// We don't know the array types are compatible.
|
||||
if (basic_type != T_OBJECT) {
|
||||
// Simple test for basic type arrays.
|
||||
__ cmp_klasses_from_objects(CCR0, src, dst, tmp, tmp2);
|
||||
__ beq(CCR0, cont);
|
||||
__ cmp_klasses_from_objects(CR0, src, dst, tmp, tmp2);
|
||||
__ beq(CR0, cont);
|
||||
} else {
|
||||
// For object arrays, if src is a sub class of dst then we can
|
||||
// safely do the copy.
|
||||
@@ -2024,7 +2024,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
__ calculate_address_from_global_toc(tmp, slow_stc, true, true, false);
|
||||
__ mtctr(tmp);
|
||||
__ bctrl(); // sets CR0
|
||||
__ beq(CCR0, cont);
|
||||
__ beq(CR0, cont);
|
||||
|
||||
if (copyfunc_addr != nullptr) { // Use stub if available.
|
||||
__ bind(copyfunc);
|
||||
@@ -2044,8 +2044,8 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
|
||||
jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
|
||||
__ load_const_optimized(tmp, objArray_lh);
|
||||
__ cmpw(CCR0, tmp, tmp2);
|
||||
__ bne(CCR0, slow);
|
||||
__ cmpw(CR0, tmp, tmp2);
|
||||
__ bne(CR0, slow);
|
||||
}
|
||||
|
||||
Register src_ptr = R3_ARG1;
|
||||
@@ -2080,8 +2080,8 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
#ifndef PRODUCT
|
||||
if (PrintC1Statistics) {
|
||||
Label failed;
|
||||
__ cmpwi(CCR0, R3_RET, 0);
|
||||
__ bne(CCR0, failed);
|
||||
__ cmpwi(CR0, R3_RET, 0);
|
||||
__ bne(CR0, failed);
|
||||
address counter = (address)&Runtime1::_arraycopy_checkcast_cnt;
|
||||
int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
|
||||
__ lwz(R11_scratch1, simm16_offs, tmp);
|
||||
@@ -2092,8 +2092,8 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
#endif
|
||||
|
||||
__ nand(tmp, R3_RET, R3_RET);
|
||||
__ cmpwi(CCR0, R3_RET, 0);
|
||||
__ beq(CCR0, *stub->continuation());
|
||||
__ cmpwi(CR0, R3_RET, 0);
|
||||
__ beq(CR0, *stub->continuation());
|
||||
|
||||
#ifndef PRODUCT
|
||||
if (PrintC1Statistics) {
|
||||
@@ -2126,15 +2126,15 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
// but not necessarily exactly of type default_type.
|
||||
Label known_ok, halt;
|
||||
metadata2reg(default_type->constant_encoding(), tmp);
|
||||
__ cmp_klass(CCR0, dst, tmp, R11_scratch1, R12_scratch2);
|
||||
__ cmp_klass(CR0, dst, tmp, R11_scratch1, R12_scratch2);
|
||||
if (basic_type != T_OBJECT) {
|
||||
__ bne(CCR0, halt);
|
||||
__ cmp_klass(CCR0, src, tmp, R11_scratch1, R12_scratch2);
|
||||
__ beq(CCR0, known_ok);
|
||||
__ bne(CR0, halt);
|
||||
__ cmp_klass(CR0, src, tmp, R11_scratch1, R12_scratch2);
|
||||
__ beq(CR0, known_ok);
|
||||
} else {
|
||||
__ beq(CCR0, known_ok);
|
||||
__ cmpw(CCR0, src, dst);
|
||||
__ beq(CCR0, known_ok);
|
||||
__ beq(CR0, known_ok);
|
||||
__ cmpw(CR0, src, dst);
|
||||
__ beq(CR0, known_ok);
|
||||
}
|
||||
__ bind(halt);
|
||||
__ stop("incorrect type information in arraycopy");
|
||||
@@ -2269,8 +2269,8 @@ void LIR_Assembler::emit_alloc_obj(LIR_OpAllocObj* op) {
|
||||
__ lbz(op->tmp1()->as_register(),
|
||||
in_bytes(InstanceKlass::init_state_offset()), op->klass()->as_register());
|
||||
// acquire barrier included in membar_storestore() which follows the allocation immediately.
|
||||
__ cmpwi(CCR0, op->tmp1()->as_register(), InstanceKlass::fully_initialized);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), *op->stub()->entry());
|
||||
__ cmpwi(CR0, op->tmp1()->as_register(), InstanceKlass::fully_initialized);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CR0, Assembler::equal), *op->stub()->entry());
|
||||
}
|
||||
__ allocate_object(op->obj()->as_register(),
|
||||
op->tmp1()->as_register(),
|
||||
@@ -2317,8 +2317,8 @@ void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
|
||||
// See if the receiver is receiver[n].
|
||||
__ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
|
||||
__ verify_klass_ptr(tmp1);
|
||||
__ cmpd(CCR0, recv, tmp1);
|
||||
__ bne(CCR0, next_test);
|
||||
__ cmpd(CR0, recv, tmp1);
|
||||
__ bne(CR0, next_test);
|
||||
|
||||
__ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
|
||||
__ addi(tmp1, tmp1, DataLayout::counter_increment);
|
||||
@@ -2332,8 +2332,8 @@ void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
|
||||
for (i = 0; i < VirtualCallData::row_limit(); i++) {
|
||||
Label next_test;
|
||||
__ ld(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
|
||||
__ cmpdi(CCR0, tmp1, 0);
|
||||
__ bne(CCR0, next_test);
|
||||
__ cmpdi(CR0, tmp1, 0);
|
||||
__ bne(CR0, next_test);
|
||||
__ li(tmp1, DataLayout::counter_increment);
|
||||
__ std(recv, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - mdo_offset_bias, mdo);
|
||||
__ std(tmp1, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) - mdo_offset_bias, mdo);
|
||||
@@ -2394,8 +2394,8 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
|
||||
Label not_null;
|
||||
metadata2reg(md->constant_encoding(), mdo);
|
||||
__ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
|
||||
__ cmpdi(CCR0, obj, 0);
|
||||
__ bne(CCR0, not_null);
|
||||
__ cmpdi(CR0, obj, 0);
|
||||
__ bne(CR0, not_null);
|
||||
__ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
|
||||
__ ori(data_val, data_val, BitData::null_seen_byte_constant());
|
||||
__ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
|
||||
@@ -2412,8 +2412,8 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
|
||||
__ std(Rtmp1, slot_offset, mdo);
|
||||
__ bind(update_done);
|
||||
} else {
|
||||
__ cmpdi(CCR0, obj, 0);
|
||||
__ beq(CCR0, *obj_is_null);
|
||||
__ cmpdi(CR0, obj, 0);
|
||||
__ beq(CR0, *obj_is_null);
|
||||
}
|
||||
|
||||
// get object class
|
||||
@@ -2427,8 +2427,8 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
|
||||
|
||||
if (op->fast_check()) {
|
||||
assert_different_registers(klass_RInfo, k_RInfo);
|
||||
__ cmpd(CCR0, k_RInfo, klass_RInfo);
|
||||
__ beq(CCR0, *success);
|
||||
__ cmpd(CR0, k_RInfo, klass_RInfo);
|
||||
__ beq(CR0, *success);
|
||||
// Fall through to failure case.
|
||||
} else {
|
||||
bool need_slow_path = true;
|
||||
@@ -2462,7 +2462,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
|
||||
__ mtctr(original_Rtmp1);
|
||||
__ bctrl(); // sets CR0
|
||||
if (keep_obj_alive) { __ mr(obj, dst); }
|
||||
__ beq(CCR0, *success);
|
||||
__ beq(CR0, *success);
|
||||
// Fall through to failure case.
|
||||
}
|
||||
}
|
||||
@@ -2501,8 +2501,8 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
|
||||
Register data_val = Rtmp1;
|
||||
metadata2reg(md->constant_encoding(), mdo);
|
||||
__ add_const_optimized(mdo, mdo, mdo_offset_bias, R0);
|
||||
__ cmpdi(CCR0, value, 0);
|
||||
__ bne(CCR0, not_null);
|
||||
__ cmpdi(CR0, value, 0);
|
||||
__ bne(CR0, not_null);
|
||||
__ lbz(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
|
||||
__ ori(data_val, data_val, BitData::null_seen_byte_constant());
|
||||
__ stb(data_val, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias, mdo);
|
||||
@@ -2519,8 +2519,8 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
|
||||
__ std(Rtmp1, slot_offset, mdo);
|
||||
__ bind(update_done);
|
||||
} else {
|
||||
__ cmpdi(CCR0, value, 0);
|
||||
__ beq(CCR0, done);
|
||||
__ cmpdi(CR0, value, 0);
|
||||
__ beq(CR0, done);
|
||||
}
|
||||
if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
|
||||
explicit_null_check(array, op->info_for_exception());
|
||||
@@ -2543,7 +2543,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
|
||||
__ add_const_optimized(R0, R29_TOC, MacroAssembler::offset_to_global_toc(slow_path));
|
||||
__ mtctr(R0);
|
||||
__ bctrl(); // sets CR0
|
||||
__ beq(CCR0, done);
|
||||
__ beq(CR0, done);
|
||||
|
||||
__ bind(failure);
|
||||
__ b(*stub->entry());
|
||||
@@ -3024,9 +3024,9 @@ void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr
|
||||
}
|
||||
|
||||
if (UseStaticBranchPredictionInCompareAndSwapPPC64) {
|
||||
__ bne_predict_not_taken(CCR0, Lretry);
|
||||
__ bne_predict_not_taken(CR0, Lretry);
|
||||
} else {
|
||||
__ bne( CCR0, Lretry);
|
||||
__ bne( CR0, Lretry);
|
||||
}
|
||||
|
||||
if (UseCompressedOops && data->is_oop()) {
|
||||
@@ -3063,8 +3063,8 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
|
||||
|
||||
if (do_null) {
|
||||
if (!TypeEntries::was_null_seen(current_klass)) {
|
||||
__ cmpdi(CCR0, obj, 0);
|
||||
__ bne(CCR0, Lupdate);
|
||||
__ cmpdi(CR0, obj, 0);
|
||||
__ bne(CR0, Lupdate);
|
||||
__ ld(R0, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
|
||||
__ ori(R0, R0, TypeEntries::null_seen);
|
||||
if (do_update) {
|
||||
@@ -3074,14 +3074,14 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
|
||||
}
|
||||
} else {
|
||||
if (do_update) {
|
||||
__ cmpdi(CCR0, obj, 0);
|
||||
__ beq(CCR0, Ldone);
|
||||
__ cmpdi(CR0, obj, 0);
|
||||
__ beq(CR0, Ldone);
|
||||
}
|
||||
}
|
||||
#ifdef ASSERT
|
||||
} else {
|
||||
__ cmpdi(CCR0, obj, 0);
|
||||
__ bne(CCR0, Lupdate);
|
||||
__ cmpdi(CR0, obj, 0);
|
||||
__ bne(CR0, Lupdate);
|
||||
__ stop("unexpected null obj");
|
||||
#endif
|
||||
}
|
||||
@@ -3097,8 +3097,8 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
|
||||
klass_reg_used = true;
|
||||
__ load_klass(klass, obj);
|
||||
metadata2reg(exact_klass->constant_encoding(), R0);
|
||||
__ cmpd(CCR0, klass, R0);
|
||||
__ beq(CCR0, ok);
|
||||
__ cmpd(CR0, klass, R0);
|
||||
__ beq(CR0, ok);
|
||||
__ stop("exact klass and actual klass differ");
|
||||
__ bind(ok);
|
||||
}
|
||||
@@ -3118,20 +3118,20 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
|
||||
// Like InterpreterMacroAssembler::profile_obj_type
|
||||
__ clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
|
||||
// Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
|
||||
__ cmpd(CCR1, R0, klass);
|
||||
__ cmpd(CR1, R0, klass);
|
||||
// Klass seen before, nothing to do (regardless of unknown bit).
|
||||
//beq(CCR1, do_nothing);
|
||||
//beq(CR1, do_nothing);
|
||||
|
||||
__ andi_(R0, tmp, TypeEntries::type_unknown);
|
||||
// Already unknown. Nothing to do anymore.
|
||||
//bne(CCR0, do_nothing);
|
||||
__ crorc(CCR0, Assembler::equal, CCR1, Assembler::equal); // cr0 eq = cr1 eq or cr0 ne
|
||||
__ beq(CCR0, Lnext);
|
||||
//bne(CR0, do_nothing);
|
||||
__ crorc(CR0, Assembler::equal, CR1, Assembler::equal); // cr0 eq = cr1 eq or cr0 ne
|
||||
__ beq(CR0, Lnext);
|
||||
|
||||
if (TypeEntries::is_type_none(current_klass)) {
|
||||
__ clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
|
||||
__ orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
|
||||
__ beq(CCR0, Ldo_update); // First time here. Set profile type.
|
||||
__ beq(CR0, Ldo_update); // First time here. Set profile type.
|
||||
}
|
||||
|
||||
} else {
|
||||
@@ -3141,7 +3141,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
|
||||
__ ld(tmp, index_or_disp(mdo_addr), mdo_addr->base()->as_pointer_register());
|
||||
__ andi_(R0, tmp, TypeEntries::type_unknown);
|
||||
// Already unknown. Nothing to do anymore.
|
||||
__ bne(CCR0, Lnext);
|
||||
__ bne(CR0, Lnext);
|
||||
}
|
||||
|
||||
// Different than before. Cannot keep accurate profile.
|
||||
@@ -3157,14 +3157,14 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
|
||||
|
||||
__ clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
|
||||
// Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
|
||||
__ cmpd(CCR1, R0, klass);
|
||||
__ cmpd(CR1, R0, klass);
|
||||
// Klass seen before, nothing to do (regardless of unknown bit).
|
||||
__ beq(CCR1, Lnext);
|
||||
__ beq(CR1, Lnext);
|
||||
#ifdef ASSERT
|
||||
{
|
||||
Label ok;
|
||||
__ clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
|
||||
__ beq(CCR0, ok); // First time here.
|
||||
__ beq(CR0, ok); // First time here.
|
||||
|
||||
__ stop("unexpected profiling mismatch");
|
||||
__ bind(ok);
|
||||
@@ -3178,7 +3178,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
|
||||
|
||||
// Already unknown. Nothing to do anymore.
|
||||
__ andi_(R0, tmp, TypeEntries::type_unknown);
|
||||
__ bne(CCR0, Lnext);
|
||||
__ bne(CR0, Lnext);
|
||||
|
||||
// Different than before. Cannot keep accurate profile.
|
||||
__ ori(R0, tmp, TypeEntries::type_unknown);
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -86,8 +86,8 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
|
||||
if (DiagnoseSyncOnValueBasedClasses != 0) {
|
||||
load_klass(Rscratch, Roop);
|
||||
lbz(Rscratch, in_bytes(Klass::misc_flags_offset()), Rscratch);
|
||||
testbitdi(CCR0, R0, Rscratch, exact_log2(KlassFlags::_misc_is_value_based_class));
|
||||
bne(CCR0, slow_int);
|
||||
testbitdi(CR0, R0, Rscratch, exact_log2(KlassFlags::_misc_is_value_based_class));
|
||||
bne(CR0, slow_int);
|
||||
}
|
||||
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
@@ -101,7 +101,7 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
|
||||
|
||||
// Compare object markWord with Rmark and if equal exchange Rscratch with object markWord.
|
||||
assert(oopDesc::mark_offset_in_bytes() == 0, "cas must take a zero displacement");
|
||||
cmpxchgd(/*flag=*/CCR0,
|
||||
cmpxchgd(/*flag=*/CR0,
|
||||
/*current_value=*/Rscratch,
|
||||
/*compare_value=*/Rmark,
|
||||
/*exchange_value=*/Rbox,
|
||||
@@ -128,7 +128,7 @@ void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox
|
||||
load_const_optimized(R0, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
|
||||
and_(R0/*==0?*/, Rscratch, R0);
|
||||
std(R0/*==0, perhaps*/, BasicLock::displaced_header_offset_in_bytes(), Rbox);
|
||||
bne(CCR0, slow_int);
|
||||
bne(CR0, slow_int);
|
||||
}
|
||||
|
||||
bind(done);
|
||||
@@ -149,8 +149,8 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb
|
||||
if (LockingMode != LM_LIGHTWEIGHT) {
|
||||
// Test first if it is a fast recursive unlock.
|
||||
ld(Rmark, BasicLock::displaced_header_offset_in_bytes(), Rbox);
|
||||
cmpdi(CCR0, Rmark, 0);
|
||||
beq(CCR0, done);
|
||||
cmpdi(CR0, Rmark, 0);
|
||||
beq(CR0, done);
|
||||
}
|
||||
|
||||
// Load object.
|
||||
@@ -162,7 +162,7 @@ void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rb
|
||||
} else if (LockingMode == LM_LEGACY) {
|
||||
// Check if it is still a light weight lock, this is true if we see
|
||||
// the stack address of the basicLock in the markWord of the object.
|
||||
cmpxchgd(/*flag=*/CCR0,
|
||||
cmpxchgd(/*flag=*/CR0,
|
||||
/*current_value=*/R0,
|
||||
/*compare_value=*/Rbox,
|
||||
/*exchange_value=*/Rmark,
|
||||
@@ -285,9 +285,9 @@ void C1_MacroAssembler::initialize_object(
|
||||
{
|
||||
lwz(t1, in_bytes(Klass::layout_helper_offset()), klass);
|
||||
if (var_size_in_bytes != noreg) {
|
||||
cmpw(CCR0, t1, var_size_in_bytes);
|
||||
cmpw(CR0, t1, var_size_in_bytes);
|
||||
} else {
|
||||
cmpwi(CCR0, t1, con_size_in_bytes);
|
||||
cmpwi(CR0, t1, con_size_in_bytes);
|
||||
}
|
||||
asm_assert_eq("bad size in initialize_object");
|
||||
}
|
||||
@@ -340,8 +340,8 @@ void C1_MacroAssembler::allocate_array(
|
||||
if (max_tlab < max_length) { max_length = max_tlab; }
|
||||
}
|
||||
load_const_optimized(t1, max_length);
|
||||
cmpld(CCR0, len, t1);
|
||||
bc_far_optimized(Assembler::bcondCRbiIs1, bi0(CCR0, Assembler::greater), slow_case);
|
||||
cmpld(CR0, len, t1);
|
||||
bc_far_optimized(Assembler::bcondCRbiIs1, bi0(CR0, Assembler::greater), slow_case);
|
||||
|
||||
// compute array size
|
||||
// note: If 0 <= len <= max_length, len*elt_size + header + alignment is
|
||||
@@ -399,8 +399,8 @@ void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
|
||||
|
||||
void C1_MacroAssembler::verify_not_null_oop(Register r) {
|
||||
Label not_null;
|
||||
cmpdi(CCR0, r, 0);
|
||||
bne(CCR0, not_null);
|
||||
cmpdi(CR0, r, 0);
|
||||
bne(CR0, not_null);
|
||||
stop("non-null oop required");
|
||||
bind(not_null);
|
||||
verify_oop(r, FILE_AND_LINE);
|
||||
@@ -414,7 +414,7 @@ void C1_MacroAssembler::null_check(Register r, Label* Lnull) {
|
||||
} else { // explicit
|
||||
//const address exception_entry = Runtime1::entry_for(C1StubId::throw_null_pointer_exception_id);
|
||||
assert(Lnull != nullptr, "must have Label for explicit check");
|
||||
cmpdi(CCR0, r, 0);
|
||||
bc_far_optimized(Assembler::bcondCRbiIs1, bi0(CCR0, Assembler::equal), *Lnull);
|
||||
cmpdi(CR0, r, 0);
|
||||
bc_far_optimized(Assembler::bcondCRbiIs1, bi0(CR0, Assembler::equal), *Lnull);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2012, 2023 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -69,14 +69,14 @@ int StubAssembler::call_RT(Register oop_result1, Register metadata_result,
|
||||
// Check for pending exceptions.
|
||||
{
|
||||
ld(R0, in_bytes(Thread::pending_exception_offset()), R16_thread);
|
||||
cmpdi(CCR0, R0, 0);
|
||||
cmpdi(CR0, R0, 0);
|
||||
|
||||
// This used to conditionally jump to forward_exception however it is
|
||||
// possible if we relocate that the branch will not reach. So we must jump
|
||||
// around so we can always reach.
|
||||
|
||||
Label ok;
|
||||
beq(CCR0, ok);
|
||||
beq(CR0, ok);
|
||||
|
||||
// Make sure that the vm_results are cleared.
|
||||
if (oop_result1->is_valid() || metadata_result->is_valid()) {
|
||||
@@ -368,7 +368,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
|
||||
int call_offset = __ call_RT(noreg, noreg, target);
|
||||
OopMapSet* oop_maps = new OopMapSet();
|
||||
oop_maps->add_gc_map(call_offset, oop_map);
|
||||
__ cmpdi(CCR0, R3_RET, 0);
|
||||
__ cmpdi(CR0, R3_RET, 0);
|
||||
|
||||
// Re-execute the patched instruction or, if the nmethod was deoptmized,
|
||||
// return to the deoptimization handler entry that will cause re-execution
|
||||
@@ -382,7 +382,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
|
||||
|
||||
restore_live_registers(sasm, noreg, noreg);
|
||||
// Return if patching routine returned 0.
|
||||
__ bclr(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::equal), Assembler::bhintbhBCLRisReturn);
|
||||
__ bclr(Assembler::bcondCRbiIs1, Assembler::bi0(CR0, Assembler::equal), Assembler::bhintbhBCLRisReturn);
|
||||
|
||||
address stub = deopt_blob->unpack_with_reexecution();
|
||||
//__ load_const_optimized(R0, stub);
|
||||
@@ -448,8 +448,8 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
|
||||
Label ok;
|
||||
__ lwz(R0, in_bytes(Klass::layout_helper_offset()), R4_ARG2);
|
||||
__ srawi(R0, R0, Klass::_lh_array_tag_shift);
|
||||
__ cmpwi(CCR0, R0, tag);
|
||||
__ beq(CCR0, ok);
|
||||
__ cmpwi(CR0, R0, tag);
|
||||
__ beq(CR0, ok);
|
||||
__ stop("assert(is an array klass)");
|
||||
__ should_not_reach_here();
|
||||
__ bind(ok);
|
||||
@@ -485,9 +485,9 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
|
||||
// Load the klass and check the has finalizer flag.
|
||||
__ load_klass(t, R3_ARG1);
|
||||
__ lbz(t, in_bytes(Klass::misc_flags_offset()), t);
|
||||
__ testbitdi(CCR0, R0, t, exact_log2(KlassFlags::_misc_has_finalizer));
|
||||
__ testbitdi(CR0, R0, t, exact_log2(KlassFlags::_misc_has_finalizer));
|
||||
// Return if has_finalizer bit == 0 (CR0.eq).
|
||||
__ bclr(Assembler::bcondCRbiIs1, Assembler::bi0(CCR0, Assembler::equal), Assembler::bhintbhBCLRisReturn);
|
||||
__ bclr(Assembler::bcondCRbiIs1, Assembler::bi0(CR0, Assembler::equal), Assembler::bhintbhBCLRisReturn);
|
||||
|
||||
__ mflr(R0);
|
||||
__ std(R0, _abi0(lr), R1_SP);
|
||||
@@ -602,10 +602,9 @@ OopMapSet* Runtime1::generate_code_for(C1StubId id, StubAssembler* sasm) {
|
||||
{ // Support for uint StubRoutine::partial_subtype_check( Klass sub, Klass super );
|
||||
const Register sub_klass = R5,
|
||||
super_klass = R4,
|
||||
temp1_reg = R6,
|
||||
temp2_reg = R0;
|
||||
__ check_klass_subtype_slow_path(sub_klass, super_klass, temp1_reg, temp2_reg); // returns with CR0.eq if successful
|
||||
__ crandc(CCR0, Assembler::equal, CCR0, Assembler::equal); // failed: CR0.ne
|
||||
temp1_reg = R6;
|
||||
__ check_klass_subtype_slow_path(sub_klass, super_klass, temp1_reg, noreg);
|
||||
// Result is in CR0.
|
||||
__ blr();
|
||||
}
|
||||
break;
|
||||
@@ -806,10 +805,10 @@ OopMapSet* Runtime1::generate_handle_exception(C1StubId id, StubAssembler* sasm)
|
||||
// Check that fields in JavaThread for exception oop and issuing pc are
|
||||
// empty before writing to them.
|
||||
__ ld(R0, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
|
||||
__ cmpdi(CCR0, R0, 0);
|
||||
__ cmpdi(CR0, R0, 0);
|
||||
__ asm_assert_eq("exception oop already set");
|
||||
__ ld(R0, in_bytes(JavaThread::exception_pc_offset() ), R16_thread);
|
||||
__ cmpdi(CCR0, R0, 0);
|
||||
__ cmpdi(CR0, R0, 0);
|
||||
__ asm_assert_eq("exception pc already set");
|
||||
#endif
|
||||
|
||||
|
||||
@@ -70,7 +70,7 @@ void C2_MacroAssembler::string_compress_16(Register src, Register dst, Register
|
||||
// Check if cnt >= 8 (= 16 bytes)
|
||||
lis(tmp1, byte_mask); // tmp1 = 0x00FF00FF00FF00FF (non ascii case)
|
||||
srwi_(tmp2, cnt, 3);
|
||||
beq(CCR0, Lslow);
|
||||
beq(CR0, Lslow);
|
||||
ori(tmp1, tmp1, byte_mask);
|
||||
rldimi(tmp1, tmp1, 32, 0);
|
||||
mtctr(tmp2);
|
||||
@@ -87,7 +87,7 @@ void C2_MacroAssembler::string_compress_16(Register src, Register dst, Register
|
||||
rldimi(tmp4, tmp4, 2*8, 2*8); // _4_6_7_7
|
||||
|
||||
andc_(tmp0, tmp0, tmp1);
|
||||
bne(CCR0, Lfailure); // Not latin1/ascii.
|
||||
bne(CR0, Lfailure); // Not latin1/ascii.
|
||||
addi(src, src, 16);
|
||||
|
||||
rlwimi(tmp3, tmp2, 0*8, 24, 31);// _____1_3
|
||||
@@ -115,8 +115,8 @@ void C2_MacroAssembler::string_compress(Register src, Register dst, Register cnt
|
||||
|
||||
bind(Lloop);
|
||||
lhz(tmp, 0, src);
|
||||
cmplwi(CCR0, tmp, byte_mask);
|
||||
bgt(CCR0, Lfailure); // Not latin1/ascii.
|
||||
cmplwi(CR0, tmp, byte_mask);
|
||||
bgt(CR0, Lfailure); // Not latin1/ascii.
|
||||
addi(src, src, 2);
|
||||
stb(tmp, 0, dst);
|
||||
addi(dst, dst, 1);
|
||||
@@ -130,7 +130,7 @@ void C2_MacroAssembler::encode_iso_array(Register src, Register dst, Register le
|
||||
|
||||
string_compress_16(src, dst, len, tmp1, tmp2, tmp3, tmp4, tmp5, Lfailure1, ascii);
|
||||
rldicl_(result, len, 0, 64-3); // Remaining characters.
|
||||
beq(CCR0, Ldone);
|
||||
beq(CR0, Ldone);
|
||||
bind(Lslow);
|
||||
string_compress(src, dst, result, tmp2, Lfailure2, ascii);
|
||||
li(result, 0);
|
||||
@@ -140,7 +140,7 @@ void C2_MacroAssembler::encode_iso_array(Register src, Register dst, Register le
|
||||
mr(result, len);
|
||||
mfctr(tmp1);
|
||||
rldimi_(result, tmp1, 3, 0); // Remaining characters.
|
||||
beq(CCR0, Ldone);
|
||||
beq(CR0, Ldone);
|
||||
b(Lslow);
|
||||
|
||||
bind(Lfailure2);
|
||||
@@ -159,7 +159,7 @@ void C2_MacroAssembler::string_inflate_16(Register src, Register dst, Register c
|
||||
|
||||
// Check if cnt >= 8
|
||||
srwi_(tmp2, cnt, 3);
|
||||
beq(CCR0, Lslow);
|
||||
beq(CR0, Lslow);
|
||||
lis(tmp1, 0xFF); // tmp1 = 0x00FF00FF
|
||||
ori(tmp1, tmp1, 0xFF);
|
||||
mtctr(tmp2);
|
||||
@@ -235,10 +235,10 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
subf_(diff, cnt2, cnt1); // diff = cnt1 - cnt2
|
||||
// if (diff > 0) { cnt1 = cnt2; }
|
||||
if (VM_Version::has_isel()) {
|
||||
isel(cnt1, CCR0, Assembler::greater, /*invert*/ false, cnt2);
|
||||
isel(cnt1, CR0, Assembler::greater, /*invert*/ false, cnt2);
|
||||
} else {
|
||||
Label Lskip;
|
||||
blt(CCR0, Lskip);
|
||||
blt(CR0, Lskip);
|
||||
mr(cnt1, cnt2);
|
||||
bind(Lskip);
|
||||
}
|
||||
@@ -254,7 +254,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
Label Lfastloop, Lskipfast;
|
||||
|
||||
srwi_(tmp0, cnt1, log2_chars_per_iter);
|
||||
beq(CCR0, Lskipfast);
|
||||
beq(CR0, Lskipfast);
|
||||
rldicl(cnt2, cnt1, 0, 64 - log2_chars_per_iter); // Remaining characters.
|
||||
li(cnt1, 1 << log2_chars_per_iter); // Initialize for failure case: Rescan characters from current iteration.
|
||||
mtctr(tmp0);
|
||||
@@ -262,8 +262,8 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
bind(Lfastloop);
|
||||
ld(chr1, 0, str1);
|
||||
ld(chr2, 0, str2);
|
||||
cmpd(CCR0, chr1, chr2);
|
||||
bne(CCR0, Lslow);
|
||||
cmpd(CR0, chr1, chr2);
|
||||
bne(CR0, Lslow);
|
||||
addi(str1, str1, stride1);
|
||||
addi(str2, str2, stride2);
|
||||
bdnz(Lfastloop);
|
||||
@@ -272,8 +272,8 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
}
|
||||
|
||||
// Loop which searches the first difference character by character.
|
||||
cmpwi(CCR0, cnt1, 0);
|
||||
beq(CCR0, Lreturn_diff);
|
||||
cmpwi(CR0, cnt1, 0);
|
||||
beq(CR0, Lreturn_diff);
|
||||
bind(Lslow);
|
||||
mtctr(cnt1);
|
||||
|
||||
@@ -289,7 +289,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
if (stride1 == 1) { lbz(chr1, 0, str1); } else { lhz(chr1, 0, str1); }
|
||||
if (stride2 == 1) { lbz(chr2, 0, str2); } else { lhz(chr2, 0, str2); }
|
||||
subf_(result, chr2, chr1); // result = chr1 - chr2
|
||||
bne(CCR0, Ldone);
|
||||
bne(CR0, Ldone);
|
||||
addi(str1, str1, stride1);
|
||||
addi(str2, str2, stride2);
|
||||
bdnz(Lloop);
|
||||
@@ -317,23 +317,23 @@ void C2_MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register
|
||||
const int base_offset = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR);
|
||||
|
||||
// Return true if the same array.
|
||||
cmpd(CCR0, ary1, ary2);
|
||||
beq(CCR0, Lskiploop);
|
||||
cmpd(CR0, ary1, ary2);
|
||||
beq(CR0, Lskiploop);
|
||||
|
||||
// Return false if one of them is null.
|
||||
cmpdi(CCR0, ary1, 0);
|
||||
cmpdi(CCR1, ary2, 0);
|
||||
cmpdi(CR0, ary1, 0);
|
||||
cmpdi(CR1, ary2, 0);
|
||||
li(result, 0);
|
||||
cror(CCR0, Assembler::equal, CCR1, Assembler::equal);
|
||||
beq(CCR0, Ldone);
|
||||
cror(CR0, Assembler::equal, CR1, Assembler::equal);
|
||||
beq(CR0, Ldone);
|
||||
|
||||
// Load the lengths of arrays.
|
||||
lwz(limit, length_offset, ary1);
|
||||
lwz(tmp0, length_offset, ary2);
|
||||
|
||||
// Return false if the two arrays are not equal length.
|
||||
cmpw(CCR0, limit, tmp0);
|
||||
bne(CCR0, Ldone);
|
||||
cmpw(CR0, limit, tmp0);
|
||||
bne(CR0, Ldone);
|
||||
|
||||
// Load array addresses.
|
||||
addi(ary1, ary1, base_offset);
|
||||
@@ -351,7 +351,7 @@ void C2_MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register
|
||||
const int log2_chars_per_iter = is_byte ? 3 : 2;
|
||||
|
||||
srwi_(tmp0, limit, log2_chars_per_iter + (limit_needs_shift ? 1 : 0));
|
||||
beq(CCR0, Lskipfast);
|
||||
beq(CR0, Lskipfast);
|
||||
mtctr(tmp0);
|
||||
|
||||
bind(Lfastloop);
|
||||
@@ -359,13 +359,13 @@ void C2_MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register
|
||||
ld(chr2, 0, ary2);
|
||||
addi(ary1, ary1, 8);
|
||||
addi(ary2, ary2, 8);
|
||||
cmpd(CCR0, chr1, chr2);
|
||||
bne(CCR0, Ldone);
|
||||
cmpd(CR0, chr1, chr2);
|
||||
bne(CR0, Ldone);
|
||||
bdnz(Lfastloop);
|
||||
|
||||
bind(Lskipfast);
|
||||
rldicl_(limit, limit, limit_needs_shift ? 64 - 1 : 0, 64 - log2_chars_per_iter); // Remaining characters.
|
||||
beq(CCR0, Lskiploop);
|
||||
beq(CR0, Lskiploop);
|
||||
mtctr(limit);
|
||||
|
||||
// Character by character.
|
||||
@@ -381,8 +381,8 @@ void C2_MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register
|
||||
addi(ary1, ary1, 2);
|
||||
addi(ary2, ary2, 2);
|
||||
}
|
||||
cmpw(CCR0, chr1, chr2);
|
||||
bne(CCR0, Ldone);
|
||||
cmpw(CR0, chr1, chr2);
|
||||
bne(CR0, Ldone);
|
||||
bdnz(Lloop);
|
||||
|
||||
bind(Lskiploop);
|
||||
@@ -414,9 +414,9 @@ void C2_MacroAssembler::string_indexof(Register result, Register haystack, Regis
|
||||
clrldi(haycnt, haycnt, 32); // Ensure positive int is valid as 64 bit value.
|
||||
addi(addr, haystack, -h_csize); // Accesses use pre-increment.
|
||||
if (needlecntval == 0) { // variable needlecnt
|
||||
cmpwi(CCR6, needlecnt, 2);
|
||||
cmpwi(CR6, needlecnt, 2);
|
||||
clrldi(needlecnt, needlecnt, 32); // Ensure positive int is valid as 64 bit value.
|
||||
blt(CCR6, L_TooShort); // Variable needlecnt: handle short needle separately.
|
||||
blt(CR6, L_TooShort); // Variable needlecnt: handle short needle separately.
|
||||
}
|
||||
|
||||
if (n_csize == 2) { lwz(n_start, 0, needle); } else { lhz(n_start, 0, needle); } // Load first 2 characters of needle.
|
||||
@@ -447,7 +447,7 @@ void C2_MacroAssembler::string_indexof(Register result, Register haystack, Regis
|
||||
subf(addr_diff, addr, last_addr); // Difference between already checked address and last address to check.
|
||||
addi(addr, addr, h_csize); // This is the new address we want to use for comparing.
|
||||
srdi_(ch2, addr_diff, h_csize);
|
||||
beq(CCR0, L_FinalCheck); // 2 characters left?
|
||||
beq(CR0, L_FinalCheck); // 2 characters left?
|
||||
mtctr(ch2); // num of characters / 2
|
||||
bind(L_InnerLoop); // Main work horse (2x unrolled search loop)
|
||||
if (h_csize == 2) { // Load 2 characters of haystack (ignore alignment).
|
||||
@@ -457,18 +457,18 @@ void C2_MacroAssembler::string_indexof(Register result, Register haystack, Regis
|
||||
lhz(ch1, 0, addr);
|
||||
lhz(ch2, 1, addr);
|
||||
}
|
||||
cmpw(CCR0, ch1, n_start); // Compare 2 characters (1 would be sufficient but try to reduce branches to CompLoop).
|
||||
cmpw(CCR1, ch2, n_start);
|
||||
beq(CCR0, L_Comp1); // Did we find the needle start?
|
||||
beq(CCR1, L_Comp2);
|
||||
cmpw(CR0, ch1, n_start); // Compare 2 characters (1 would be sufficient but try to reduce branches to CompLoop).
|
||||
cmpw(CR1, ch2, n_start);
|
||||
beq(CR0, L_Comp1); // Did we find the needle start?
|
||||
beq(CR1, L_Comp2);
|
||||
addi(addr, addr, 2 * h_csize);
|
||||
bdnz(L_InnerLoop);
|
||||
bind(L_FinalCheck);
|
||||
andi_(addr_diff, addr_diff, h_csize); // Remaining characters not covered by InnerLoop: (num of characters) & 1.
|
||||
beq(CCR0, L_NotFound);
|
||||
beq(CR0, L_NotFound);
|
||||
if (h_csize == 2) { lwz(ch1, 0, addr); } else { lhz(ch1, 0, addr); } // One position left at which we have to compare.
|
||||
cmpw(CCR1, ch1, n_start);
|
||||
beq(CCR1, L_Comp1);
|
||||
cmpw(CR1, ch1, n_start);
|
||||
beq(CR1, L_Comp1);
|
||||
bind(L_NotFound);
|
||||
li(result, -1); // not found
|
||||
b(L_End);
|
||||
@@ -483,8 +483,8 @@ void C2_MacroAssembler::string_indexof(Register result, Register haystack, Regis
|
||||
if (n_csize == 2) { lhz(n_start, 0, needle); } else { lbz(n_start, 0, needle); } // First character of needle
|
||||
bind(L_OneCharLoop);
|
||||
if (h_csize == 2) { lhzu(ch1, 2, addr); } else { lbzu(ch1, 1, addr); }
|
||||
cmpw(CCR1, ch1, n_start);
|
||||
beq(CCR1, L_Found); // Did we find the one character needle?
|
||||
cmpw(CR1, ch1, n_start);
|
||||
beq(CR1, L_Found); // Did we find the one character needle?
|
||||
bdnz(L_OneCharLoop);
|
||||
li(result, -1); // Not found.
|
||||
b(L_End);
|
||||
@@ -500,7 +500,7 @@ void C2_MacroAssembler::string_indexof(Register result, Register haystack, Regis
|
||||
bind(L_Comp1); // Addr points to possible needle start.
|
||||
if (needlecntval != 2) { // Const needlecnt==2?
|
||||
if (needlecntval != 3) {
|
||||
if (needlecntval == 0) { beq(CCR6, L_Found); } // Variable needlecnt==2?
|
||||
if (needlecntval == 0) { beq(CR6, L_Found); } // Variable needlecnt==2?
|
||||
Register n_ind = tmp4,
|
||||
h_ind = n_ind;
|
||||
li(n_ind, 2 * n_csize); // First 2 characters are already compared, use index 2.
|
||||
@@ -513,15 +513,15 @@ void C2_MacroAssembler::string_indexof(Register result, Register haystack, Regis
|
||||
}
|
||||
if (n_csize == 2) { lhzx(ch2, needle, n_ind); } else { lbzx(ch2, needle, n_ind); }
|
||||
if (h_csize == 2) { lhzx(ch1, addr, h_ind); } else { lbzx(ch1, addr, h_ind); }
|
||||
cmpw(CCR1, ch1, ch2);
|
||||
bne(CCR1, L_OuterLoop);
|
||||
cmpw(CR1, ch1, ch2);
|
||||
bne(CR1, L_OuterLoop);
|
||||
addi(n_ind, n_ind, n_csize);
|
||||
bdnz(L_CompLoop);
|
||||
} else { // No loop required if there's only one needle character left.
|
||||
if (n_csize == 2) { lhz(ch2, 2 * 2, needle); } else { lbz(ch2, 2 * 1, needle); }
|
||||
if (h_csize == 2) { lhz(ch1, 2 * 2, addr); } else { lbz(ch1, 2 * 1, addr); }
|
||||
cmpw(CCR1, ch1, ch2);
|
||||
bne(CCR1, L_OuterLoop);
|
||||
cmpw(CR1, ch1, ch2);
|
||||
bne(CR1, L_OuterLoop);
|
||||
}
|
||||
}
|
||||
// Return index ...
|
||||
@@ -545,7 +545,7 @@ void C2_MacroAssembler::string_indexof_char(Register result, Register haystack,
|
||||
//4:
|
||||
srwi_(tmp2, haycnt, 1); // Shift right by exact_log2(UNROLL_FACTOR).
|
||||
mr(addr, haystack);
|
||||
beq(CCR0, L_FinalCheck);
|
||||
beq(CR0, L_FinalCheck);
|
||||
mtctr(tmp2); // Move to count register.
|
||||
//8:
|
||||
bind(L_InnerLoop); // Main work horse (2x unrolled search loop).
|
||||
@@ -556,19 +556,19 @@ void C2_MacroAssembler::string_indexof_char(Register result, Register haystack,
|
||||
lbz(ch1, 0, addr);
|
||||
lbz(ch2, 1, addr);
|
||||
}
|
||||
(needle != R0) ? cmpw(CCR0, ch1, needle) : cmplwi(CCR0, ch1, (unsigned int)needleChar);
|
||||
(needle != R0) ? cmpw(CCR1, ch2, needle) : cmplwi(CCR1, ch2, (unsigned int)needleChar);
|
||||
beq(CCR0, L_Found1); // Did we find the needle?
|
||||
beq(CCR1, L_Found2);
|
||||
(needle != R0) ? cmpw(CR0, ch1, needle) : cmplwi(CR0, ch1, (unsigned int)needleChar);
|
||||
(needle != R0) ? cmpw(CR1, ch2, needle) : cmplwi(CR1, ch2, (unsigned int)needleChar);
|
||||
beq(CR0, L_Found1); // Did we find the needle?
|
||||
beq(CR1, L_Found2);
|
||||
addi(addr, addr, 2 * h_csize);
|
||||
bdnz(L_InnerLoop);
|
||||
//16:
|
||||
bind(L_FinalCheck);
|
||||
andi_(R0, haycnt, 1);
|
||||
beq(CCR0, L_NotFound);
|
||||
beq(CR0, L_NotFound);
|
||||
if (!is_byte) { lhz(ch1, 0, addr); } else { lbz(ch1, 0, addr); } // One position left at which we have to compare.
|
||||
(needle != R0) ? cmpw(CCR1, ch1, needle) : cmplwi(CCR1, ch1, (unsigned int)needleChar);
|
||||
beq(CCR1, L_Found1);
|
||||
(needle != R0) ? cmpw(CR1, ch1, needle) : cmplwi(CR1, ch1, (unsigned int)needleChar);
|
||||
beq(CR1, L_Found1);
|
||||
//21:
|
||||
bind(L_NotFound);
|
||||
li(result, -1); // Not found.
|
||||
@@ -594,7 +594,7 @@ void C2_MacroAssembler::count_positives(Register src, Register cnt, Register res
|
||||
lis(tmp1, (int)(short)0x8080); // tmp1 = 0x8080808080808080
|
||||
srwi_(tmp2, cnt, 4);
|
||||
mr(result, src); // Use result reg to point to the current position.
|
||||
beq(CCR0, Lslow);
|
||||
beq(CR0, Lslow);
|
||||
ori(tmp1, tmp1, 0x8080);
|
||||
rldimi(tmp1, tmp1, 32, 0);
|
||||
mtctr(tmp2);
|
||||
@@ -607,19 +607,19 @@ void C2_MacroAssembler::count_positives(Register src, Register cnt, Register res
|
||||
orr(tmp0, tmp2, tmp0);
|
||||
|
||||
and_(tmp0, tmp0, tmp1);
|
||||
bne(CCR0, Lslow); // Found negative byte.
|
||||
bne(CR0, Lslow); // Found negative byte.
|
||||
addi(result, result, 16);
|
||||
bdnz(Lfastloop);
|
||||
|
||||
bind(Lslow); // Fallback to slow version.
|
||||
subf(tmp0, src, result); // Bytes known positive.
|
||||
subf_(tmp0, tmp0, cnt); // Remaining Bytes.
|
||||
beq(CCR0, Ldone);
|
||||
beq(CR0, Ldone);
|
||||
mtctr(tmp0);
|
||||
bind(Lloop);
|
||||
lbz(tmp0, 0, result);
|
||||
andi_(tmp0, tmp0, 0x80);
|
||||
bne(CCR0, Ldone); // Found negative byte.
|
||||
bne(CR0, Ldone); // Found negative byte.
|
||||
addi(result, result, 1);
|
||||
bdnz(Lloop);
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2020, 2024 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2020, 2025 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@@ -282,8 +282,8 @@ void DowncallLinker::StubGenerator::generate() {
|
||||
__ safepoint_poll(L_safepoint_poll_slow_path, tmp, true /* at_return */, false /* in_nmethod */);
|
||||
|
||||
__ lwz(tmp, in_bytes(JavaThread::suspend_flags_offset()), R16_thread);
|
||||
__ cmpwi(CCR0, tmp, 0);
|
||||
__ bne(CCR0, L_safepoint_poll_slow_path);
|
||||
__ cmpwi(CR0, tmp, 0);
|
||||
__ bne(CR0, L_safepoint_poll_slow_path);
|
||||
__ bind(L_after_safepoint_poll);
|
||||
|
||||
// change thread state
|
||||
@@ -293,8 +293,8 @@ void DowncallLinker::StubGenerator::generate() {
|
||||
|
||||
__ block_comment("reguard stack check");
|
||||
__ lwz(tmp, in_bytes(JavaThread::stack_guard_state_offset()), R16_thread);
|
||||
__ cmpwi(CCR0, tmp, StackOverflow::stack_guard_yellow_reserved_disabled);
|
||||
__ beq(CCR0, L_reguard);
|
||||
__ cmpwi(CR0, tmp, StackOverflow::stack_guard_yellow_reserved_disabled);
|
||||
__ beq(CR0, L_reguard);
|
||||
__ bind(L_after_reguard);
|
||||
|
||||
__ reset_last_Java_frame();
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2024 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2018, 2025 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -51,7 +51,7 @@ static void generate_marking_inactive_test(MacroAssembler* masm) {
|
||||
int active_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
|
||||
assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
|
||||
__ lbz(R0, active_offset, R16_thread); // tmp1 := *(mark queue active address)
|
||||
__ cmpwi(CCR0, R0, 0);
|
||||
__ cmpwi(CR0, R0, 0);
|
||||
}
|
||||
|
||||
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
|
||||
@@ -68,7 +68,7 @@ void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm
|
||||
|
||||
// Is marking active?
|
||||
generate_marking_inactive_test(masm);
|
||||
__ beq(CCR0, filtered);
|
||||
__ beq(CR0, filtered);
|
||||
|
||||
__ save_LR(R0);
|
||||
__ push_frame(frame_size, R0);
|
||||
@@ -118,8 +118,8 @@ static void generate_queue_insertion(MacroAssembler* masm, ByteSize index_offset
|
||||
// Can we store a value in the given thread's buffer?
|
||||
// (The index field is typed as size_t.)
|
||||
__ ld(temp, in_bytes(index_offset), R16_thread); // temp := *(index address)
|
||||
__ cmpdi(CCR0, temp, 0); // jump to runtime if index == 0 (full buffer)
|
||||
__ beq(CCR0, runtime);
|
||||
__ cmpdi(CR0, temp, 0); // jump to runtime if index == 0 (full buffer)
|
||||
__ beq(CR0, runtime);
|
||||
// The buffer is not full, store value into it.
|
||||
__ ld(R0, in_bytes(buffer_offset), R16_thread); // R0 := buffer address
|
||||
__ addi(temp, temp, -wordSize); // temp := next index
|
||||
@@ -154,7 +154,7 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, Decorator
|
||||
Label runtime, filtered;
|
||||
|
||||
generate_marking_inactive_test(masm);
|
||||
__ beq(CCR0, filtered);
|
||||
__ beq(CR0, filtered);
|
||||
|
||||
// Do we need to load the previous value?
|
||||
if (!preloaded) {
|
||||
@@ -171,12 +171,12 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, Decorator
|
||||
// Is the previous value null?
|
||||
if (preloaded && not_null) {
|
||||
#ifdef ASSERT
|
||||
__ cmpdi(CCR0, pre_val, 0);
|
||||
__ cmpdi(CR0, pre_val, 0);
|
||||
__ asm_assert_ne("null oop not allowed (G1 pre)"); // Checked by caller.
|
||||
#endif
|
||||
} else {
|
||||
__ cmpdi(CCR0, pre_val, 0);
|
||||
__ beq(CCR0, filtered);
|
||||
__ cmpdi(CR0, pre_val, 0);
|
||||
__ beq(CR0, filtered);
|
||||
}
|
||||
|
||||
if (!preloaded && UseCompressedOops) {
|
||||
@@ -240,14 +240,14 @@ static Address generate_card_young_test(MacroAssembler* masm, const Register sto
|
||||
__ load_const_optimized(tmp1, (address)(ct->card_table()->byte_map_base()), tmp2);
|
||||
__ srdi(tmp2, store_addr, CardTable::card_shift()); // tmp1 := card address relative to card table base
|
||||
__ lbzx(R0, tmp1, tmp2); // tmp1 := card address
|
||||
__ cmpwi(CCR0, R0, (int)G1CardTable::g1_young_card_val());
|
||||
__ cmpwi(CR0, R0, (int)G1CardTable::g1_young_card_val());
|
||||
return Address(tmp1, tmp2); // return card address
|
||||
}
|
||||
|
||||
static void generate_card_dirty_test(MacroAssembler* masm, Address card_addr) {
|
||||
__ membar(Assembler::StoreLoad); // Must reload after StoreLoad membar due to concurrent refinement
|
||||
__ lbzx(R0, card_addr.base(), card_addr.index()); // tmp2 := card
|
||||
__ cmpwi(CCR0, R0, (int)G1CardTable::dirty_card_val()); // tmp2 := card == dirty_card_val?
|
||||
__ cmpwi(CR0, R0, (int)G1CardTable::dirty_card_val()); // tmp2 := card == dirty_card_val?
|
||||
}
|
||||
|
||||
void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, DecoratorSet decorators,
|
||||
@@ -262,24 +262,24 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, Decorato
|
||||
CardTableBarrierSet* ct = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
|
||||
|
||||
generate_region_crossing_test(masm, store_addr, new_val);
|
||||
__ beq(CCR0, filtered);
|
||||
__ beq(CR0, filtered);
|
||||
|
||||
// Crosses regions, storing null?
|
||||
if (not_null) {
|
||||
#ifdef ASSERT
|
||||
__ cmpdi(CCR0, new_val, 0);
|
||||
__ cmpdi(CR0, new_val, 0);
|
||||
__ asm_assert_ne("null oop not allowed (G1 post)"); // Checked by caller.
|
||||
#endif
|
||||
} else {
|
||||
__ cmpdi(CCR0, new_val, 0);
|
||||
__ beq(CCR0, filtered);
|
||||
__ cmpdi(CR0, new_val, 0);
|
||||
__ beq(CR0, filtered);
|
||||
}
|
||||
|
||||
Address card_addr = generate_card_young_test(masm, store_addr, tmp1, tmp2);
|
||||
__ beq(CCR0, filtered);
|
||||
__ beq(CR0, filtered);
|
||||
|
||||
generate_card_dirty_test(masm, card_addr);
|
||||
__ beq(CCR0, filtered);
|
||||
__ beq(CR0, filtered);
|
||||
|
||||
__ li(R0, (int)G1CardTable::dirty_card_val());
|
||||
__ stbx(R0, card_addr.base(), card_addr.index()); // *(card address) := dirty_card_val
|
||||
@@ -371,14 +371,14 @@ void G1BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value
|
||||
Register tmp1, Register tmp2,
|
||||
MacroAssembler::PreservationLevel preservation_level) {
|
||||
Label done, not_weak;
|
||||
__ cmpdi(CCR0, value, 0);
|
||||
__ beq(CCR0, done); // Use null as-is.
|
||||
__ cmpdi(CR0, value, 0);
|
||||
__ beq(CR0, done); // Use null as-is.
|
||||
|
||||
__ clrrdi(tmp1, value, JNIHandles::tag_size);
|
||||
__ andi_(tmp2, value, JNIHandles::TypeTag::weak_global);
|
||||
__ ld(value, 0, tmp1); // Resolve (untagged) jobject.
|
||||
|
||||
__ beq(CCR0, not_weak); // Test for jweak tag.
|
||||
__ beq(CR0, not_weak); // Test for jweak tag.
|
||||
__ verify_oop(value, FILE_AND_LINE);
|
||||
g1_write_barrier_pre(masm, IN_NATIVE | ON_PHANTOM_OOP_REF,
|
||||
noreg, noreg, value,
|
||||
@@ -409,7 +409,7 @@ void G1BarrierSetAssembler::g1_write_barrier_pre_c2(MacroAssembler* masm,
|
||||
stub->initialize_registers(obj, pre_val, R16_thread, tmp1, tmp2);
|
||||
|
||||
generate_marking_inactive_test(masm);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), *stub->entry());
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CR0, Assembler::equal), *stub->entry());
|
||||
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
@@ -433,8 +433,8 @@ void G1BarrierSetAssembler::generate_c2_pre_barrier_stub(MacroAssembler* masm,
|
||||
__ ld(pre_val, 0, obj);
|
||||
}
|
||||
}
|
||||
__ cmpdi(CCR0, pre_val, 0);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());
|
||||
__ cmpdi(CR0, pre_val, 0);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CR0, Assembler::equal), *stub->continuation());
|
||||
|
||||
Register pre_val_decoded = pre_val;
|
||||
if (UseCompressedOops) {
|
||||
@@ -472,25 +472,25 @@ void G1BarrierSetAssembler::g1_write_barrier_post_c2(MacroAssembler* masm,
|
||||
if (null_check_required && CompressedOops::base() != nullptr) {
|
||||
// We prefer doing the null check after the region crossing check.
|
||||
// Only compressed oop modes with base != null require a null check here.
|
||||
__ cmpwi(CCR0, new_val, 0);
|
||||
__ beq(CCR0, *stub->continuation());
|
||||
__ cmpwi(CR0, new_val, 0);
|
||||
__ beq(CR0, *stub->continuation());
|
||||
null_check_required = false;
|
||||
}
|
||||
new_val_decoded = __ decode_heap_oop_not_null(tmp2, new_val);
|
||||
}
|
||||
|
||||
generate_region_crossing_test(masm, store_addr, new_val_decoded);
|
||||
__ beq(CCR0, *stub->continuation());
|
||||
__ beq(CR0, *stub->continuation());
|
||||
|
||||
// crosses regions, storing null?
|
||||
if (null_check_required) {
|
||||
__ cmpdi(CCR0, new_val_decoded, 0);
|
||||
__ beq(CCR0, *stub->continuation());
|
||||
__ cmpdi(CR0, new_val_decoded, 0);
|
||||
__ beq(CR0, *stub->continuation());
|
||||
}
|
||||
|
||||
Address card_addr = generate_card_young_test(masm, store_addr, tmp1, tmp2);
|
||||
assert(card_addr.base() == tmp1 && card_addr.index() == tmp2, "needed by post barrier stub");
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), *stub->entry());
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CR0, Assembler::equal), *stub->entry());
|
||||
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
@@ -504,7 +504,7 @@ void G1BarrierSetAssembler::generate_c2_post_barrier_stub(MacroAssembler* masm,
|
||||
__ bind(*stub->entry());
|
||||
|
||||
generate_card_dirty_test(masm, card_addr);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CR0, Assembler::equal), *stub->continuation());
|
||||
|
||||
__ li(R0, (int)G1CardTable::dirty_card_val());
|
||||
__ stbx(R0, card_addr.base(), card_addr.index()); // *(card address) := dirty_card_val
|
||||
@@ -546,8 +546,8 @@ void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrier
|
||||
ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/);
|
||||
}
|
||||
|
||||
__ cmpdi(CCR0, pre_val_reg, 0);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());
|
||||
__ cmpdi(CR0, pre_val_reg, 0);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CR0, Assembler::equal), *stub->continuation());
|
||||
|
||||
address c_code = bs->pre_barrier_c1_runtime_code_blob()->code_begin();
|
||||
//__ load_const_optimized(R0, c_code);
|
||||
@@ -567,8 +567,8 @@ void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarri
|
||||
Register addr_reg = stub->addr()->as_pointer_register();
|
||||
Register new_val_reg = stub->new_val()->as_register();
|
||||
|
||||
__ cmpdi(CCR0, new_val_reg, 0);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::equal), *stub->continuation());
|
||||
__ cmpdi(CR0, new_val_reg, 0);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CR0, Assembler::equal), *stub->continuation());
|
||||
|
||||
address c_code = bs->post_barrier_c1_runtime_code_blob()->code_begin();
|
||||
//__ load_const_optimized(R0, c_code);
|
||||
@@ -604,7 +604,7 @@ void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler*
|
||||
|
||||
// Is marking still active?
|
||||
generate_marking_inactive_test(sasm);
|
||||
__ beq(CCR0, marking_not_active);
|
||||
__ beq(CR0, marking_not_active);
|
||||
|
||||
__ bind(restart);
|
||||
// Load the index into the SATB buffer. SATBMarkQueue::_index is a
|
||||
@@ -612,8 +612,8 @@ void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler*
|
||||
__ ld(tmp, satb_q_index_byte_offset, R16_thread);
|
||||
|
||||
// index == 0?
|
||||
__ cmpdi(CCR0, tmp, 0);
|
||||
__ beq(CCR0, refill);
|
||||
__ cmpdi(CR0, tmp, 0);
|
||||
__ beq(CR0, refill);
|
||||
|
||||
__ ld(tmp2, satb_q_buf_byte_offset, R16_thread);
|
||||
__ ld(pre_val, -8, R1_SP); // Load from stack.
|
||||
@@ -666,15 +666,15 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
|
||||
__ lbz(tmp, 0, addr); // tmp := [addr + cardtable]
|
||||
|
||||
// Return if young card.
|
||||
__ cmpwi(CCR0, tmp, G1CardTable::g1_young_card_val());
|
||||
__ beq(CCR0, ret);
|
||||
__ cmpwi(CR0, tmp, G1CardTable::g1_young_card_val());
|
||||
__ beq(CR0, ret);
|
||||
|
||||
// Return if sequential consistent value is already dirty.
|
||||
__ membar(Assembler::StoreLoad);
|
||||
__ lbz(tmp, 0, addr); // tmp := [addr + cardtable]
|
||||
|
||||
__ cmpwi(CCR0, tmp, G1CardTable::dirty_card_val());
|
||||
__ beq(CCR0, ret);
|
||||
__ cmpwi(CR0, tmp, G1CardTable::dirty_card_val());
|
||||
__ beq(CR0, ret);
|
||||
|
||||
// Not dirty.
|
||||
|
||||
@@ -692,8 +692,8 @@ void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler*
|
||||
__ ld(tmp2, dirty_card_q_index_byte_offset, R16_thread);
|
||||
|
||||
// index == 0?
|
||||
__ cmpdi(CCR0, tmp2, 0);
|
||||
__ beq(CCR0, refill);
|
||||
__ cmpdi(CR0, tmp2, 0);
|
||||
__ beq(CR0, refill);
|
||||
|
||||
__ ld(tmp, dirty_card_q_buf_byte_offset, R16_thread);
|
||||
__ addi(tmp2, tmp2, -oopSize);
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
//
|
||||
// Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
// Copyright (c) 2024 SAP SE. All rights reserved.
|
||||
// Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
// Copyright (c) 2025 SAP SE. All rights reserved.
|
||||
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
//
|
||||
// This code is free software; you can redistribute it and/or modify it
|
||||
@@ -164,7 +164,7 @@ instruct g1CompareAndExchangeP(iRegPdst res, indirect mem, iRegPsrc oldval, iReg
|
||||
format %{ "cmpxchgd $newval, $mem" %}
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ cmpxchgd(CCR0, $res$$Register, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
__ cmpxchgd(CR0, $res$$Register, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
@@ -194,7 +194,7 @@ instruct g1CompareAndExchangeP_acq(iRegPdst res, indirect mem, iRegPsrc oldval,
|
||||
format %{ "cmpxchgd acq $newval, $mem" %}
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ cmpxchgd(CCR0, $res$$Register, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
__ cmpxchgd(CR0, $res$$Register, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
@@ -230,7 +230,7 @@ instruct g1CompareAndExchangeN(iRegNdst res, indirect mem, iRegNsrc oldval, iReg
|
||||
format %{ "cmpxchgw $newval, $mem" %}
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ cmpxchgw(CCR0, $res$$Register, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
__ cmpxchgw(CR0, $res$$Register, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
@@ -261,7 +261,7 @@ instruct g1CompareAndExchangeN_acq(iRegNdst res, indirect mem, iRegNsrc oldval,
|
||||
format %{ "cmpxchgw acq $newval, $mem" %}
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ cmpxchgw(CCR0, $res$$Register, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
__ cmpxchgw(CR0, $res$$Register, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
@@ -299,7 +299,7 @@ instruct g1CompareAndSwapP(iRegIdst res, indirect mem, iRegPsrc oldval, iRegPsrc
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ li($res$$Register, 0);
|
||||
__ cmpxchgd(CCR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
__ cmpxchgd(CR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
@@ -332,7 +332,7 @@ instruct g1CompareAndSwapP_acq(iRegIdst res, indirect mem, iRegPsrc oldval, iReg
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ li($res$$Register, 0);
|
||||
__ cmpxchgd(CCR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
__ cmpxchgd(CR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
@@ -371,7 +371,7 @@ instruct g1CompareAndSwapN(iRegIdst res, indirect mem, iRegNsrc oldval, iRegNsrc
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ li($res$$Register, 0);
|
||||
__ cmpxchgw(CCR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
__ cmpxchgw(CR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
@@ -405,7 +405,7 @@ instruct g1CompareAndSwapN_acq(iRegIdst res, indirect mem, iRegNsrc oldval, iReg
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ li($res$$Register, 0);
|
||||
__ cmpxchgw(CCR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
__ cmpxchgw(CR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
@@ -445,7 +445,7 @@ instruct weakG1CompareAndSwapP(iRegIdst res, indirect mem, iRegPsrc oldval, iReg
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ li($res$$Register, 0);
|
||||
__ cmpxchgd(CCR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
__ cmpxchgd(CR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
@@ -478,7 +478,7 @@ instruct weakG1CompareAndSwapP_acq(iRegIdst res, indirect mem, iRegPsrc oldval,
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ li($res$$Register, 0);
|
||||
__ cmpxchgd(CCR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
__ cmpxchgd(CR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
@@ -517,7 +517,7 @@ instruct weakG1CompareAndSwapN(iRegIdst res, indirect mem, iRegNsrc oldval, iReg
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ li($res$$Register, 0);
|
||||
__ cmpxchgw(CCR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
__ cmpxchgw(CR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
@@ -551,7 +551,7 @@ instruct weakG1CompareAndSwapN_acq(iRegIdst res, indirect mem, iRegNsrc oldval,
|
||||
ins_encode %{
|
||||
Label no_update;
|
||||
__ li($res$$Register, 0);
|
||||
__ cmpxchgw(CCR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
__ cmpxchgw(CR0, R0, $oldval$$Register, $newval$$Register, $mem$$Register,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, &no_update, true, true);
|
||||
// Pass oldval to SATB which is the only value which can get overwritten.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2022 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2018, 2025 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -89,8 +89,8 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators,
|
||||
if (UseCompressedOops && in_heap) {
|
||||
if (L_handle_null != nullptr) { // Label provided.
|
||||
__ lwz(dst, ind_or_offs, base);
|
||||
__ cmpwi(CCR0, dst, 0);
|
||||
__ beq(CCR0, *L_handle_null);
|
||||
__ cmpwi(CR0, dst, 0);
|
||||
__ beq(CR0, *L_handle_null);
|
||||
__ decode_heap_oop_not_null(dst);
|
||||
} else if (not_null) { // Guaranteed to be not null.
|
||||
Register narrowOop = (tmp1 != noreg && CompressedOops::base_disjoint()) ? tmp1 : dst;
|
||||
@@ -103,8 +103,8 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators,
|
||||
} else {
|
||||
__ ld(dst, ind_or_offs, base);
|
||||
if (L_handle_null != nullptr) {
|
||||
__ cmpdi(CCR0, dst, 0);
|
||||
__ beq(CCR0, *L_handle_null);
|
||||
__ cmpdi(CR0, dst, 0);
|
||||
__ beq(CR0, *L_handle_null);
|
||||
}
|
||||
}
|
||||
break;
|
||||
@@ -118,11 +118,11 @@ void BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value,
|
||||
Register tmp1, Register tmp2,
|
||||
MacroAssembler::PreservationLevel preservation_level) {
|
||||
Label done, tagged, weak_tagged, verify;
|
||||
__ cmpdi(CCR0, value, 0);
|
||||
__ beq(CCR0, done); // Use null as-is.
|
||||
__ cmpdi(CR0, value, 0);
|
||||
__ beq(CR0, done); // Use null as-is.
|
||||
|
||||
__ andi_(tmp1, value, JNIHandles::tag_mask);
|
||||
__ bne(CCR0, tagged); // Test for tag.
|
||||
__ bne(CR0, tagged); // Test for tag.
|
||||
|
||||
__ access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, // no uncoloring
|
||||
value, (intptr_t)0, value, tmp1, tmp2, preservation_level);
|
||||
@@ -131,7 +131,7 @@ void BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value,
|
||||
__ bind(tagged);
|
||||
__ andi_(tmp1, value, JNIHandles::TypeTag::weak_global);
|
||||
__ clrrdi(value, value, JNIHandles::tag_size); // Untag.
|
||||
__ bne(CCR0, weak_tagged); // Test for jweak tag.
|
||||
__ bne(CR0, weak_tagged); // Test for jweak tag.
|
||||
|
||||
__ access_load_at(T_OBJECT, IN_NATIVE,
|
||||
value, (intptr_t)0, value, tmp1, tmp2, preservation_level);
|
||||
@@ -152,14 +152,14 @@ void BarrierSetAssembler::resolve_global_jobject(MacroAssembler* masm, Register
|
||||
MacroAssembler::PreservationLevel preservation_level) {
|
||||
Label done;
|
||||
|
||||
__ cmpdi(CCR0, value, 0);
|
||||
__ beq(CCR0, done); // Use null as-is.
|
||||
__ cmpdi(CR0, value, 0);
|
||||
__ beq(CR0, done); // Use null as-is.
|
||||
|
||||
#ifdef ASSERT
|
||||
{
|
||||
Label valid_global_tag;
|
||||
__ andi_(tmp1, value, JNIHandles::TypeTag::global);
|
||||
__ bne(CCR0, valid_global_tag); // Test for global tag.
|
||||
__ bne(CR0, valid_global_tag); // Test for global tag.
|
||||
__ stop("non global jobject using resolve_global_jobject");
|
||||
__ bind(valid_global_tag);
|
||||
}
|
||||
@@ -200,9 +200,9 @@ void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Register t
|
||||
|
||||
// Low order half of 64 bit value is currently used.
|
||||
__ ld(R0, in_bytes(bs_nm->thread_disarmed_guard_value_offset()), R16_thread);
|
||||
__ cmpw(CCR0, R0, tmp);
|
||||
__ cmpw(CR0, R0, tmp);
|
||||
|
||||
__ bnectrl(CCR0);
|
||||
__ bnectrl(CR0);
|
||||
|
||||
// Oops may have been changed. Make those updates observable.
|
||||
// "isync" can serve both, data and instruction patching.
|
||||
@@ -229,8 +229,8 @@ void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler *masm, Register tmp1,
|
||||
Label bad_call, skip_barrier;
|
||||
|
||||
// Fast path: If no method is given, the call is definitely bad.
|
||||
__ cmpdi(CCR0, R19_method, 0);
|
||||
__ beq(CCR0, bad_call);
|
||||
__ cmpdi(CR0, R19_method, 0);
|
||||
__ beq(CR0, bad_call);
|
||||
|
||||
// Load class loader data to determine whether the method's holder is concurrently unloading.
|
||||
__ load_method_holder(tmp1, R19_method);
|
||||
@@ -238,14 +238,14 @@ void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler *masm, Register tmp1,
|
||||
|
||||
// Fast path: If class loader is strong, the holder cannot be unloaded.
|
||||
__ lwz(tmp2, in_bytes(ClassLoaderData::keep_alive_ref_count_offset()), tmp1_class_loader_data);
|
||||
__ cmpdi(CCR0, tmp2, 0);
|
||||
__ bne(CCR0, skip_barrier);
|
||||
__ cmpdi(CR0, tmp2, 0);
|
||||
__ bne(CR0, skip_barrier);
|
||||
|
||||
// Class loader is weak. Determine whether the holder is still alive.
|
||||
__ ld(tmp2, in_bytes(ClassLoaderData::holder_offset()), tmp1_class_loader_data);
|
||||
__ resolve_weak_handle(tmp2, tmp1, tmp3, MacroAssembler::PreservationLevel::PRESERVATION_FRAME_LR_GP_FP_REGS);
|
||||
__ cmpdi(CCR0, tmp2, 0);
|
||||
__ bne(CCR0, skip_barrier);
|
||||
__ cmpdi(CR0, tmp2, 0);
|
||||
__ bne(CR0, skip_barrier);
|
||||
|
||||
__ bind(bad_call);
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2021 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2018, 2025 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -49,7 +49,7 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
|
||||
Label Lskip_loop, Lstore_loop;
|
||||
|
||||
__ sldi_(count, count, LogBytesPerHeapOop);
|
||||
__ beq(CCR0, Lskip_loop); // zero length
|
||||
__ beq(CR0, Lskip_loop); // zero length
|
||||
__ addi(count, count, -BytesPerHeapOop);
|
||||
__ add(count, addr, count);
|
||||
// Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2021 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2018, 2025 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -80,8 +80,8 @@ void ModRefBarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register v
|
||||
Register tmp1, Register tmp2,
|
||||
MacroAssembler::PreservationLevel preservation_level) {
|
||||
Label done;
|
||||
__ cmpdi(CCR0, value, 0);
|
||||
__ beq(CCR0, done); // Use null as-is.
|
||||
__ cmpdi(CR0, value, 0);
|
||||
__ beq(CR0, done); // Use null as-is.
|
||||
|
||||
__ clrrdi(tmp1, value, JNIHandles::tag_size);
|
||||
__ ld(value, 0, tmp1); // Resolve (untagged) jobject.
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2024, Red Hat, Inc. All rights reserved.
|
||||
* Copyright (c) 2012, 2024 SAP SE. All rights reserved.
|
||||
* Copyright (c) 2018, 2025, Red Hat, Inc. All rights reserved.
|
||||
* Copyright (c) 2012, 2025 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -102,8 +102,8 @@ void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler *masm, Dec
|
||||
Label skip_prologue;
|
||||
|
||||
// Fast path: Array is of length zero.
|
||||
__ cmpdi(CCR0, count, 0);
|
||||
__ beq(CCR0, skip_prologue);
|
||||
__ cmpdi(CR0, count, 0);
|
||||
__ beq(CR0, skip_prologue);
|
||||
|
||||
/* ==== Check whether barrier is required (gc state) ==== */
|
||||
__ lbz(R11_tmp, in_bytes(ShenandoahThreadLocalData::gc_state_offset()),
|
||||
@@ -118,7 +118,7 @@ void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler *masm, Dec
|
||||
: ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING;
|
||||
|
||||
__ andi_(R11_tmp, R11_tmp, required_states);
|
||||
__ beq(CCR0, skip_prologue);
|
||||
__ beq(CR0, skip_prologue);
|
||||
|
||||
/* ==== Invoke runtime ==== */
|
||||
// Save to-be-preserved registers.
|
||||
@@ -216,7 +216,7 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_impl(MacroAssembler *masm
|
||||
__ lbz(tmp1, in_bytes(ShenandoahThreadLocalData::gc_state_offset()), R16_thread);
|
||||
|
||||
__ andi_(tmp1, tmp1, ShenandoahHeap::MARKING);
|
||||
__ beq(CCR0, skip_barrier);
|
||||
__ beq(CR0, skip_barrier);
|
||||
|
||||
/* ==== Determine the reference's previous value ==== */
|
||||
bool preloaded_mode = base == noreg;
|
||||
@@ -235,12 +235,12 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_impl(MacroAssembler *masm
|
||||
|
||||
if ((decorators & IS_NOT_NULL) != 0) {
|
||||
#ifdef ASSERT
|
||||
__ cmpdi(CCR0, pre_val, 0);
|
||||
__ cmpdi(CR0, pre_val, 0);
|
||||
__ asm_assert_ne("null oop is not allowed");
|
||||
#endif // ASSERT
|
||||
} else {
|
||||
__ cmpdi(CCR0, pre_val, 0);
|
||||
__ beq(CCR0, skip_barrier);
|
||||
__ cmpdi(CR0, pre_val, 0);
|
||||
__ beq(CR0, skip_barrier);
|
||||
}
|
||||
} else {
|
||||
// Load from the reference address to determine the reference's current value (before the store is being performed).
|
||||
@@ -254,8 +254,8 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_impl(MacroAssembler *masm
|
||||
__ ld(pre_val, ind_or_offs, base);
|
||||
}
|
||||
|
||||
__ cmpdi(CCR0, pre_val, 0);
|
||||
__ beq(CCR0, skip_barrier);
|
||||
__ cmpdi(CR0, pre_val, 0);
|
||||
__ beq(CR0, skip_barrier);
|
||||
|
||||
if (UseCompressedOops) {
|
||||
__ decode_heap_oop_not_null(pre_val);
|
||||
@@ -271,8 +271,8 @@ void ShenandoahBarrierSetAssembler::satb_write_barrier_impl(MacroAssembler *masm
|
||||
// If not, jump to the runtime to commit the buffer and to allocate a new one.
|
||||
// (The buffer's index corresponds to the amount of remaining free space.)
|
||||
__ ld(Rindex, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()), R16_thread);
|
||||
__ cmpdi(CCR0, Rindex, 0);
|
||||
__ beq(CCR0, runtime); // If index == 0 (buffer is full), goto runtime.
|
||||
__ cmpdi(CR0, Rindex, 0);
|
||||
__ beq(CR0, runtime); // If index == 0 (buffer is full), goto runtime.
|
||||
|
||||
// Capacity suffices. Decrement the queue's size by the size of one oop.
|
||||
// (The buffer is filled contrary to the heap's growing direction, i.e., it is filled downwards.)
|
||||
@@ -362,9 +362,9 @@ void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssemb
|
||||
"marked value must equal the value obtained when all lock bits are being set");
|
||||
if (VM_Version::has_isel()) {
|
||||
__ xori(tmp1, tmp1, markWord::lock_mask_in_place);
|
||||
__ isel(dst, CCR0, Assembler::equal, false, tmp1);
|
||||
__ isel(dst, CR0, Assembler::equal, false, tmp1);
|
||||
} else {
|
||||
__ bne(CCR0, done);
|
||||
__ bne(CR0, done);
|
||||
__ xori(dst, tmp1, markWord::lock_mask_in_place);
|
||||
}
|
||||
|
||||
@@ -402,7 +402,7 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier_impl(
|
||||
if (is_strong) {
|
||||
// For strong references, the heap is considered stable if "has forwarded" is not active.
|
||||
__ andi_(tmp1, tmp2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION);
|
||||
__ beq(CCR0, skip_barrier);
|
||||
__ beq(CR0, skip_barrier);
|
||||
#ifdef ASSERT
|
||||
// "evacuation" -> (implies) "has forwarded". If we reach this code, "has forwarded" must thus be set.
|
||||
__ andi_(tmp1, tmp1, ShenandoahHeap::HAS_FORWARDED);
|
||||
@@ -414,10 +414,10 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier_impl(
|
||||
// The additional phase conditions are in place to avoid the resurrection of weak references (see JDK-8266440).
|
||||
Label skip_fastpath;
|
||||
__ andi_(tmp1, tmp2, ShenandoahHeap::WEAK_ROOTS);
|
||||
__ bne(CCR0, skip_fastpath);
|
||||
__ bne(CR0, skip_fastpath);
|
||||
|
||||
__ andi_(tmp1, tmp2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION);
|
||||
__ beq(CCR0, skip_barrier);
|
||||
__ beq(CR0, skip_barrier);
|
||||
#ifdef ASSERT
|
||||
// "evacuation" -> (implies) "has forwarded". If we reach this code, "has forwarded" must thus be set.
|
||||
__ andi_(tmp1, tmp1, ShenandoahHeap::HAS_FORWARDED);
|
||||
@@ -453,7 +453,7 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier_impl(
|
||||
__ srdi(tmp1, dst, ShenandoahHeapRegion::region_size_bytes_shift_jint());
|
||||
__ lbzx(tmp2, tmp1, tmp2);
|
||||
__ andi_(tmp2, tmp2, 1);
|
||||
__ beq(CCR0, skip_barrier);
|
||||
__ beq(CR0, skip_barrier);
|
||||
}
|
||||
|
||||
/* ==== Invoke runtime ==== */
|
||||
@@ -639,8 +639,8 @@ void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler
|
||||
Label done;
|
||||
|
||||
// Fast path: Reference is null (JNI tags are zero for null pointers).
|
||||
__ cmpdi(CCR0, obj, 0);
|
||||
__ beq(CCR0, done);
|
||||
__ cmpdi(CR0, obj, 0);
|
||||
__ beq(CR0, done);
|
||||
|
||||
// Resolve jobject using standard implementation.
|
||||
BarrierSetAssembler::try_resolve_jobject_in_native(masm, dst, jni_env, obj, tmp, slowpath);
|
||||
@@ -651,7 +651,7 @@ void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler
|
||||
jni_env);
|
||||
|
||||
__ andi_(tmp, tmp, ShenandoahHeap::EVACUATION | ShenandoahHeap::HAS_FORWARDED);
|
||||
__ bne(CCR0, slowpath);
|
||||
__ bne(CR0, slowpath);
|
||||
|
||||
__ bind(done);
|
||||
__ block_comment("} try_resolve_jobject_in_native (shenandoahgc)");
|
||||
@@ -701,23 +701,23 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler *masm, Register b
|
||||
// Given that 'expected' must refer to the to-space object of an evacuated object (strong to-space invariant),
|
||||
// no special processing is required.
|
||||
if (UseCompressedOops) {
|
||||
__ cmpxchgw(CCR0, current_value, expected, new_val, base_addr, MacroAssembler::MemBarNone,
|
||||
__ cmpxchgw(CR0, current_value, expected, new_val, base_addr, MacroAssembler::MemBarNone,
|
||||
false, success_flag, nullptr, true);
|
||||
} else {
|
||||
__ cmpxchgd(CCR0, current_value, expected, new_val, base_addr, MacroAssembler::MemBarNone,
|
||||
__ cmpxchgd(CR0, current_value, expected, new_val, base_addr, MacroAssembler::MemBarNone,
|
||||
false, success_flag, nullptr, true);
|
||||
}
|
||||
|
||||
// Skip the rest of the barrier if the CAS operation succeeds immediately.
|
||||
// If it does not, the value stored at the address is either the from-space pointer of the
|
||||
// referenced object (success criteria s2)) or simply another object.
|
||||
__ beq(CCR0, done);
|
||||
__ beq(CR0, done);
|
||||
|
||||
/* ==== Step 2 (Null check) ==== */
|
||||
// The success criteria s2) cannot be matched with a null pointer
|
||||
// (null pointers cannot be subject to concurrent evacuation). The failure of the CAS operation is thus legitimate.
|
||||
__ cmpdi(CCR0, current_value, 0);
|
||||
__ beq(CCR0, done);
|
||||
__ cmpdi(CR0, current_value, 0);
|
||||
__ beq(CR0, done);
|
||||
|
||||
/* ==== Step 3 (reference pointer refers to from-space version; success criteria s2)) ==== */
|
||||
// To check whether the reference pointer refers to the from-space version, the forward
|
||||
@@ -737,15 +737,15 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler *masm, Register b
|
||||
// Load zero into register for the potential failure case.
|
||||
__ li(success_flag, 0);
|
||||
}
|
||||
__ cmpd(CCR0, current_value, expected);
|
||||
__ bne(CCR0, done);
|
||||
__ cmpd(CR0, current_value, expected);
|
||||
__ bne(CR0, done);
|
||||
|
||||
// Discard fetched value as it might be a reference to the from-space version of an object.
|
||||
if (UseCompressedOops) {
|
||||
__ cmpxchgw(CCR0, R0, initial_value, new_val, base_addr, MacroAssembler::MemBarNone,
|
||||
__ cmpxchgw(CR0, R0, initial_value, new_val, base_addr, MacroAssembler::MemBarNone,
|
||||
false, success_flag);
|
||||
} else {
|
||||
__ cmpxchgd(CCR0, R0, initial_value, new_val, base_addr, MacroAssembler::MemBarNone,
|
||||
__ cmpxchgd(CR0, R0, initial_value, new_val, base_addr, MacroAssembler::MemBarNone,
|
||||
false, success_flag);
|
||||
}
|
||||
|
||||
@@ -770,7 +770,7 @@ void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler *masm, Register b
|
||||
// guaranteed to be the case.
|
||||
// In case of a concurrent update, the CAS would be retried again. This is legitimate
|
||||
// in terms of program correctness (even though it is not desired).
|
||||
__ bne(CCR0, step_four);
|
||||
__ bne(CR0, step_four);
|
||||
|
||||
__ bind(done);
|
||||
__ block_comment("} cmpxchg_oop (shenandoahgc)");
|
||||
@@ -789,7 +789,7 @@ void ShenandoahBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssemb
|
||||
__ sldi_(count, count, LogBytesPerHeapOop);
|
||||
|
||||
// Zero length? Skip.
|
||||
__ beq(CCR0, L_skip_loop);
|
||||
__ beq(CR0, L_skip_loop);
|
||||
|
||||
__ addi(count, count, -BytesPerHeapOop);
|
||||
__ add(count, addr, count);
|
||||
@@ -835,8 +835,8 @@ void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler *ce, Shen
|
||||
}
|
||||
|
||||
// Fast path: Reference is null.
|
||||
__ cmpdi(CCR0, pre_val, 0);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1_bhintNoHint, __ bi0(CCR0, Assembler::equal), *stub->continuation());
|
||||
__ cmpdi(CR0, pre_val, 0);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1_bhintNoHint, __ bi0(CR0, Assembler::equal), *stub->continuation());
|
||||
|
||||
// Argument passing via the stack.
|
||||
__ std(pre_val, -8, R1_SP);
|
||||
@@ -866,7 +866,7 @@ void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assemble
|
||||
// Ensure that 'res' is 'R3_ARG1' and contains the same value as 'obj' to reduce the number of required
|
||||
// copy instructions.
|
||||
assert(R3_RET == res, "res must be r3");
|
||||
__ cmpd(CCR0, res, obj);
|
||||
__ cmpd(CR0, res, obj);
|
||||
__ asm_assert_eq("result register must contain the reference stored in obj");
|
||||
#endif
|
||||
|
||||
@@ -888,7 +888,7 @@ void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assemble
|
||||
__ lbzx(tmp2, tmp1, tmp2);
|
||||
|
||||
__ andi_(tmp2, tmp2, 1);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1_bhintNoHint, __ bi0(CCR0, Assembler::equal), *stub->continuation());
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs1_bhintNoHint, __ bi0(CR0, Assembler::equal), *stub->continuation());
|
||||
}
|
||||
|
||||
address blob_addr = nullptr;
|
||||
@@ -946,13 +946,13 @@ void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAss
|
||||
__ lbz(R12_tmp2, in_bytes(ShenandoahThreadLocalData::gc_state_offset()), R16_thread);
|
||||
|
||||
__ andi_(R12_tmp2, R12_tmp2, ShenandoahHeap::MARKING);
|
||||
__ beq(CCR0, skip_barrier);
|
||||
__ beq(CR0, skip_barrier);
|
||||
|
||||
/* ==== Add previous value directly to thread-local SATB mark queue ==== */
|
||||
// Check queue's capacity. Jump to runtime if no free slot is available.
|
||||
__ ld(R12_tmp2, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()), R16_thread);
|
||||
__ cmpdi(CCR0, R12_tmp2, 0);
|
||||
__ beq(CCR0, runtime);
|
||||
__ cmpdi(CR0, R12_tmp2, 0);
|
||||
__ beq(CR0, runtime);
|
||||
|
||||
// Capacity suffices. Decrement the queue's size by one slot (size of one oop).
|
||||
__ addi(R12_tmp2, R12_tmp2, -wordSize);
|
||||
|
||||
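The stub above enqueues the previous field value into the thread-local SATB mark queue when a slot is free and otherwise falls back to the runtime. A rough C++ sketch of that fast path, with illustrative field names rather than the real ShenandoahThreadLocalData layout:

    #include <cstddef>
    #include <cstdint>

    struct SATBQueueSketch {
      size_t     index;   // bytes remaining; 0 means the queue is full (assumed layout)
      uintptr_t* buffer;  // base of the queue buffer
    };

    static bool satb_enqueue_sketch(SATBQueueSketch& q, uintptr_t pre_val) {
      if (pre_val == 0) return true;     // fast path: a null previous value needs no marking
      if (q.index == 0) return false;    // no free slot -> call into the runtime
      q.index -= sizeof(uintptr_t);      // claim one slot (one oop), counting down in bytes
      q.buffer[q.index / sizeof(uintptr_t)] = pre_val;
      return true;
    }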
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2021, 2024 SAP SE. All rights reserved.
+ * Copyright (c) 2021, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -169,7 +169,7 @@ void ZBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators
|
||||
// if the pointer is not dirty.
|
||||
// Only dirty pointers must be processed by this barrier, so we can skip it in case the latter condition holds true.
|
||||
__ and_(tmp1, tmp1, dst);
|
||||
__ beq(CCR0, uncolor);
|
||||
__ beq(CR0, uncolor);
|
||||
|
||||
/* ==== Invoke barrier ==== */
|
||||
{
|
||||
@@ -193,8 +193,8 @@ void ZBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators
|
||||
|
||||
// Slow-path has already uncolored
|
||||
if (L_handle_null != nullptr) {
|
||||
__ cmpdi(CCR0, dst, 0);
|
||||
__ beq(CCR0, *L_handle_null);
|
||||
__ cmpdi(CR0, dst, 0);
|
||||
__ beq(CR0, *L_handle_null);
|
||||
}
|
||||
__ b(done);
|
||||
|
||||
@@ -203,7 +203,7 @@ void ZBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators
|
||||
__ srdi(dst, dst, ZPointerLoadShift);
|
||||
} else {
|
||||
__ srdi_(dst, dst, ZPointerLoadShift);
|
||||
__ beq(CCR0, *L_handle_null);
|
||||
__ beq(CR0, *L_handle_null);
|
||||
}
|
||||
|
||||
__ bind(done);
|
||||
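For context, the ZGC fast path being patched in load_at() tests the colored pointer against a per-thread bad mask and takes the slow path only when a bad color bit is set; a good pointer is then uncolored by the load shift. A small sketch under assumed names (not the real ZGC API):

    #include <cstdint>

    constexpr unsigned kLoadShiftSketch = 16;   // stand-in for ZPointerLoadShift

    // Returns the uncolored oop, calling 'slow' only when the colored pointer is bad.
    template <typename SlowPath>
    static uintptr_t z_load_barrier_sketch(uintptr_t colored, uintptr_t load_bad_mask, SlowPath slow) {
      if (colored & load_bad_mask) {
        colored = slow(colored);              // barrier slow path heals/re-colors the pointer
      }
      return colored >> kLoadShiftSketch;     // uncolor: drop the color bits
    }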
@@ -234,7 +234,7 @@ static void emit_store_fast_path_check(MacroAssembler* masm, Register base, Regi
|
||||
// A not relocatable object could have spurious raw null pointers in its fields after
|
||||
// getting promoted to the old generation.
|
||||
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodBits);
|
||||
__ cmplwi(CCR0, R0, barrier_Relocation::unpatched);
|
||||
__ cmplwi(CR0, R0, barrier_Relocation::unpatched);
|
||||
} else {
|
||||
__ ld(R0, ind_or_offs, base);
|
||||
// Stores on relocatable objects never need to deal with raw null pointers in fields.
|
||||
@@ -244,7 +244,7 @@ static void emit_store_fast_path_check(MacroAssembler* masm, Register base, Regi
|
||||
__ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreBadMask);
|
||||
__ andi_(R0, R0, barrier_Relocation::unpatched);
|
||||
}
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), medium_path);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CR0, Assembler::equal), medium_path);
|
||||
}
|
||||
|
||||
void ZBarrierSetAssembler::store_barrier_fast(MacroAssembler* masm,
|
||||
@@ -274,7 +274,7 @@ void ZBarrierSetAssembler::store_barrier_fast(MacroAssembler* masm,
|
||||
__ ld(R0, ind_or_offset, ref_base);
|
||||
__ ld(rnew_zpointer, in_bytes(ZThreadLocalData::store_bad_mask_offset()), R16_thread);
|
||||
__ and_(R0, R0, rnew_zpointer);
|
||||
__ bne(CCR0, medium_path);
|
||||
__ bne(CR0, medium_path);
|
||||
__ bind(medium_path_continuation);
|
||||
__ ld(rnew_zpointer, in_bytes(ZThreadLocalData::store_good_mask_offset()), R16_thread);
|
||||
}
|
||||
@@ -293,7 +293,7 @@ static void store_barrier_buffer_add(MacroAssembler* masm,
|
||||
// Combined pointer bump and check if the buffer is disabled or full
|
||||
__ ld(R0, in_bytes(ZStoreBarrierBuffer::current_offset()), tmp1);
|
||||
__ addic_(R0, R0, -(int)sizeof(ZStoreBarrierEntry));
|
||||
__ blt(CCR0, slow_path);
|
||||
__ blt(CR0, slow_path);
|
||||
__ std(R0, in_bytes(ZStoreBarrierBuffer::current_offset()), tmp1);
|
||||
|
||||
// Entry is at ZStoreBarrierBuffer (tmp1) + buffer_offset + scaled index (R0)
|
||||
@@ -327,8 +327,8 @@ void ZBarrierSetAssembler::store_barrier_medium(MacroAssembler* masm,
|
||||
// Atomic accesses can get to the medium fast path because the value was a
|
||||
// raw null value. If it was not null, then there is no doubt we need to take a slow path.
|
||||
__ ld(tmp, ind_or_offs, ref_base);
|
||||
__ cmpdi(CCR0, tmp, 0);
|
||||
__ bne(CCR0, slow_path);
|
||||
__ cmpdi(CR0, tmp, 0);
|
||||
__ bne(CR0, slow_path);
|
||||
|
||||
// If we get this far, we know there is a young raw null value in the field.
|
||||
// Try to self-heal null values for atomic accesses
|
||||
@@ -338,12 +338,12 @@ void ZBarrierSetAssembler::store_barrier_medium(MacroAssembler* masm,
|
||||
need_restore = true;
|
||||
}
|
||||
__ ld(R0, in_bytes(ZThreadLocalData::store_good_mask_offset()), R16_thread);
|
||||
__ cmpxchgd(CCR0, tmp, (intptr_t)0, R0, ref_base,
|
||||
__ cmpxchgd(CR0, tmp, (intptr_t)0, R0, ref_base,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
|
||||
noreg, need_restore ? nullptr : &slow_path);
|
||||
if (need_restore) {
|
||||
__ sub(ref_base, ref_base, ind_or_offs);
|
||||
__ bne(CCR0, slow_path);
|
||||
__ bne(CR0, slow_path);
|
||||
}
|
||||
} else {
|
||||
// A non-atomic relocatable object won't get to the medium fast path due to a
|
||||
@@ -447,7 +447,7 @@ void ZBarrierSetAssembler::copy_load_at_fast(MacroAssembler* masm,
|
||||
Label& continuation) const {
|
||||
__ ldx(zpointer, addr);
|
||||
__ and_(R0, zpointer, load_bad_mask);
|
||||
__ bne(CCR0, slow_path);
|
||||
__ bne(CR0, slow_path);
|
||||
__ bind(continuation);
|
||||
}
|
||||
void ZBarrierSetAssembler::copy_load_at_slow(MacroAssembler* masm,
|
||||
@@ -480,7 +480,7 @@ void ZBarrierSetAssembler::copy_store_at_fast(MacroAssembler* masm,
|
||||
if (!dest_uninitialized) {
|
||||
__ ldx(R0, addr);
|
||||
__ and_(R0, R0, store_bad_mask);
|
||||
__ bne(CCR0, medium_path);
|
||||
__ bne(CR0, medium_path);
|
||||
__ bind(continuation);
|
||||
}
|
||||
__ rldimi(zpointer, store_good_mask, 0, 64 - ZPointerLoadShift); // Replace color bits.
|
||||
@@ -515,8 +515,8 @@ void ZBarrierSetAssembler::copy_store_at_slow(MacroAssembler* masm,
|
||||
void ZBarrierSetAssembler::generate_disjoint_oop_copy(MacroAssembler* masm, bool dest_uninitialized) {
|
||||
const Register zpointer = R2, tmp = R9;
|
||||
Label done, loop, load_bad, load_good, store_bad, store_good;
|
||||
__ cmpdi(CCR0, R5_ARG3, 0);
|
||||
__ beq(CCR0, done);
|
||||
__ cmpdi(CR0, R5_ARG3, 0);
|
||||
__ beq(CR0, done);
|
||||
__ mtctr(R5_ARG3);
|
||||
|
||||
__ align(32);
|
||||
@@ -539,7 +539,7 @@ void ZBarrierSetAssembler::generate_conjoint_oop_copy(MacroAssembler* masm, bool
|
||||
const Register zpointer = R2, tmp = R9;
|
||||
Label done, loop, load_bad, load_good, store_bad, store_good;
|
||||
__ sldi_(R0, R5_ARG3, 3);
|
||||
__ beq(CCR0, done);
|
||||
__ beq(CR0, done);
|
||||
__ mtctr(R5_ARG3);
|
||||
// Point behind last elements and copy backwards.
|
||||
__ add(R3_ARG1, R3_ARG1, R0);
|
||||
@@ -570,12 +570,12 @@ void ZBarrierSetAssembler::check_oop(MacroAssembler *masm, Register obj, const c
|
||||
Label done, skip_uncolor;
|
||||
// Skip (colored) null.
|
||||
__ srdi_(R0, obj, ZPointerLoadShift);
|
||||
__ beq(CCR0, done);
|
||||
__ beq(CR0, done);
|
||||
|
||||
// Check if ZAddressHeapBase << ZPointerLoadShift is set. If so, we need to uncolor.
|
||||
__ rldicl_(R0, obj, 64 - ZAddressHeapBaseShift - ZPointerLoadShift, 63);
|
||||
__ mr(R0, obj);
|
||||
__ beq(CCR0, skip_uncolor);
|
||||
__ beq(CR0, skip_uncolor);
|
||||
__ srdi(R0, obj, ZPointerLoadShift);
|
||||
__ bind(skip_uncolor);
|
||||
|
||||
@@ -594,7 +594,7 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, R
|
||||
|
||||
// Test for tag
|
||||
__ andi_(tmp, obj, JNIHandles::tag_mask);
|
||||
__ bne(CCR0, tagged);
|
||||
__ bne(CR0, tagged);
|
||||
|
||||
// Resolve local handle
|
||||
__ ld(dst, 0, obj);
|
||||
@@ -605,7 +605,7 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, R
|
||||
// Test for weak tag
|
||||
__ andi_(tmp, obj, JNIHandles::TypeTag::weak_global);
|
||||
__ clrrdi(dst, obj, JNIHandles::tag_size); // Untag.
|
||||
__ bne(CCR0, weak_tagged);
|
||||
__ bne(CR0, weak_tagged);
|
||||
|
||||
// Resolve global handle
|
||||
__ ld(dst, 0, dst);
|
||||
@@ -620,7 +620,7 @@ void ZBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, R
|
||||
|
||||
__ bind(check_color);
|
||||
__ and_(tmp, tmp, dst);
|
||||
__ bne(CCR0, slowpath);
|
||||
__ bne(CR0, slowpath);
|
||||
|
||||
// Uncolor
|
||||
__ srdi(dst, dst, ZPointerLoadShift);
|
||||
@@ -666,7 +666,7 @@ void ZBarrierSetAssembler::generate_c1_load_barrier(LIR_Assembler* ce,
|
||||
ZLoadBarrierStubC1* stub,
|
||||
bool on_non_strong) const {
|
||||
check_color(ce, ref, on_non_strong);
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CCR0, Assembler::equal), *stub->entry());
|
||||
__ bc_far_optimized(Assembler::bcondCRbiIs0, __ bi0(CR0, Assembler::equal), *stub->entry());
|
||||
z_uncolor(ce, ref);
|
||||
__ bind(*stub->continuation());
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
//
-// Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
-// Copyright (c) 2021 SAP SE. All rights reserved.
+// Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 2025 SAP SE. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -70,7 +70,7 @@ static void z_load_barrier(MacroAssembler* masm, const MachNode* node, Address r
|
||||
check_color(masm, ref, on_non_strong);
|
||||
|
||||
ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref);
|
||||
__ bne_far(CCR0, *stub->entry(), MacroAssembler::bc_far_optimize_on_relocate);
|
||||
__ bne_far(CR0, *stub->entry(), MacroAssembler::bc_far_optimize_on_relocate);
|
||||
|
||||
z_uncolor(masm, ref);
|
||||
__ bind(*stub->continuation());
|
||||
@@ -97,7 +97,7 @@ static void z_compare_and_swap(MacroAssembler* masm, const MachNode* node,
|
||||
Register rold_zpointer = tmp1, rnew_zpointer = tmp2;
|
||||
z_store_barrier(masm, node, mem, 0, newval, rnew_zpointer, true /* is_atomic */);
|
||||
z_color(masm, rold_zpointer, oldval);
|
||||
__ cmpxchgd(CCR0, R0, rold_zpointer, rnew_zpointer, mem,
|
||||
__ cmpxchgd(CR0, R0, rold_zpointer, rnew_zpointer, mem,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), res, nullptr, true,
|
||||
false /* we could support weak, but benefit is questionable */);
|
||||
|
||||
@@ -119,7 +119,7 @@ static void z_compare_and_exchange(MacroAssembler* masm, const MachNode* node,
|
||||
Register rold_zpointer = R0, rnew_zpointer = tmp;
|
||||
z_store_barrier(masm, node, mem, 0, newval, rnew_zpointer, true /* is_atomic */);
|
||||
z_color(masm, rold_zpointer, oldval);
|
||||
__ cmpxchgd(CCR0, res, rold_zpointer, rnew_zpointer, mem,
|
||||
__ cmpxchgd(CR0, res, rold_zpointer, rnew_zpointer, mem,
|
||||
MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(), noreg, nullptr, true,
|
||||
false /* we could support weak, but benefit is questionable */);
|
||||
z_uncolor(masm, res);
|
||||
|
||||
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2024 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -126,10 +126,10 @@ void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg)
|
||||
// means that this code is called *during* popframe handling - we
|
||||
// don't want to reenter.
|
||||
andi_(R0, scratch_reg, JavaThread::popframe_pending_bit);
|
||||
beq(CCR0, L);
|
||||
beq(CR0, L);
|
||||
|
||||
andi_(R0, scratch_reg, JavaThread::popframe_processing_bit);
|
||||
bne(CCR0, L);
|
||||
bne(CR0, L);
|
||||
|
||||
// Call the Interpreter::remove_activation_preserving_args_entry()
|
||||
// func to get the address of the same-named entrypoint in the
|
||||
@@ -150,12 +150,12 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg)
|
||||
if (JvmtiExport::can_force_early_return()) {
|
||||
Label Lno_early_ret;
|
||||
ld(Rthr_state_addr, in_bytes(JavaThread::jvmti_thread_state_offset()), R16_thread);
|
||||
cmpdi(CCR0, Rthr_state_addr, 0);
|
||||
beq(CCR0, Lno_early_ret);
|
||||
cmpdi(CR0, Rthr_state_addr, 0);
|
||||
beq(CR0, Lno_early_ret);
|
||||
|
||||
lwz(R0, in_bytes(JvmtiThreadState::earlyret_state_offset()), Rthr_state_addr);
|
||||
cmpwi(CCR0, R0, JvmtiThreadState::earlyret_pending);
|
||||
bne(CCR0, Lno_early_ret);
|
||||
cmpwi(CR0, R0, JvmtiThreadState::earlyret_pending);
|
||||
bne(CR0, Lno_early_ret);
|
||||
|
||||
// Jump to Interpreter::_earlyret_entry.
|
||||
lwz(R3_ARG1, in_bytes(JvmtiThreadState::earlyret_tos_offset()), Rthr_state_addr);
|
||||
@@ -229,7 +229,7 @@ void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, Register byt
|
||||
ld(R0, in_bytes(JavaThread::polling_word_offset()), R16_thread);
|
||||
// Armed page has poll_bit set, if poll bit is cleared just continue.
|
||||
andi_(R0, R0, SafepointMechanism::poll_bit());
|
||||
beq(CCR0, dispatch);
|
||||
beq(CR0, dispatch);
|
||||
load_dispatch_table(R11_scratch1, sfpt_tbl);
|
||||
align(32, 16);
|
||||
bind(dispatch);
|
||||
@@ -528,8 +528,8 @@ void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result
|
||||
Label index_ok;
|
||||
lwa(R0, arrayOopDesc::length_offset_in_bytes(), result);
|
||||
sldi(R0, R0, LogBytesPerHeapOop);
|
||||
cmpd(CCR0, index, R0);
|
||||
blt(CCR0, index_ok);
|
||||
cmpd(CR0, index, R0);
|
||||
blt(CR0, index_ok);
|
||||
stop("resolved reference index out of bounds");
|
||||
bind(index_ok);
|
||||
#endif
|
||||
@@ -592,8 +592,8 @@ void InterpreterMacroAssembler::index_check_without_pop(Register Rarray, Registe
|
||||
|
||||
// Array nullcheck
|
||||
if (!ImplicitNullChecks) {
|
||||
cmpdi(CCR0, Rarray, 0);
|
||||
beq(CCR0, LisNull);
|
||||
cmpdi(CR0, Rarray, 0);
|
||||
beq(CR0, LisNull);
|
||||
} else {
|
||||
null_check_throw(Rarray, arrayOopDesc::length_offset_in_bytes(), /*temp*/RsxtIndex);
|
||||
}
|
||||
@@ -605,9 +605,9 @@ void InterpreterMacroAssembler::index_check_without_pop(Register Rarray, Registe
|
||||
|
||||
// Index check
|
||||
lwz(Rlength, arrayOopDesc::length_offset_in_bytes(), Rarray);
|
||||
cmplw(CCR0, Rindex, Rlength);
|
||||
cmplw(CR0, Rindex, Rlength);
|
||||
sldi(RsxtIndex, RsxtIndex, index_shift);
|
||||
blt(CCR0, LnotOOR);
|
||||
blt(CR0, LnotOOR);
|
||||
// Index should be in R17_tos, array should be in R4_ARG2.
|
||||
mr_if_needed(R17_tos, Rindex);
|
||||
mr_if_needed(R4_ARG2, Rarray);
|
||||
@@ -687,11 +687,11 @@ void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
|
||||
push(state);
|
||||
|
||||
// Skip if we don't have to unlock.
|
||||
testbitdi(CCR0, R0, Raccess_flags, JVM_ACC_SYNCHRONIZED_BIT);
|
||||
beq(CCR0, Lunlocked);
|
||||
testbitdi(CR0, R0, Raccess_flags, JVM_ACC_SYNCHRONIZED_BIT);
|
||||
beq(CR0, Lunlocked);
|
||||
|
||||
cmpwi(CCR0, Rdo_not_unlock_flag, 0);
|
||||
bne(CCR0, Lno_unlock);
|
||||
cmpwi(CR0, Rdo_not_unlock_flag, 0);
|
||||
bne(CR0, Lno_unlock);
|
||||
}
|
||||
|
||||
// Unlock
|
||||
@@ -705,8 +705,8 @@ void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
|
||||
-(frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base
|
||||
|
||||
ld(R0, BasicObjectLock::obj_offset(), Rmonitor_base);
|
||||
cmpdi(CCR0, R0, 0);
|
||||
bne(CCR0, Lunlock);
|
||||
cmpdi(CR0, R0, 0);
|
||||
bne(CR0, Lunlock);
|
||||
|
||||
// If it's already unlocked, throw exception.
|
||||
if (throw_monitor_exception) {
|
||||
@@ -740,7 +740,7 @@ void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
|
||||
addi(Rmonitor_base, Rmonitor_base, - frame::ijava_state_size); // Monitor base
|
||||
|
||||
subf_(Riterations, R26_monitor, Rmonitor_base);
|
||||
ble(CCR0, Lno_unlock);
|
||||
ble(CR0, Lno_unlock);
|
||||
|
||||
addi(Rcurrent_obj_addr, Rmonitor_base,
|
||||
in_bytes(BasicObjectLock::obj_offset()) - frame::interpreter_frame_monitor_size_in_bytes());
|
||||
@@ -759,8 +759,8 @@ void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
|
||||
bind(Lloop);
|
||||
|
||||
// Check if current entry is used.
|
||||
cmpdi(CCR0, Rcurrent_obj, 0);
|
||||
bne(CCR0, Lexception);
|
||||
cmpdi(CR0, Rcurrent_obj, 0);
|
||||
bne(CR0, Lexception);
|
||||
// Preload next iteration's compare value.
|
||||
ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
|
||||
addi(Rcurrent_obj_addr, Rcurrent_obj_addr, -delta);
|
||||
@@ -816,29 +816,29 @@ void InterpreterMacroAssembler::narrow(Register result) {
|
||||
Label notBool, notByte, notChar, done;
|
||||
|
||||
// common case first
|
||||
cmpwi(CCR0, ret_type, T_INT);
|
||||
beq(CCR0, done);
|
||||
cmpwi(CR0, ret_type, T_INT);
|
||||
beq(CR0, done);
|
||||
|
||||
cmpwi(CCR0, ret_type, T_BOOLEAN);
|
||||
bne(CCR0, notBool);
|
||||
cmpwi(CR0, ret_type, T_BOOLEAN);
|
||||
bne(CR0, notBool);
|
||||
andi(result, result, 0x1);
|
||||
b(done);
|
||||
|
||||
bind(notBool);
|
||||
cmpwi(CCR0, ret_type, T_BYTE);
|
||||
bne(CCR0, notByte);
|
||||
cmpwi(CR0, ret_type, T_BYTE);
|
||||
bne(CR0, notByte);
|
||||
extsb(result, result);
|
||||
b(done);
|
||||
|
||||
bind(notByte);
|
||||
cmpwi(CCR0, ret_type, T_CHAR);
|
||||
bne(CCR0, notChar);
|
||||
cmpwi(CR0, ret_type, T_CHAR);
|
||||
bne(CR0, notChar);
|
||||
andi(result, result, 0xffff);
|
||||
b(done);
|
||||
|
||||
bind(notChar);
|
||||
// cmpwi(CCR0, ret_type, T_SHORT); // all that's left
|
||||
// bne(CCR0, done);
|
||||
// cmpwi(CR0, ret_type, T_SHORT); // all that's left
|
||||
// bne(CR0, done);
|
||||
extsh(result, result);
|
||||
|
||||
// Nothing to do for T_INT
|
||||
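narrow() above truncates or sign-extends the interpreter's 32-bit return value according to the declared return type. The branches map to the following semantics (sketch only; the case values stand in for HotSpot's BasicType constants and are not the real enum values):

    #include <cstdint>

    static int32_t narrow_sketch(int type, int32_t result) {
      switch (type) {
        case 0 /* T_BOOLEAN */: return result & 0x1;              // keep the lowest bit only
        case 1 /* T_BYTE    */: return (int32_t)(int8_t)result;   // sign-extend from 8 bits
        case 2 /* T_CHAR    */: return result & 0xffff;           // zero-extend from 16 bits
        case 3 /* T_SHORT   */: return (int32_t)(int16_t)result;  // sign-extend from 16 bits
        default:                return result;                    // T_INT: nothing to do
      }
    }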
@@ -893,8 +893,8 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
|
||||
// check if already enabled - if so no re-enabling needed
|
||||
assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
|
||||
lwz(R0, in_bytes(JavaThread::stack_guard_state_offset()), R16_thread);
|
||||
cmpwi(CCR0, R0, StackOverflow::stack_guard_enabled);
|
||||
beq_predict_taken(CCR0, no_reserved_zone_enabling);
|
||||
cmpwi(CR0, R0, StackOverflow::stack_guard_enabled);
|
||||
beq_predict_taken(CR0, no_reserved_zone_enabling);
|
||||
|
||||
// Compare frame pointers. There is no good stack pointer, as with stack
|
||||
// frame compression we can get different SPs when we do calls. A subsequent
|
||||
@@ -902,8 +902,8 @@ void InterpreterMacroAssembler::remove_activation(TosState state,
|
||||
// inner call of the method annotated with ReservedStack.
|
||||
ld_ptr(R0, JavaThread::reserved_stack_activation_offset(), R16_thread);
|
||||
ld_ptr(R11_scratch1, _abi0(callers_sp), R1_SP); // Load frame pointer.
|
||||
cmpld(CCR0, R11_scratch1, R0);
|
||||
blt_predict_taken(CCR0, no_reserved_zone_enabling);
|
||||
cmpld(CR0, R11_scratch1, R0);
|
||||
blt_predict_taken(CR0, no_reserved_zone_enabling);
|
||||
|
||||
// Enable reserved zone again, throw stack overflow exception.
|
||||
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), R16_thread);
|
||||
@@ -961,8 +961,8 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
|
||||
if (DiagnoseSyncOnValueBasedClasses != 0) {
|
||||
load_klass(tmp, object);
|
||||
lbz(tmp, in_bytes(Klass::misc_flags_offset()), tmp);
|
||||
testbitdi(CCR0, R0, tmp, exact_log2(KlassFlags::_misc_is_value_based_class));
|
||||
bne(CCR0, slow_case);
|
||||
testbitdi(CR0, R0, tmp, exact_log2(KlassFlags::_misc_is_value_based_class));
|
||||
bne(CR0, slow_case);
|
||||
}
|
||||
|
||||
if (LockingMode == LM_LIGHTWEIGHT) {
|
||||
@@ -989,8 +989,8 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
|
||||
addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
|
||||
|
||||
// Must fence, otherwise, preceding store(s) may float below cmpxchg.
|
||||
// CmpxchgX sets CCR0 to cmpX(current, displaced).
|
||||
cmpxchgd(/*flag=*/CCR0,
|
||||
// CmpxchgX sets CR0 to cmpX(current, displaced).
|
||||
cmpxchgd(/*flag=*/CR0,
|
||||
/*current_value=*/current_header,
|
||||
/*compare_value=*/header, /*exchange_value=*/monitor,
|
||||
/*where=*/object_mark_addr,
|
||||
@@ -1021,7 +1021,7 @@ void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
|
||||
and_(R0/*==0?*/, current_header, tmp);
|
||||
// If condition is true we are done and hence we can store 0 in the displaced
|
||||
// header indicating it is a recursive lock.
|
||||
bne(CCR0, slow_case);
|
||||
bne(CR0, slow_case);
|
||||
std(R0/*==0!*/, mark_offset, monitor);
|
||||
b(count_locking);
|
||||
}
|
||||
@@ -1087,8 +1087,8 @@ void InterpreterMacroAssembler::unlock_object(Register monitor) {
|
||||
BasicLock::displaced_header_offset_in_bytes(), monitor);
|
||||
|
||||
// If the displaced header is zero, we have a recursive unlock.
|
||||
cmpdi(CCR0, header, 0);
|
||||
beq(CCR0, free_slot); // recursive unlock
|
||||
cmpdi(CR0, header, 0);
|
||||
beq(CR0, free_slot); // recursive unlock
|
||||
}
|
||||
|
||||
// } else if (Atomic::cmpxchg(obj->mark_addr(), monitor, displaced_header) == monitor) {
|
||||
@@ -1108,8 +1108,8 @@ void InterpreterMacroAssembler::unlock_object(Register monitor) {
|
||||
// We have the displaced header in displaced_header. If the lock is still
|
||||
// lightweight, it will contain the monitor address and we'll store the
|
||||
// displaced header back into the object's mark word.
|
||||
// CmpxchgX sets CCR0 to cmpX(current, monitor).
|
||||
cmpxchgd(/*flag=*/CCR0,
|
||||
// CmpxchgX sets CR0 to cmpX(current, monitor).
|
||||
cmpxchgd(/*flag=*/CR0,
|
||||
/*current_value=*/current_header,
|
||||
/*compare_value=*/monitor, /*exchange_value=*/header,
|
||||
/*where=*/object_mark_addr,
|
||||
@@ -1170,8 +1170,8 @@ void InterpreterMacroAssembler::call_from_interpreter(Register Rtarget_method, R
|
||||
// compiled code in threads for which the event is enabled. Check here for
|
||||
// interp_only_mode if these events CAN be enabled.
|
||||
Label done;
|
||||
cmpwi(CCR0, Rinterp_only, 0);
|
||||
beq(CCR0, done);
|
||||
cmpwi(CR0, Rinterp_only, 0);
|
||||
beq(CR0, done);
|
||||
ld(Rtarget_addr, in_bytes(Method::interpreter_entry_offset()), Rtarget_method);
|
||||
align(32, 12);
|
||||
bind(done);
|
||||
@@ -1180,8 +1180,8 @@ void InterpreterMacroAssembler::call_from_interpreter(Register Rtarget_method, R
|
||||
#ifdef ASSERT
|
||||
{
|
||||
Label Lok;
|
||||
cmpdi(CCR0, Rtarget_addr, 0);
|
||||
bne(CCR0, Lok);
|
||||
cmpdi(CR0, Rtarget_addr, 0);
|
||||
bne(CR0, Lok);
|
||||
stop("null entry point");
|
||||
bind(Lok);
|
||||
}
|
||||
@@ -1211,7 +1211,7 @@ void InterpreterMacroAssembler::call_from_interpreter(Register Rtarget_method, R
|
||||
sldi(Rscratch1, Rscratch1, Interpreter::logStackElementSize);
|
||||
add(Rscratch1, Rscratch1, Rscratch2); // Rscratch2 contains fp
|
||||
// Compare sender_sp with the derelativized top_frame_sp
|
||||
cmpd(CCR0, R21_sender_SP, Rscratch1);
|
||||
cmpd(CR0, R21_sender_SP, Rscratch1);
|
||||
asm_assert_eq("top_frame_sp incorrect");
|
||||
#endif
|
||||
|
||||
@@ -1234,8 +1234,8 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
|
||||
// Test ImethodDataPtr. If it is null, continue at the specified label.
|
||||
void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
|
||||
assert(ProfileInterpreter, "must be profiling interpreter");
|
||||
cmpdi(CCR0, R28_mdx, 0);
|
||||
beq(CCR0, zero_continue);
|
||||
cmpdi(CR0, R28_mdx, 0);
|
||||
beq(CR0, zero_continue);
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::verify_method_data_pointer() {
|
||||
@@ -1250,8 +1250,8 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
|
||||
ld(R12_scratch2, in_bytes(Method::const_offset()), R19_method);
|
||||
addi(R11_scratch1, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
|
||||
add(R11_scratch1, R12_scratch2, R12_scratch2);
|
||||
cmpd(CCR0, R11_scratch1, R14_bcp);
|
||||
beq(CCR0, verify_continue);
|
||||
cmpd(CR0, R11_scratch1, R14_bcp);
|
||||
beq(CR0, verify_continue);
|
||||
|
||||
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp ), R19_method, R14_bcp, R28_mdx);
|
||||
|
||||
@@ -1334,8 +1334,8 @@ void InterpreterMacroAssembler::test_mdp_data_at(int offset,
|
||||
assert(ProfileInterpreter, "must be profiling interpreter");
|
||||
|
||||
ld(test_out, offset, R28_mdx);
|
||||
cmpd(CCR0, value, test_out);
|
||||
bne(CCR0, not_equal_continue);
|
||||
cmpd(CR0, value, test_out);
|
||||
bne(CR0, not_equal_continue);
|
||||
}
|
||||
|
||||
// Update the method data pointer by the displacement located at some fixed
|
||||
@@ -1491,8 +1491,8 @@ void InterpreterMacroAssembler::profile_virtual_call(Register Rreceiver,
|
||||
Label skip_receiver_profile;
|
||||
if (receiver_can_be_null) {
|
||||
Label not_null;
|
||||
cmpdi(CCR0, Rreceiver, 0);
|
||||
bne(CCR0, not_null);
|
||||
cmpdi(CR0, Rreceiver, 0);
|
||||
bne(CR0, not_null);
|
||||
// We are making a call. Increment the count for null receiver.
|
||||
increment_mdp_data_at(in_bytes(CounterData::count_offset()), Rscratch1, Rscratch2);
|
||||
b(skip_receiver_profile);
|
||||
@@ -1681,8 +1681,8 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
|
||||
if (start_row == last_row) {
|
||||
// The only thing left to do is handle the null case.
|
||||
// Scratch1 contains test_out from test_mdp_data_at.
|
||||
cmpdi(CCR0, scratch1, 0);
|
||||
beq(CCR0, found_null);
|
||||
cmpdi(CR0, scratch1, 0);
|
||||
beq(CR0, found_null);
|
||||
// Receiver did not match any saved receiver and there is no empty row for it.
|
||||
// Increment total counter to indicate polymorphic case.
|
||||
increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);
|
||||
@@ -1691,8 +1691,8 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
|
||||
break;
|
||||
}
|
||||
// Since null is rare, make it be the branch-taken case.
|
||||
cmpdi(CCR0, scratch1, 0);
|
||||
beq(CCR0, found_null);
|
||||
cmpdi(CR0, scratch1, 0);
|
||||
beq(CR0, found_null);
|
||||
|
||||
// Put all the "Case 3" tests here.
|
||||
record_klass_in_profile_helper(receiver, scratch1, scratch2, start_row + 1, done);
|
||||
@@ -1734,27 +1734,27 @@ void InterpreterMacroAssembler::profile_obj_type(Register obj, Register mdo_addr
|
||||
ld(tmp, mdo_addr_offs, mdo_addr_base);
|
||||
|
||||
// Set null_seen if obj is 0.
|
||||
cmpdi(CCR0, obj, 0);
|
||||
cmpdi(CR0, obj, 0);
|
||||
ori(R0, tmp, TypeEntries::null_seen);
|
||||
beq(CCR0, do_update);
|
||||
beq(CR0, do_update);
|
||||
|
||||
load_klass(klass, obj);
|
||||
|
||||
clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
|
||||
// Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
|
||||
cmpd(CCR1, R0, klass);
|
||||
cmpd(CR1, R0, klass);
|
||||
// Klass seen before, nothing to do (regardless of unknown bit).
|
||||
//beq(CCR1, do_nothing);
|
||||
//beq(CR1, do_nothing);
|
||||
|
||||
andi_(R0, tmp, TypeEntries::type_unknown);
|
||||
// Already unknown. Nothing to do anymore.
|
||||
//bne(CCR0, do_nothing);
|
||||
crorc(CCR0, Assembler::equal, CCR1, Assembler::equal); // cr0 eq = cr1 eq or cr0 ne
|
||||
beq(CCR0, do_nothing);
|
||||
//bne(CR0, do_nothing);
|
||||
crorc(CR0, Assembler::equal, CR1, Assembler::equal); // cr0 eq = cr1 eq or cr0 ne
|
||||
beq(CR0, do_nothing);
|
||||
|
||||
clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
|
||||
orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
|
||||
beq(CCR0, do_update); // First time here. Set profile type.
|
||||
beq(CR0, do_update); // First time here. Set profile type.
|
||||
|
||||
// Different than before. Cannot keep accurate profile.
|
||||
ori(R0, tmp, TypeEntries::type_unknown);
|
||||
@@ -1785,8 +1785,8 @@ void InterpreterMacroAssembler::profile_arguments_type(Register callee,
|
||||
in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
|
||||
|
||||
lbz(tmp1, in_bytes(DataLayout::tag_offset()) - off_to_start, R28_mdx);
|
||||
cmpwi(CCR0, tmp1, is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
|
||||
bne(CCR0, profile_continue);
|
||||
cmpwi(CR0, tmp1, is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
|
||||
bne(CR0, profile_continue);
|
||||
|
||||
if (MethodData::profile_arguments()) {
|
||||
Label done;
|
||||
@@ -1797,9 +1797,9 @@ void InterpreterMacroAssembler::profile_arguments_type(Register callee,
|
||||
if (i > 0 || MethodData::profile_return()) {
|
||||
// If return value type is profiled we may have no argument to profile.
|
||||
ld(tmp1, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, R28_mdx);
|
||||
cmpdi(CCR0, tmp1, (i+1)*TypeStackSlotEntries::per_arg_count());
|
||||
cmpdi(CR0, tmp1, (i+1)*TypeStackSlotEntries::per_arg_count());
|
||||
addi(tmp1, tmp1, -i*TypeStackSlotEntries::per_arg_count());
|
||||
blt(CCR0, done);
|
||||
blt(CR0, done);
|
||||
}
|
||||
ld(tmp1, in_bytes(Method::const_offset()), callee);
|
||||
lhz(tmp1, in_bytes(ConstMethod::size_of_parameters_offset()), tmp1);
|
||||
@@ -1865,12 +1865,12 @@ void InterpreterMacroAssembler::profile_return_type(Register ret, Register tmp1,
|
||||
// length.
|
||||
lbz(tmp1, 0, R14_bcp);
|
||||
lbz(tmp2, in_bytes(Method::intrinsic_id_offset()), R19_method);
|
||||
cmpwi(CCR0, tmp1, Bytecodes::_invokedynamic);
|
||||
cmpwi(CCR1, tmp1, Bytecodes::_invokehandle);
|
||||
cror(CCR0, Assembler::equal, CCR1, Assembler::equal);
|
||||
cmpwi(CCR1, tmp2, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
|
||||
cror(CCR0, Assembler::equal, CCR1, Assembler::equal);
|
||||
bne(CCR0, profile_continue);
|
||||
cmpwi(CR0, tmp1, Bytecodes::_invokedynamic);
|
||||
cmpwi(CR1, tmp1, Bytecodes::_invokehandle);
|
||||
cror(CR0, Assembler::equal, CR1, Assembler::equal);
|
||||
cmpwi(CR1, tmp2, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
|
||||
cror(CR0, Assembler::equal, CR1, Assembler::equal);
|
||||
bne(CR0, profile_continue);
|
||||
}
|
||||
|
||||
profile_obj_type(ret, R28_mdx, -in_bytes(ReturnTypeEntry::size()), tmp1, tmp2);
|
||||
@@ -1890,8 +1890,8 @@ void InterpreterMacroAssembler::profile_parameters_type(Register tmp1, Register
|
||||
// Load the offset of the area within the MDO used for
|
||||
// parameters. If it's negative we're not profiling any parameters.
|
||||
lwz(tmp1, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset()), R28_mdx);
|
||||
cmpwi(CCR0, tmp1, 0);
|
||||
blt(CCR0, profile_continue);
|
||||
cmpwi(CR0, tmp1, 0);
|
||||
blt(CR0, profile_continue);
|
||||
|
||||
// Compute a pointer to the area for parameters from the offset
|
||||
// and move the pointer to the slot for the last
|
||||
@@ -1936,9 +1936,9 @@ void InterpreterMacroAssembler::profile_parameters_type(Register tmp1, Register
|
||||
|
||||
// Go to next parameter.
|
||||
int delta = TypeStackSlotEntries::per_arg_count() * DataLayout::cell_size + (type_base - off_base);
|
||||
cmpdi(CCR0, entry_offset, off_base + delta);
|
||||
cmpdi(CR0, entry_offset, off_base + delta);
|
||||
addi(entry_offset, entry_offset, -delta);
|
||||
bge(CCR0, loop);
|
||||
bge(CR0, loop);
|
||||
|
||||
align(32, 12);
|
||||
bind(profile_continue);
|
||||
@@ -1975,7 +1975,7 @@ void InterpreterMacroAssembler::add_monitor_to_stack(bool stack_is_empty, Regist
|
||||
subf(n_slots, esp, R26_monitor);
|
||||
srdi_(n_slots, n_slots, LogBytesPerWord); // Compute number of slots to copy.
|
||||
assert(LogBytesPerWord == 3, "conflicts assembler instructions");
|
||||
beq(CCR0, copy_slot_finished); // Nothing to copy.
|
||||
beq(CR0, copy_slot_finished); // Nothing to copy.
|
||||
|
||||
mtctr(n_slots);
|
||||
|
||||
@@ -2115,8 +2115,8 @@ void InterpreterMacroAssembler::check_and_forward_exception(Register Rscratch1,
|
||||
Label Ldone;
|
||||
// Get pending exception oop.
|
||||
ld(Rexception, thread_(pending_exception));
|
||||
cmpdi(CCR0, Rexception, 0);
|
||||
beq(CCR0, Ldone);
|
||||
cmpdi(CR0, Rexception, 0);
|
||||
beq(CR0, Ldone);
|
||||
li(Rtmp, 0);
|
||||
mr_if_needed(R3, Rexception);
|
||||
std(Rtmp, thread_(pending_exception)); // Clear exception in thread
|
||||
@@ -2168,7 +2168,7 @@ void InterpreterMacroAssembler::call_VM_preemptable(Register oop_result, address
|
||||
Label resume_pc, not_preempted;
|
||||
|
||||
DEBUG_ONLY(ld(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread));
|
||||
DEBUG_ONLY(cmpdi(CCR0, R0, 0));
|
||||
DEBUG_ONLY(cmpdi(CR0, R0, 0));
|
||||
asm_assert_eq("Should not have alternate return address set");
|
||||
|
||||
// Preserve 2 registers
|
||||
@@ -2186,8 +2186,8 @@ void InterpreterMacroAssembler::call_VM_preemptable(Register oop_result, address
|
||||
|
||||
// Jump to handler if the call was preempted
|
||||
ld(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread);
|
||||
cmpdi(CCR0, R0, 0);
|
||||
beq(CCR0, not_preempted);
|
||||
cmpdi(CR0, R0, 0);
|
||||
beq(CR0, not_preempted);
|
||||
mtlr(R0);
|
||||
li(R0, 0);
|
||||
std(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread);
|
||||
@@ -2215,8 +2215,8 @@ void InterpreterMacroAssembler::restore_after_resume(Register fp) {
|
||||
{
|
||||
Label ok;
|
||||
ld(R12_scratch2, 0, R1_SP); // load fp
|
||||
cmpd(CCR0, R12_scratch2, R11_scratch1);
|
||||
beq(CCR0, ok);
|
||||
cmpd(CR0, R12_scratch2, R11_scratch1);
|
||||
beq(CR0, ok);
|
||||
stop(FILE_AND_LINE ": FP is expected in R11_scratch1");
|
||||
bind(ok);
|
||||
}
|
||||
@@ -2298,8 +2298,8 @@ void InterpreterMacroAssembler::restore_interpreter_state(Register scratch, bool
|
||||
{
|
||||
Label Lok;
|
||||
subf(R0, R1_SP, scratch);
|
||||
cmpdi(CCR0, R0, frame::top_ijava_frame_abi_size + frame::ijava_state_size);
|
||||
bge(CCR0, Lok);
|
||||
cmpdi(CR0, R0, frame::top_ijava_frame_abi_size + frame::ijava_state_size);
|
||||
bge(CR0, Lok);
|
||||
stop("frame too small (restore istate)");
|
||||
bind(Lok);
|
||||
}
|
||||
@@ -2312,13 +2312,13 @@ void InterpreterMacroAssembler::get_method_counters(Register method,
|
||||
BLOCK_COMMENT("Load and ev. allocate counter object {");
|
||||
Label has_counters;
|
||||
ld(Rcounters, in_bytes(Method::method_counters_offset()), method);
|
||||
cmpdi(CCR0, Rcounters, 0);
|
||||
bne(CCR0, has_counters);
|
||||
cmpdi(CR0, Rcounters, 0);
|
||||
bne(CR0, has_counters);
|
||||
call_VM(noreg, CAST_FROM_FN_PTR(address,
|
||||
InterpreterRuntime::build_method_counters), method);
|
||||
ld(Rcounters, in_bytes(Method::method_counters_offset()), method);
|
||||
cmpdi(CCR0, Rcounters, 0);
|
||||
beq(CCR0, skip); // No MethodCounters, OutOfMemory.
|
||||
cmpdi(CR0, Rcounters, 0);
|
||||
beq(CR0, skip); // No MethodCounters, OutOfMemory.
|
||||
BLOCK_COMMENT("} Load and ev. allocate counter object");
|
||||
|
||||
bind(has_counters);
|
||||
@@ -2398,7 +2398,7 @@ void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Regis
|
||||
|
||||
const int log2_bytecode_size_limit = 16;
|
||||
srdi_(Rtmp, reg, log2_bytecode_size_limit);
|
||||
bne(CCR0, test);
|
||||
bne(CR0, test);
|
||||
|
||||
address fd = CAST_FROM_FN_PTR(address, verify_return_address);
|
||||
const int nbytes_save = MacroAssembler::num_volatile_regs * 8;
|
||||
@@ -2442,8 +2442,8 @@ void InterpreterMacroAssembler::notify_method_entry() {
|
||||
Label jvmti_post_done;
|
||||
|
||||
lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
|
||||
cmpwi(CCR0, R0, 0);
|
||||
beq(CCR0, jvmti_post_done);
|
||||
cmpwi(CR0, R0, 0);
|
||||
beq(CR0, jvmti_post_done);
|
||||
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
|
||||
|
||||
bind(jvmti_post_done);
|
||||
@@ -2476,8 +2476,8 @@ void InterpreterMacroAssembler::notify_method_exit(bool is_native_method, TosSta
|
||||
Label jvmti_post_done;
|
||||
|
||||
lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
|
||||
cmpwi(CCR0, R0, 0);
|
||||
beq(CCR0, jvmti_post_done);
|
||||
cmpwi(CR0, R0, 0);
|
||||
beq(CR0, jvmti_post_done);
|
||||
if (!is_native_method) { push(state); } // Expose tos to GC.
|
||||
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit), check_exceptions);
|
||||
if (!is_native_method) { pop(state); }
|
||||
|
||||
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -103,9 +103,9 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
|
||||
Label do_null;
|
||||
if (do_null_check) {
|
||||
__ ld(R0, locals_j_arg_at(offset()));
|
||||
__ cmpdi(CCR0, R0, 0);
|
||||
__ cmpdi(CR0, R0, 0);
|
||||
__ li(r, 0);
|
||||
__ beq(CCR0, do_null);
|
||||
__ beq(CR0, do_null);
|
||||
}
|
||||
__ addir(r, locals_j_arg_at(offset()));
|
||||
__ bind(do_null);
|
||||
|
||||
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2019 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,7 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
|
||||
|
||||
__ ld(Rcounter, counter_offs, Rcounter_addr);
|
||||
__ andi_(R0, Rcounter, 1);
|
||||
__ bne(CCR0, slow);
|
||||
__ bne(CR0, slow);
|
||||
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
// Field may be volatile.
|
||||
@@ -91,8 +91,8 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
|
||||
int fac_offs = __ load_const_optimized(Rtmp, JvmtiExport::get_field_access_count_addr(),
|
||||
R0, true);
|
||||
__ lwa(Rtmp, fac_offs, Rtmp);
|
||||
__ cmpwi(CCR0, Rtmp, 0);
|
||||
__ bne(CCR0, slow);
|
||||
__ cmpwi(CR0, Rtmp, 0);
|
||||
__ bne(CR0, slow);
|
||||
}
|
||||
|
||||
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
|
||||
@@ -118,8 +118,8 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
|
||||
// Order preceding load(s) wrt. succeeding check (LoadStore for volatile field).
|
||||
if (is_fp) {
|
||||
Label next;
|
||||
__ fcmpu(CCR0, F1_RET, F1_RET);
|
||||
__ bne(CCR0, next);
|
||||
__ fcmpu(CR0, F1_RET, F1_RET);
|
||||
__ bne(CR0, next);
|
||||
__ bind(next);
|
||||
} else {
|
||||
__ twi_0(Rtmp);
|
||||
@@ -127,8 +127,8 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
|
||||
__ isync();
|
||||
|
||||
__ ld(R0, counter_offs, Rcounter_addr);
|
||||
__ cmpd(CCR0, R0, Rcounter);
|
||||
__ bne(CCR0, slow);
|
||||
__ cmpd(CR0, R0, Rcounter);
|
||||
__ bne(CR0, slow);
|
||||
|
||||
if (!is_fp) {
|
||||
__ mr(R3_RET, Rtmp);
|
||||
|
||||
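The fast accessor above is a sequence-lock style read: load the safepoint counter, bail out to the slow path if it is odd, speculatively load the field, then re-check that the counter has not changed (the twi/isync pair orders the re-check after the load). Roughly, under assumed types rather than the real JNI_FastGetField state:

    #include <atomic>
    #include <cstdint>

    // Returns true and sets 'value' only if the speculative read is known to be consistent.
    static bool fast_get_int_sketch(const std::atomic<uint64_t>& counter,
                                    const int32_t* field, int32_t& value) {
      uint64_t v1 = counter.load(std::memory_order_acquire);
      if (v1 & 1) return false;                              // odd: GC/safepoint in progress -> slow path
      value = *field;                                        // speculative load; the object may move
      std::atomic_thread_fence(std::memory_order_acquire);   // order the re-check after the load
      return counter.load(std::memory_order_relaxed) == v1;  // counter changed -> retry via slow path
    }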
File diff suppressed because it is too large
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2024 SAP SE. All rights reserved.
+ * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -179,8 +179,8 @@ class MacroAssembler: public Assembler {
|
||||
//
|
||||
// branch, jump
|
||||
//
|
||||
// set dst to -1, 0, +1 as follows: if CCR0bi is "greater than", dst is set to 1,
|
||||
// if CCR0bi is "equal", dst is set to 0, otherwise it's set to -1.
|
||||
// set dst to -1, 0, +1 as follows: if CR0bi is "greater than", dst is set to 1,
|
||||
// if CR0bi is "equal", dst is set to 0, otherwise it's set to -1.
|
||||
void inline set_cmp3(Register dst);
|
||||
// set dst to (treat_unordered_like_less ? -1 : +1)
|
||||
void inline set_cmpu3(Register dst, bool treat_unordered_like_less);
|
||||
@@ -612,6 +612,20 @@ class MacroAssembler: public Assembler {
|
||||
// The temp_reg can be noreg, if no temps are available.
|
||||
// It can also be sub_klass or super_klass, meaning it's OK to kill that one.
|
||||
// Updates the sub's secondary super cache as necessary.
|
||||
void check_klass_subtype_slow_path_linear(Register sub_klass,
|
||||
Register super_klass,
|
||||
Register temp1_reg,
|
||||
Register temp2_reg,
|
||||
Label* L_success = nullptr,
|
||||
Register result_reg = noreg);
|
||||
|
||||
void check_klass_subtype_slow_path_table(Register sub_klass,
|
||||
Register super_klass,
|
||||
Register temp1_reg,
|
||||
Register temp2_reg,
|
||||
Label* L_success = nullptr,
|
||||
Register result_reg = noreg);
|
||||
|
||||
void check_klass_subtype_slow_path(Register sub_klass,
|
||||
Register super_klass,
|
||||
Register temp1_reg,
|
||||
@@ -619,6 +633,25 @@ class MacroAssembler: public Assembler {
|
||||
Label* L_success = nullptr,
|
||||
Register result_reg = noreg);
|
||||
|
||||
void lookup_secondary_supers_table_var(Register sub_klass,
|
||||
Register r_super_klass,
|
||||
Register temp1,
|
||||
Register temp2,
|
||||
Register temp3,
|
||||
Register temp4,
|
||||
Register result);
|
||||
|
||||
// If r is valid, return r.
|
||||
// If r is invalid, remove a register r2 from available_regs, add r2
|
||||
// to regs_to_push, then return r2.
|
||||
Register allocate_if_noreg(const Register r,
|
||||
RegSetIterator<Register> &available_regs,
|
||||
RegSet ®s_to_push);
|
||||
|
||||
// Frameless register spills (negative offset from SP)
|
||||
void push_set(RegSet set);
|
||||
void pop_set(RegSet set);
|
||||
|
||||
// Simplified, combined version, good for typical uses.
|
||||
// Falls through on failure.
|
||||
void check_klass_subtype(Register sub_klass,
|
||||
@@ -631,14 +664,14 @@ class MacroAssembler: public Assembler {
|
||||
|
||||
// As above, but with a constant super_klass.
|
||||
// The result is in Register result, not the condition codes.
|
||||
void lookup_secondary_supers_table(Register r_sub_klass,
|
||||
Register r_super_klass,
|
||||
Register temp1,
|
||||
Register temp2,
|
||||
Register temp3,
|
||||
Register temp4,
|
||||
Register result,
|
||||
u1 super_klass_slot);
|
||||
void lookup_secondary_supers_table_const(Register r_sub_klass,
|
||||
Register r_super_klass,
|
||||
Register temp1,
|
||||
Register temp2,
|
||||
Register temp3,
|
||||
Register temp4,
|
||||
Register result,
|
||||
u1 super_klass_slot);
|
||||
|
||||
void verify_secondary_supers_table(Register r_sub_klass,
|
||||
Register r_super_klass,
|
||||
|
||||
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2002, 2024, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2024 SAP SE. All rights reserved.
+ * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -248,14 +248,14 @@ inline bool MacroAssembler::is_bc_far_variant3_at(address instruction_addr) {
is_endgroup(instruction_2);
}

- // set dst to -1, 0, +1 as follows: if CCR0bi is "greater than", dst is set to 1,
- // if CCR0bi is "equal", dst is set to 0, otherwise it's set to -1.
+ // set dst to -1, 0, +1 as follows: if CR0bi is "greater than", dst is set to 1,
+ // if CR0bi is "equal", dst is set to 0, otherwise it's set to -1.
inline void MacroAssembler::set_cmp3(Register dst) {
assert_different_registers(dst, R0);
// P10, prefer using setbc instructions
if (VM_Version::has_brw()) {
- setbc(R0, CCR0, Assembler::greater); // Set 1 to R0 if CCR0bi is "greater than", otherwise 0
- setnbc(dst, CCR0, Assembler::less); // Set -1 to dst if CCR0bi is "less than", otherwise 0
+ setbc(R0, CR0, Assembler::greater); // Set 1 to R0 if CR0bi is "greater than", otherwise 0
+ setnbc(dst, CR0, Assembler::less); // Set -1 to dst if CR0bi is "less than", otherwise 0
} else {
mfcr(R0); // copy CR register to R0
srwi(dst, R0, 30); // copy the first two bits to dst
@@ -267,9 +267,9 @@ inline void MacroAssembler::set_cmp3(Register dst) {
// set dst to (treat_unordered_like_less ? -1 : +1)
inline void MacroAssembler::set_cmpu3(Register dst, bool treat_unordered_like_less) {
if (treat_unordered_like_less) {
- cror(CCR0, Assembler::less, CCR0, Assembler::summary_overflow); // treat unordered like less
+ cror(CR0, Assembler::less, CR0, Assembler::summary_overflow); // treat unordered like less
} else {
- cror(CCR0, Assembler::greater, CCR0, Assembler::summary_overflow); // treat unordered like greater
+ cror(CR0, Assembler::greater, CR0, Assembler::summary_overflow); // treat unordered like greater
}
set_cmp3(dst);
}
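The comments above define the value these helpers materialize from the CR0 field: an ordinary three-way compare result, with set_cmpu3() first folding the "unordered" outcome into less or greater. In plain C++ terms (sketch only, not the generated code):

    // Three-way compare result as produced by set_cmp3() from CR0: +1, 0 or -1.
    static int cmp3_sketch(long a, long b) {
      if (a > b) return 1;     // CR0 "greater" bit set
      if (a < b) return -1;    // CR0 "less" bit set
      return 0;                // CR0 "equal" bit set
    }

    // set_cmpu3(): an unordered compare (NaN operand) is first treated as less or greater.
    static int cmpu3_sketch(double a, double b, bool treat_unordered_like_less) {
      if (a != a || b != b) return treat_unordered_like_less ? -1 : 1;
      if (a > b) return 1;
      if (a < b) return -1;
      return 0;
    }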
@@ -280,11 +280,11 @@ inline void MacroAssembler::normalize_bool(Register dst, Register temp, bool is_
|
||||
|
||||
if (VM_Version::has_brw()) {
|
||||
if (is_64bit) {
|
||||
cmpdi(CCR0, dst, 0);
|
||||
cmpdi(CR0, dst, 0);
|
||||
} else {
|
||||
cmpwi(CCR0, dst, 0);
|
||||
cmpwi(CR0, dst, 0);
|
||||
}
|
||||
setbcr(dst, CCR0, Assembler::equal);
|
||||
setbcr(dst, CR0, Assembler::equal);
|
||||
} else {
|
||||
assert_different_registers(temp, dst);
|
||||
neg(temp, dst);
|
||||
@@ -373,8 +373,8 @@ inline void MacroAssembler::null_check_throw(Register a, int offset, Register te
|
||||
trap_null_check(a);
|
||||
} else {
|
||||
Label ok;
|
||||
cmpdi(CCR0, a, 0);
|
||||
bne(CCR0, ok);
|
||||
cmpdi(CR0, a, 0);
|
||||
bne(CR0, ok);
|
||||
load_const_optimized(temp_reg, exception_entry);
|
||||
mtctr(temp_reg);
|
||||
bctr();
|
||||
@@ -390,8 +390,8 @@ inline void MacroAssembler::null_check(Register a, int offset, Label *Lis_null)
|
||||
trap_null_check(a);
|
||||
} else if (Lis_null){
|
||||
Label ok;
|
||||
cmpdi(CCR0, a, 0);
|
||||
beq(CCR0, *Lis_null);
|
||||
cmpdi(CR0, a, 0);
|
||||
beq(CR0, *Lis_null);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -468,14 +468,14 @@ inline Register MacroAssembler::encode_heap_oop_not_null(Register d, Register sr
|
||||
inline Register MacroAssembler::encode_heap_oop(Register d, Register src) {
|
||||
if (CompressedOops::base() != nullptr) {
|
||||
if (VM_Version::has_isel()) {
|
||||
cmpdi(CCR0, src, 0);
|
||||
cmpdi(CR0, src, 0);
|
||||
Register co = encode_heap_oop_not_null(d, src);
|
||||
assert(co == d, "sanity");
|
||||
isel_0(d, CCR0, Assembler::equal);
|
||||
isel_0(d, CR0, Assembler::equal);
|
||||
} else {
|
||||
Label isNull;
|
||||
or_(d, src, src); // move and compare 0
|
||||
beq(CCR0, isNull);
|
||||
beq(CR0, isNull);
|
||||
encode_heap_oop_not_null(d, src);
|
||||
bind(isNull);
|
||||
}
|
||||
@@ -509,16 +509,16 @@ inline void MacroAssembler::decode_heap_oop(Register d) {
|
||||
Label isNull;
|
||||
bool use_isel = false;
|
||||
if (CompressedOops::base() != nullptr) {
|
||||
cmpwi(CCR0, d, 0);
|
||||
cmpwi(CR0, d, 0);
|
||||
if (VM_Version::has_isel()) {
|
||||
use_isel = true;
|
||||
} else {
|
||||
beq(CCR0, isNull);
|
||||
beq(CR0, isNull);
|
||||
}
|
||||
}
|
||||
decode_heap_oop_not_null(d);
|
||||
if (use_isel) {
|
||||
isel_0(d, CCR0, Assembler::equal);
|
||||
isel_0(d, CR0, Assembler::equal);
|
||||
}
|
||||
bind(isNull);
|
||||
}
|
||||
|
||||
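Both hunks above guard the not-null encode/decode with an explicit null check (branch or isel) whenever the narrow-oop base is non-zero, because the base/shift arithmetic would otherwise map null to a non-null value. A sketch with assumed base and shift constants (not the real CompressedOops configuration, 64-bit assumed):

    #include <cstdint>

    constexpr uintptr_t kHeapBaseSketch = 0x800000000ULL;  // assumed narrow-oop base
    constexpr unsigned  kOopShiftSketch = 3;                // assumed narrow-oop shift

    static uint32_t encode_heap_oop_sketch(uintptr_t oop) {
      if (oop == 0) return 0;                               // null must stay null
      return (uint32_t)((oop - kHeapBaseSketch) >> kOopShiftSketch);
    }

    static uintptr_t decode_heap_oop_sketch(uint32_t narrow) {
      if (narrow == 0) return 0;                            // null must stay null
      return ((uintptr_t)narrow << kOopShiftSketch) + kHeapBaseSketch;
    }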
@@ -93,7 +93,7 @@ void MacroAssembler::sha256_load_h_vec(const VectorRegister a,
|
||||
lvx (a, hptr);
|
||||
addi (tmp, hptr, 16);
|
||||
lvx (e, tmp);
|
||||
beq (CCR0, sha256_aligned);
|
||||
beq (CR0, sha256_aligned);
|
||||
|
||||
// handle unaligned accesses
|
||||
load_perm(vRb, hptr);
|
||||
@@ -121,7 +121,7 @@ void MacroAssembler::sha256_load_w_plus_k_vec(const Register buf_in,
|
||||
VectorRegister vRb = VR6;
|
||||
|
||||
andi_ (tmp, buf_in, 0xF);
|
||||
beq (CCR0, w_aligned); // address ends with 0x0, not 0x8
|
||||
beq (CR0, w_aligned); // address ends with 0x0, not 0x8
|
||||
|
||||
// deal with unaligned addresses
|
||||
lvx (ws[0], buf_in);
|
||||
@@ -318,7 +318,7 @@ void MacroAssembler::sha256_update_sha_state(const VectorRegister a,
|
||||
li (of16, 16);
|
||||
lvx (vt0, hptr);
|
||||
lvx (vt5, of16, hptr);
|
||||
beq (CCR0, state_load_aligned);
|
||||
beq (CR0, state_load_aligned);
|
||||
|
||||
// handle unaligned accesses
|
||||
li (of32, 32);
|
||||
@@ -538,8 +538,8 @@ void MacroAssembler::sha256(bool multi_block) {
|
||||
if (multi_block) {
|
||||
addi(buf_in, buf_in, buf_size);
|
||||
addi(ofs, ofs, buf_size);
|
||||
cmplw(CCR0, ofs, limit);
|
||||
ble(CCR0, sha_loop);
|
||||
cmplw(CR0, ofs, limit);
|
||||
ble(CR0, sha_loop);
|
||||
|
||||
// return ofs
|
||||
mr(R3_RET, ofs);
|
||||
@@ -567,7 +567,7 @@ void MacroAssembler::sha512_load_w_vec(const Register buf_in,
|
||||
Label is_aligned, after_alignment;
|
||||
|
||||
andi_ (tmp, buf_in, 0xF);
|
||||
beq (CCR0, is_aligned); // address ends with 0x0, not 0x8
|
||||
beq (CR0, is_aligned); // address ends with 0x0, not 0x8
|
||||
|
||||
// deal with unaligned addresses
|
||||
lvx (ws[0], buf_in);
|
||||
@@ -623,7 +623,7 @@ void MacroAssembler::sha512_update_sha_state(const Register state,
|
||||
VectorRegister aux = VR9;
|
||||
|
||||
andi_(tmp, state, 0xf);
|
||||
beq(CCR0, state_save_aligned);
|
||||
beq(CR0, state_save_aligned);
|
||||
// deal with unaligned addresses
|
||||
|
||||
{
|
||||
@@ -860,7 +860,7 @@ void MacroAssembler::sha512_load_h_vec(const Register state,
|
||||
Label state_aligned, after_state_aligned;
|
||||
|
||||
andi_(tmp, state, 0xf);
|
||||
beq(CCR0, state_aligned);
|
||||
beq(CR0, state_aligned);
|
||||
|
||||
// deal with unaligned addresses
|
||||
VectorRegister aux = VR9;
|
||||
@@ -1121,8 +1121,8 @@ void MacroAssembler::sha512(bool multi_block) {
|
||||
if (multi_block) {
|
||||
addi(buf_in, buf_in, buf_size);
|
||||
addi(ofs, ofs, buf_size);
|
||||
cmplw(CCR0, ofs, limit);
|
||||
ble(CCR0, sha_loop);
|
||||
cmplw(CR0, ofs, limit);
|
||||
ble(CR0, sha_loop);
|
||||
|
||||
// return ofs
|
||||
mr(R3_RET, ofs);
|
||||
|
||||
@@ -1,6 +1,6 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2024 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -83,16 +83,16 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
|
||||
Label L_ok, L_bad;
|
||||
BLOCK_COMMENT("verify_klass {");
|
||||
__ verify_oop(obj_reg, FILE_AND_LINE);
|
||||
__ cmpdi(CCR0, obj_reg, 0);
|
||||
__ beq(CCR0, L_bad);
|
||||
__ cmpdi(CR0, obj_reg, 0);
|
||||
__ beq(CR0, L_bad);
|
||||
__ load_klass(temp_reg, obj_reg);
|
||||
__ load_const_optimized(temp2_reg, (address) klass_addr);
|
||||
__ ld(temp2_reg, 0, temp2_reg);
|
||||
__ cmpd(CCR0, temp_reg, temp2_reg);
|
||||
__ beq(CCR0, L_ok);
|
||||
__ cmpd(CR0, temp_reg, temp2_reg);
|
||||
__ beq(CR0, L_ok);
|
||||
__ ld(temp_reg, klass->super_check_offset(), temp_reg);
|
||||
__ cmpd(CCR0, temp_reg, temp2_reg);
|
||||
__ beq(CCR0, L_ok);
|
||||
__ cmpd(CR0, temp_reg, temp2_reg);
|
||||
__ beq(CR0, L_ok);
|
||||
__ BIND(L_bad);
|
||||
__ stop(error_message);
|
||||
__ BIND(L_ok);
|
||||
@@ -107,8 +107,8 @@ void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Registe
  // assert(sizeof(u4) == sizeof(java.lang.invoke.MemberName.flags), "");
  __ srwi( temp, temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT);
  __ andi(temp, temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK);
-  __ cmpwi(CCR1, temp, ref_kind);
-  __ beq(CCR1, L);
+  __ cmpwi(CR1, temp, ref_kind);
+  __ beq(CR1, L);
  { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal);
  jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind);
  if (ref_kind == JVM_REF_invokeVirtual ||
@@ -135,11 +135,11 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth
  // compiled code in threads for which the event is enabled. Check here for
  // interp_only_mode if these events CAN be enabled.
  __ lwz(temp, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
-  __ cmplwi(CCR0, temp, 0);
-  __ beq(CCR0, run_compiled_code);
+  __ cmplwi(CR0, temp, 0);
+  __ beq(CR0, run_compiled_code);
  // Null method test is replicated below in compiled case.
-  __ cmplwi(CCR0, R19_method, 0);
-  __ beq(CCR0, L_no_such_method);
+  __ cmplwi(CR0, R19_method, 0);
+  __ beq(CR0, L_no_such_method);
  __ ld(target, in_bytes(Method::interpreter_entry_offset()), R19_method);
  __ mtctr(target);
  __ bctr();
@@ -147,8 +147,8 @@ void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register meth
  }

  // Compiled case, either static or fall-through from runtime conditional
-  __ cmplwi(CCR0, R19_method, 0);
-  __ beq(CCR0, L_no_such_method);
+  __ cmplwi(CR0, R19_method, 0);
+  __ beq(CR0, L_no_such_method);

  const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
                                                     Method::from_interpreted_offset();
@@ -200,8 +200,8 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
  // assert(sizeof(u2) == sizeof(ConstMethod::_size_of_parameters), "");
  Label L;
  __ ld(temp2, __ argument_offset(temp2, temp2, 0), R15_esp);
-  __ cmpd(CCR1, temp2, recv);
-  __ beq(CCR1, L);
+  __ cmpd(CR1, temp2, recv);
+  __ beq(CR1, L);
  __ stop("receiver not on stack");
  __ BIND(L);
  }
@@ -248,8 +248,8 @@ address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler*
  BLOCK_COMMENT("verify_intrinsic_id {");
  __ load_sized_value(R30_tmp1, in_bytes(Method::intrinsic_id_offset()), R19_method,
                      sizeof(u2), /*is_signed*/ false);
-  __ cmpwi(CCR1, R30_tmp1, (int) iid);
-  __ beq(CCR1, L);
+  __ cmpwi(CR1, R30_tmp1, (int) iid);
+  __ beq(CR1, L);
  if (iid == vmIntrinsics::_linkToVirtual ||
      iid == vmIntrinsics::_linkToSpecial) {
  // could do this for all kinds, but would explode assembly code size
@@ -425,8 +425,8 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,

  if (VerifyMethodHandles) {
  Label L_index_ok;
-  __ cmpdi(CCR1, temp2_index, 0);
-  __ bge(CCR1, L_index_ok);
+  __ cmpdi(CR1, temp2_index, 0);
+  __ bge(CR1, L_index_ok);
  __ stop("no virtual index");
  __ BIND(L_index_ok);
  }
@@ -457,8 +457,8 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
  __ ld(vtable_index, NONZERO(java_lang_invoke_MemberName::vmindex_offset()), member_reg);
  if (VerifyMethodHandles) {
  Label L_index_ok;
-  __ cmpdi(CCR1, vtable_index, 0);
-  __ bge(CCR1, L_index_ok);
+  __ cmpdi(CR1, vtable_index, 0);
+  __ bge(CR1, L_index_ok);
  __ stop("invalid vtable index for MH.invokeInterface");
  __ BIND(L_index_ok);
  }

@@ -1,6 +1,6 @@
//
- // Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
- // Copyright (c) 2012, 2024 SAP SE. All rights reserved.
+ // Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
+ // Copyright (c) 2012, 2025 SAP SE. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -236,14 +236,14 @@ register %{
// in the CR register.

// types: v = volatile, nv = non-volatile, s = system
- reg_def CCR0(SOC, SOC, Op_RegFlags, 0, CCR0->as_VMReg()); // v
- reg_def CCR1(SOC, SOC, Op_RegFlags, 1, CCR1->as_VMReg()); // v
- reg_def CCR2(SOC, SOC, Op_RegFlags, 2, CCR2->as_VMReg()); // nv
- reg_def CCR3(SOC, SOC, Op_RegFlags, 3, CCR3->as_VMReg()); // nv
- reg_def CCR4(SOC, SOC, Op_RegFlags, 4, CCR4->as_VMReg()); // nv
- reg_def CCR5(SOC, SOC, Op_RegFlags, 5, CCR5->as_VMReg()); // v
- reg_def CCR6(SOC, SOC, Op_RegFlags, 6, CCR6->as_VMReg()); // v
- reg_def CCR7(SOC, SOC, Op_RegFlags, 7, CCR7->as_VMReg()); // v
+ reg_def CR0(SOC, SOC, Op_RegFlags, 0, CR0->as_VMReg()); // v
+ reg_def CR1(SOC, SOC, Op_RegFlags, 1, CR1->as_VMReg()); // v
+ reg_def CR2(SOC, SOC, Op_RegFlags, 2, CR2->as_VMReg()); // nv
+ reg_def CR3(SOC, SOC, Op_RegFlags, 3, CR3->as_VMReg()); // nv
+ reg_def CR4(SOC, SOC, Op_RegFlags, 4, CR4->as_VMReg()); // nv
+ reg_def CR5(SOC, SOC, Op_RegFlags, 5, CR5->as_VMReg()); // v
+ reg_def CR6(SOC, SOC, Op_RegFlags, 6, CR6->as_VMReg()); // v
+ reg_def CR7(SOC, SOC, Op_RegFlags, 7, CR7->as_VMReg()); // v

// Special registers of PPC64

@@ -443,14 +443,14 @@ alloc_class chunk1 (
alloc_class chunk2 (
// Chunk2 contains *all* 8 condition code registers.

-  CCR0,
-  CCR1,
-  CCR2,
-  CCR3,
-  CCR4,
-  CCR5,
-  CCR6,
-  CCR7
+  CR0,
+  CR1,
+  CR2,
+  CR3,
+  CR4,
+  CR5,
+  CR6,
+  CR7
);

alloc_class chunk3 (
@@ -803,30 +803,30 @@ reg_class bits64_reg_ro(
// Special Class for Condition Code Flags Register

reg_class int_flags(
-  /*CCR0*/ // scratch
-  /*CCR1*/ // scratch
-  /*CCR2*/ // nv!
-  /*CCR3*/ // nv!
-  /*CCR4*/ // nv!
-  CCR5,
-  CCR6,
-  CCR7
+  /*CR0*/ // scratch
+  /*CR1*/ // scratch
+  /*CR2*/ // nv!
+  /*CR3*/ // nv!
+  /*CR4*/ // nv!
+  CR5,
+  CR6,
+  CR7
);

reg_class int_flags_ro(
-  CCR0,
-  CCR1,
-  CCR2,
-  CCR3,
-  CCR4,
-  CCR5,
-  CCR6,
-  CCR7
+  CR0,
+  CR1,
+  CR2,
+  CR3,
+  CR4,
+  CR5,
+  CR6,
+  CR7
);

- reg_class int_flags_CR0(CCR0);
- reg_class int_flags_CR1(CCR1);
- reg_class int_flags_CR6(CCR6);
+ reg_class int_flags_CR0(CR0);
+ reg_class int_flags_CR1(CR1);
+ reg_class int_flags_CR6(CR6);
reg_class ctr_reg(SR_CTR);

// ----------------------------
@@ -5568,8 +5568,8 @@ instruct loadF_ac(regF dst, memory mem, flagsRegCR0 cr0) %{
  int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
  Label next;
  __ lfs($dst$$FloatRegister, Idisp, $mem$$base$$Register);
-  __ fcmpu(CCR0, $dst$$FloatRegister, $dst$$FloatRegister);
-  __ bne(CCR0, next);
+  __ fcmpu(CR0, $dst$$FloatRegister, $dst$$FloatRegister);
+  __ bne(CR0, next);
  __ bind(next);
  __ isync();
%}
@@ -5604,8 +5604,8 @@ instruct loadD_ac(regD dst, memory mem, flagsRegCR0 cr0) %{
  int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
  Label next;
  __ lfd($dst$$FloatRegister, Idisp, $mem$$base$$Register);
-  __ fcmpu(CCR0, $dst$$FloatRegister, $dst$$FloatRegister);
-  __ bne(CCR0, next);
+  __ fcmpu(CR0, $dst$$FloatRegister, $dst$$FloatRegister);
+  __ bne(CR0, next);
  __ bind(next);
  __ isync();
%}
@@ -7394,8 +7394,8 @@ instruct compareAndSwapB_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "CMPXCHGB $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgb(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgb(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                $res$$Register, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
@@ -7413,8 +7413,8 @@ instruct compareAndSwapB4_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr, iRegIs
  effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP tmp2, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "CMPXCHGB $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgb(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgb(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                $res$$Register, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
@@ -7432,8 +7432,8 @@ instruct compareAndSwapS_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "CMPXCHGH $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgh(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgh(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                $res$$Register, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
@@ -7451,8 +7451,8 @@ instruct compareAndSwapS4_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr, iRegIs
  effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP tmp2, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "CMPXCHGH $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgh(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgh(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                $res$$Register, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
@@ -7469,8 +7469,8 @@ instruct compareAndSwapI_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "CMPXCHGW $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgw(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                $res$$Register, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
@@ -7488,8 +7488,8 @@ instruct compareAndSwapN_regP_regN_regN(iRegIdst res, iRegPdst mem_ptr, iRegNsrc
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "CMPXCHGW $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgw(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                $res$$Register, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
@@ -7506,8 +7506,8 @@ instruct compareAndSwapL_regP_regL_regL(iRegIdst res, iRegPdst mem_ptr, iRegLsrc
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "CMPXCHGD $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgd(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgd(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                $res$$Register, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
@@ -7525,8 +7525,8 @@ instruct compareAndSwapP_regP_regP_regP(iRegIdst res, iRegPdst mem_ptr, iRegPsrc
  predicate(n->as_LoadStore()->barrier_data() == 0);
  format %{ "CMPXCHGD $res, $mem_ptr, $src1, $src2; as bool; ptr" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgd(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgd(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                $res$$Register, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
@@ -7546,8 +7546,8 @@ instruct weakCompareAndSwapB_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iReg
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGB $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgb(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgb(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
                MacroAssembler::MemBarNone,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
@@ -7560,8 +7560,8 @@ instruct weakCompareAndSwapB4_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr, iR
  effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP tmp2, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGB $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgb(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgb(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
                MacroAssembler::MemBarNone,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
@@ -7574,8 +7574,8 @@ instruct weakCompareAndSwapB_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr,
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGB acq $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgb(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgb(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
                support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
@@ -7588,8 +7588,8 @@ instruct weakCompareAndSwapB4_acq_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr
  effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP tmp2, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGB acq $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgb(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgb(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
                support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
@@ -7602,8 +7602,8 @@ instruct weakCompareAndSwapS_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iReg
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGH $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgh(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgh(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
                MacroAssembler::MemBarNone,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
@@ -7616,8 +7616,8 @@ instruct weakCompareAndSwapS4_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr, iR
  effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP tmp2, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGH $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgh(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgh(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
                MacroAssembler::MemBarNone,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
@@ -7630,8 +7630,8 @@ instruct weakCompareAndSwapS_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr,
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGH acq $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgh(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgh(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
                support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
@@ -7644,8 +7644,8 @@ instruct weakCompareAndSwapS4_acq_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr
  effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP tmp2, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGH acq $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgh(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgh(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, $tmp2$$Register,
                support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
@@ -7658,8 +7658,8 @@ instruct weakCompareAndSwapI_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iReg
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGW $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgw(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
@@ -7672,10 +7672,10 @@ instruct weakCompareAndSwapI_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr,
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGW acq $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    // Acquire only needed in successful case. Weak node is allowed to report unsuccessful in additional rare cases and
    // value is never passed to caller.
-    __ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+    __ cmpxchgw(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
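The "Acquire only needed in successful case" remark above matches the usual weak compare-and-swap retry idiom; a minimal, self-contained C++ sketch of that reasoning (purely illustrative, not taken from the patch) is:

    #include <atomic>

    // A weak CAS may fail spuriously, so callers always retry in a loop.
    // Only a successful exchange publishes a value that later reads depend
    // on, which is why acquire ordering is needed on the success path only.
    bool try_increment_if_positive(std::atomic<int>& v) {
      int cur = v.load(std::memory_order_relaxed);
      while (cur > 0) {
        if (v.compare_exchange_weak(cur, cur + 1,
                                    std::memory_order_acquire,    // success ordering
                                    std::memory_order_relaxed)) { // failure ordering
          return true;
        }
        // cur has been reloaded by compare_exchange_weak; just loop again.
      }
      return false;
    }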
@@ -7688,8 +7688,8 @@ instruct weakCompareAndSwapN_regP_regN_regN(iRegIdst res, iRegPdst mem_ptr, iReg
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGW $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgw(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
@@ -7702,10 +7702,10 @@ instruct weakCompareAndSwapN_acq_regP_regN_regN(iRegIdst res, iRegPdst mem_ptr,
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGW acq $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    // Acquire only needed in successful case. Weak node is allowed to report unsuccessful in additional rare cases and
    // value is never passed to caller.
-    __ cmpxchgw(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+    __ cmpxchgw(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
@@ -7718,9 +7718,9 @@ instruct weakCompareAndSwapL_regP_regL_regL(iRegIdst res, iRegPdst mem_ptr, iReg
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGD $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    // value is never passed to caller.
-    __ cmpxchgd(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+    __ cmpxchgd(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
@@ -7733,10 +7733,10 @@ instruct weakCompareAndSwapL_acq_regP_regL_regL(iRegIdst res, iRegPdst mem_ptr,
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGD acq $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    // Acquire only needed in successful case. Weak node is allowed to report unsuccessful in additional rare cases and
    // value is never passed to caller.
-    __ cmpxchgd(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+    __ cmpxchgd(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
@@ -7749,8 +7749,8 @@ instruct weakCompareAndSwapP_regP_regP_regP(iRegIdst res, iRegPdst mem_ptr, iReg
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGD $res, $mem_ptr, $src1, $src2; as bool; ptr" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgd(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgd(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
@@ -7763,10 +7763,10 @@ instruct weakCompareAndSwapP_acq_regP_regP_regP(iRegIdst res, iRegPdst mem_ptr,
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGD acq $res, $mem_ptr, $src1, $src2; as bool; ptr" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    // Acquire only needed in successful case. Weak node is allowed to report unsuccessful in additional rare cases and
    // value is never passed to caller.
-    __ cmpxchgd(CCR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+    __ cmpxchgd(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
@@ -7781,8 +7781,8 @@ instruct compareAndExchangeB_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iReg
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "CMPXCHGB $res, $mem_ptr, $src1, $src2; as int" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgb(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgb(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
  %}
@@ -7795,8 +7795,8 @@ instruct compareAndExchangeB4_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr, iR
  effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP cr0);
  format %{ "CMPXCHGB $res, $mem_ptr, $src1, $src2; as int" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgb(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, R0,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgb(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, R0,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
  %}
@@ -7809,8 +7809,8 @@ instruct compareAndExchangeB_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr,
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "CMPXCHGB acq $res, $mem_ptr, $src1, $src2; as int" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgb(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgb(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
@@ -7829,8 +7829,8 @@ instruct compareAndExchangeB4_acq_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr
  effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP cr0);
  format %{ "CMPXCHGB acq $res, $mem_ptr, $src1, $src2; as int" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgb(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, R0,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgb(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, R0,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
@@ -7849,8 +7849,8 @@ instruct compareAndExchangeS_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iReg
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "CMPXCHGH $res, $mem_ptr, $src1, $src2; as int" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgh(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgh(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
  %}
@@ -7863,8 +7863,8 @@ instruct compareAndExchangeS4_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr, iR
  effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP cr0);
  format %{ "CMPXCHGH $res, $mem_ptr, $src1, $src2; as int" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgh(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, R0,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgh(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, R0,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
  %}
@@ -7877,8 +7877,8 @@ instruct compareAndExchangeS_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr,
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "CMPXCHGH acq $res, $mem_ptr, $src1, $src2; as int" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgh(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgh(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, noreg, noreg,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
@@ -7897,8 +7897,8 @@ instruct compareAndExchangeS4_acq_regP_regI_regI(iRegIdst res, rarg3RegP mem_ptr
  effect(TEMP_DEF res, USE_KILL src2, USE_KILL mem_ptr, TEMP tmp1, TEMP cr0);
  format %{ "CMPXCHGH acq $res, $mem_ptr, $src1, $src2; as int" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgh(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, R0,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgh(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register, $tmp1$$Register, R0,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
@@ -7917,8 +7917,8 @@ instruct compareAndExchangeI_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iReg
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "CMPXCHGW $res, $mem_ptr, $src1, $src2; as int" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgw(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgw(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
  %}
@@ -7931,8 +7931,8 @@ instruct compareAndExchangeI_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr,
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "CMPXCHGW acq $res, $mem_ptr, $src1, $src2; as int" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgw(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgw(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
@@ -7951,8 +7951,8 @@ instruct compareAndExchangeN_regP_regN_regN(iRegNdst res, iRegPdst mem_ptr, iReg
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "CMPXCHGW $res, $mem_ptr, $src1, $src2; as narrow oop" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgw(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgw(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
  %}
@@ -7965,8 +7965,8 @@ instruct compareAndExchangeN_acq_regP_regN_regN(iRegNdst res, iRegPdst mem_ptr,
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "CMPXCHGW acq $res, $mem_ptr, $src1, $src2; as narrow oop" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgw(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgw(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
@@ -7985,8 +7985,8 @@ instruct compareAndExchangeL_regP_regL_regL(iRegLdst res, iRegPdst mem_ptr, iReg
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "CMPXCHGD $res, $mem_ptr, $src1, $src2; as long" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgd(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgd(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
  %}
@@ -7999,8 +7999,8 @@ instruct compareAndExchangeL_acq_regP_regL_regL(iRegLdst res, iRegPdst mem_ptr,
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "CMPXCHGD acq $res, $mem_ptr, $src1, $src2; as long" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgd(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgd(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
@@ -8020,8 +8020,8 @@ instruct compareAndExchangeP_regP_regP_regP(iRegPdst res, iRegPdst mem_ptr, iReg
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "CMPXCHGD $res, $mem_ptr, $src1, $src2; as ptr; ptr" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgd(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgd(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
  %}
@@ -8035,8 +8035,8 @@ instruct compareAndExchangeP_acq_regP_regP_regP(iRegPdst res, iRegPdst mem_ptr,
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "CMPXCHGD acq $res, $mem_ptr, $src1, $src2; as ptr; ptr" %}
  ins_encode %{
-    // CmpxchgX sets CCR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
-    __ cmpxchgd(CCR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
+    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
+    __ cmpxchgd(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
@@ -11389,7 +11389,7 @@ instruct cmpL3_reg_reg(iRegIdst dst, iRegLsrc src1, iRegLsrc src2, flagsRegCR0 c
  format %{ "cmpL3_reg_reg $dst, $src1, $src2" %}

  ins_encode %{
-    __ cmpd(CCR0, $src1$$Register, $src2$$Register);
+    __ cmpd(CR0, $src1$$Register, $src2$$Register);
    __ set_cmp3($dst$$Register);
  %}
  ins_pipe(pipe_class_default);
@@ -11661,11 +11661,11 @@ instruct cmpF_reg_reg_Ex(flagsReg crx, regF src1, regF src2) %{
  //
  // block BXX:
  //   0: instruct cmpFUnordered_reg_reg (cmpF_reg_reg-0):
-  //      cmpFUrd CCR6, F11, F9
+  //      cmpFUrd CR6, F11, F9
  //   4: instruct cmov_bns_less (cmpF_reg_reg-1):
-  //      cmov CCR6
+  //      cmov CR6
  //   8: instruct branchConSched:
-  //      B_FARle CCR6, B56 P=0.500000 C=-1.000000
+  //      B_FARle CR6, B56 P=0.500000 C=-1.000000
  match(Set crx (CmpF src1 src2));
  ins_cost(DEFAULT_COST+BRANCH_COST);

@@ -11724,7 +11724,7 @@ instruct cmpF3_reg_reg(iRegIdst dst, regF src1, regF src2, flagsRegCR0 cr0) %{
  format %{ "cmpF3_reg_reg $dst, $src1, $src2" %}

  ins_encode %{
-    __ fcmpu(CCR0, $src1$$FloatRegister, $src2$$FloatRegister);
+    __ fcmpu(CR0, $src1$$FloatRegister, $src2$$FloatRegister);
    __ set_cmpu3($dst$$Register, true); // C2 requires unordered to get treated like less
  %}
  ins_pipe(pipe_class_default);
@@ -11808,7 +11808,7 @@ instruct cmpD3_reg_reg(iRegIdst dst, regD src1, regD src2, flagsRegCR0 cr0) %{
  format %{ "cmpD3_reg_reg $dst, $src1, $src2" %}

  ins_encode %{
-    __ fcmpu(CCR0, $src1$$FloatRegister, $src2$$FloatRegister);
+    __ fcmpu(CR0, $src1$$FloatRegister, $src2$$FloatRegister);
    __ set_cmpu3($dst$$Register, true); // C2 requires unordered to get treated like less
  %}
  ins_pipe(pipe_class_default);
@@ -12069,6 +12069,7 @@ instruct branchLoopEndFar(cmpOp cmp, flagsRegSrc crx, label labl) %{
instruct partialSubtypeCheck(iRegPdst result, iRegP_N2P subklass, iRegP_N2P superklass,
                             iRegPdst tmp_klass, iRegPdst tmp_arrayptr) %{
  match(Set result (PartialSubtypeCheck subklass superklass));
+  predicate(!UseSecondarySupersTable);
  effect(TEMP_DEF result, TEMP tmp_klass, TEMP tmp_arrayptr);
  ins_cost(DEFAULT_COST*10);

@@ -12080,6 +12081,30 @@ instruct partialSubtypeCheck(iRegPdst result, iRegP_N2P subklass, iRegP_N2P supe
  ins_pipe(pipe_class_default);
%}

+ // Two versions of partialSubtypeCheck, both used when we need to
+ // search for a super class in the secondary supers array. The first
+ // is used when we don't know _a priori_ the class being searched
+ // for. The second, far more common, is used when we do know: this is
+ // used for instanceof, checkcast, and any case where C2 can determine
+ // it by constant propagation.
+ instruct partialSubtypeCheckVarSuper(iRegPsrc sub, iRegPsrc super, iRegPdst result,
+                                      iRegPdst tempR1, iRegPdst tempR2, iRegPdst tempR3, iRegPdst tempR4,
+                                      flagsRegCR0 cr0, regCTR ctr)
+ %{
+   match(Set result (PartialSubtypeCheck sub super));
+   predicate(UseSecondarySupersTable);
+   effect(KILL cr0, KILL ctr, TEMP_DEF result, TEMP tempR1, TEMP tempR2, TEMP tempR3, TEMP tempR4);
+
+   ins_cost(DEFAULT_COST * 10); // slightly larger than the next version
+   format %{ "partialSubtypeCheck $result, $sub, $super" %}
+   ins_encode %{
+     __ lookup_secondary_supers_table_var($sub$$Register, $super$$Register,
+                                          $tempR1$$Register, $tempR2$$Register, $tempR3$$Register, $tempR4$$Register,
+                                          $result$$Register);
+   %}
+   ins_pipe(pipe_class_memory);
+ %}
+
instruct partialSubtypeCheckConstSuper(rarg3RegP sub, rarg2RegP super_reg, immP super_con, rarg6RegP result,
                                       rarg1RegP tempR1, rarg5RegP tempR2, rarg4RegP tempR3, rscratch1RegP tempR4,
                                       flagsRegCR0 cr0, regCTR ctr)
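The comment block above splits the secondary-supers search into a variable-superclass form and a far more common constant-superclass form; a hedged C++ sketch of that selection (names invented here purely for illustration) is:

    // Hypothetical sketch of the choice the two instruct forms encode; in the
    // real code the selection is made by C2 matching, not by a runtime branch.
    enum class SubtypeCheckKind { ConstSuper, VarSuper };

    SubtypeCheckKind select_partial_subtype_check(bool super_known_at_compile_time) {
      // instanceof, checkcast and constant-propagated cases know the super klass
      // statically, so its secondary-supers hash slot can be precomputed.
      return super_known_at_compile_time ? SubtypeCheckKind::ConstSuper
                                         : SubtypeCheckKind::VarSuper;
    }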
@@ -12094,9 +12119,9 @@ instruct partialSubtypeCheckConstSuper(rarg3RegP sub, rarg2RegP super_reg, immP
  ins_encode %{
    u1 super_klass_slot = ((Klass*)$super_con$$constant)->hash_slot();
    if (InlineSecondarySupersTest) {
-      __ lookup_secondary_supers_table($sub$$Register, $super_reg$$Register,
-                                       $tempR1$$Register, $tempR2$$Register, $tempR3$$Register, $tempR4$$Register,
-                                       $result$$Register, super_klass_slot);
+      __ lookup_secondary_supers_table_const($sub$$Register, $super_reg$$Register,
+                                             $tempR1$$Register, $tempR2$$Register, $tempR3$$Register, $tempR4$$Register,
+                                             $result$$Register, super_klass_slot);
    } else {
      address stub = StubRoutines::lookup_secondary_supers_table_stub(super_klass_slot);
      Register r_stub_addr = $tempR1$$Register;
@@ -12745,7 +12770,7 @@ instruct string_inflate(Universe dummy, rarg1RegP src, rarg2RegP dst, iRegIsrc l
    __ string_inflate_16($src$$Register, $dst$$Register, $len$$Register, $tmp1$$Register,
                         $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, $tmp5$$Register);
    __ rldicl_($tmp1$$Register, $len$$Register, 0, 64-3); // Remaining characters.
-    __ beq(CCR0, Ldone);
+    __ beq(CR0, Ldone);
    __ string_inflate($src$$Register, $dst$$Register, $tmp1$$Register, $tmp2$$Register);
    __ bind(Ldone);
  %}
@@ -12829,8 +12854,8 @@ instruct minI_reg_reg_isel(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, flagsRegC
  ins_cost(DEFAULT_COST*2);

  ins_encode %{
-    __ cmpw(CCR0, $src1$$Register, $src2$$Register);
-    __ isel($dst$$Register, CCR0, Assembler::less, /*invert*/false, $src1$$Register, $src2$$Register);
+    __ cmpw(CR0, $src1$$Register, $src2$$Register);
+    __ isel($dst$$Register, CR0, Assembler::less, /*invert*/false, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
@@ -12862,8 +12887,8 @@ instruct maxI_reg_reg_isel(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, flagsRegC
  ins_cost(DEFAULT_COST*2);

  ins_encode %{
-    __ cmpw(CCR0, $src1$$Register, $src2$$Register);
-    __ isel($dst$$Register, CCR0, Assembler::greater, /*invert*/false, $src1$$Register, $src2$$Register);
+    __ cmpw(CR0, $src1$$Register, $src2$$Register);
+    __ isel($dst$$Register, CR0, Assembler::greater, /*invert*/false, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2023 SAP SE. All rights reserved.
+ * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -179,14 +179,14 @@ inline constexpr ConditionRegister as_ConditionRegister(int encoding) {
  return ConditionRegister(encoding);
}

- constexpr ConditionRegister CCR0 = as_ConditionRegister(0);
- constexpr ConditionRegister CCR1 = as_ConditionRegister(1);
- constexpr ConditionRegister CCR2 = as_ConditionRegister(2);
- constexpr ConditionRegister CCR3 = as_ConditionRegister(3);
- constexpr ConditionRegister CCR4 = as_ConditionRegister(4);
- constexpr ConditionRegister CCR5 = as_ConditionRegister(5);
- constexpr ConditionRegister CCR6 = as_ConditionRegister(6);
- constexpr ConditionRegister CCR7 = as_ConditionRegister(7);
+ constexpr ConditionRegister CR0 = as_ConditionRegister(0);
+ constexpr ConditionRegister CR1 = as_ConditionRegister(1);
+ constexpr ConditionRegister CR2 = as_ConditionRegister(2);
+ constexpr ConditionRegister CR3 = as_ConditionRegister(3);
+ constexpr ConditionRegister CR4 = as_ConditionRegister(4);
+ constexpr ConditionRegister CR5 = as_ConditionRegister(5);
+ constexpr ConditionRegister CR6 = as_ConditionRegister(6);
+ constexpr ConditionRegister CR7 = as_ConditionRegister(7);


class VectorSRegister;

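With the renamed constants above, call sites spell the condition register by its architectural name; a minimal usage sketch consistent with the call shapes elsewhere in this diff (illustrative, not an excerpt from the sources) is:

    // Compare a register against zero and branch on CR0, using the new names.
    Label L_is_null;
    __ cmpdi(CR0, R3_ARG1, 0);
    __ beq(CR0, L_is_null);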
@@ -1,6 +1,6 @@
/*
 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2023 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -101,7 +101,7 @@ void OptoRuntime::generate_exception_blob() {
  __ call_c((address) OptoRuntime::handle_exception_C);
  address calls_return_pc = __ last_calls_return_pc();
# ifdef ASSERT
-  __ cmpdi(CCR0, R3_RET, 0);
+  __ cmpdi(CR0, R3_RET, 0);
  __ asm_assert_ne("handle_exception_C must not return null");
# endif

@@ -1,6 +1,6 @@
/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2024 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -908,9 +908,9 @@ static address gen_c2i_adapter(MacroAssembler *masm,

  // Does compiled code exists? If yes, patch the caller's callsite.
  __ ld(code, method_(code));
-  __ cmpdi(CCR0, code, 0);
+  __ cmpdi(CR0, code, 0);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
-  __ beq(CCR0, call_interpreter);
+  __ beq(CR0, call_interpreter);


  // Patch caller's callsite, method_(code) was not null which means that
@@ -1184,9 +1184,9 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
  // Argument is valid and klass is as expected, continue.

  __ ld(code, method_(code));
-  __ cmpdi(CCR0, code, 0);
+  __ cmpdi(CR0, code, 0);
  __ ld(ientry, method_(interpreter_entry)); // preloaded
-  __ beq_predict_taken(CCR0, call_interpreter);
+  __ beq_predict_taken(CR0, call_interpreter);

  // Branch to ic_miss_stub.
  __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
@@ -1203,7 +1203,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
  { // Bypass the barrier for non-static methods
    __ lhz(R0, in_bytes(Method::access_flags_offset()), R19_method);
    __ andi_(R0, R0, JVM_ACC_STATIC);
-    __ beq(CCR0, L_skip_barrier); // non-static
+    __ beq(CR0, L_skip_barrier); // non-static
  }

  Register klass = R11_scratch1;
@@ -1251,8 +1251,8 @@ static void object_move(MacroAssembler* masm,

    __ addi(r_handle, r_caller_sp, reg2offset(src.first()));
    __ ld( r_temp_2, reg2offset(src.first()), r_caller_sp);
-    __ cmpdi(CCR0, r_temp_2, 0);
-    __ bne(CCR0, skip);
+    __ cmpdi(CR0, r_temp_2, 0);
+    __ bne(CR0, skip);
    // Use a null handle if oop is null.
    __ li(r_handle, 0);
    __ bind(skip);
@@ -1281,8 +1281,8 @@ static void object_move(MacroAssembler* masm,
    __ std( r_oop, oop_offset, R1_SP);
    __ addi(r_handle, R1_SP, oop_offset);

-    __ cmpdi(CCR0, r_oop, 0);
-    __ bne(CCR0, skip);
+    __ cmpdi(CR0, r_oop, 0);
+    __ bne(CR0, skip);
    // Use a null handle if oop is null.
    __ li(r_handle, 0);
    __ bind(skip);
@@ -1642,7 +1642,7 @@ static void continuation_enter_cleanup(MacroAssembler* masm) {
#ifdef ASSERT
  __ block_comment("clean {");
  __ ld_ptr(tmp1, JavaThread::cont_entry_offset(), R16_thread);
-  __ cmpd(CCR0, R1_SP, tmp1);
+  __ cmpd(CR0, R1_SP, tmp1);
  __ asm_assert_eq(FILE_AND_LINE ": incorrect R1_SP");
#endif

@@ -1653,15 +1653,15 @@ static void continuation_enter_cleanup(MacroAssembler* masm) {
    // Check if this is a virtual thread continuation
    Label L_skip_vthread_code;
    __ lwz(R0, in_bytes(ContinuationEntry::flags_offset()), R1_SP);
-    __ cmpwi(CCR0, R0, 0);
-    __ beq(CCR0, L_skip_vthread_code);
+    __ cmpwi(CR0, R0, 0);
+    __ beq(CR0, L_skip_vthread_code);

    // If the held monitor count is > 0 and this vthread is terminating then
    // it failed to release a JNI monitor. So we issue the same log message
    // that JavaThread::exit does.
    __ ld(R0, in_bytes(JavaThread::jni_monitor_count_offset()), R16_thread);
-    __ cmpdi(CCR0, R0, 0);
-    __ beq(CCR0, L_skip_vthread_code);
+    __ cmpdi(CR0, R0, 0);
+    __ beq(CR0, L_skip_vthread_code);

    // Save return value potentially containing the exception oop
    Register ex_oop = R15_esp; // nonvolatile register
@@ -1683,8 +1683,8 @@ static void continuation_enter_cleanup(MacroAssembler* masm) {
    // Check if this is a virtual thread continuation
    Label L_skip_vthread_code;
    __ lwz(R0, in_bytes(ContinuationEntry::flags_offset()), R1_SP);
-    __ cmpwi(CCR0, R0, 0);
-    __ beq(CCR0, L_skip_vthread_code);
+    __ cmpwi(CR0, R0, 0);
+    __ beq(CR0, L_skip_vthread_code);

    // See comment just above. If not checking JNI calls the JNI count is only
    // needed for assertion checking.
@@ -1749,8 +1749,8 @@ static void gen_continuation_enter(MacroAssembler* masm,
#ifdef ASSERT
  Label is_interp_only;
  __ lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
-  __ cmpwi(CCR0, R0, 0);
-  __ bne(CCR0, is_interp_only);
+  __ cmpwi(CR0, R0, 0);
+  __ bne(CR0, is_interp_only);
  __ stop("enterSpecial interpreter entry called when not in interp_only_mode");
  __ bind(is_interp_only);
#endif
@@ -1770,8 +1770,8 @@ static void gen_continuation_enter(MacroAssembler* masm,
  fill_continuation_entry(masm, reg_cont_obj, reg_is_virtual);

  // If isContinue, call to thaw. Otherwise, call Continuation.enter(Continuation c, boolean isContinue)
-  __ cmpwi(CCR0, reg_is_cont, 0);
-  __ bne(CCR0, L_thaw);
+  __ cmpwi(CR0, reg_is_cont, 0);
+  __ bne(CR0, L_thaw);

  // --- call Continuation.enter(Continuation c, boolean isContinue)

@@ -1818,8 +1818,8 @@ static void gen_continuation_enter(MacroAssembler* masm,
  fill_continuation_entry(masm, reg_cont_obj, reg_is_virtual);

  // If isContinue, call to thaw. Otherwise, call Continuation.enter(Continuation c, boolean isContinue)
-  __ cmpwi(CCR0, reg_is_cont, 0);
-  __ bne(CCR0, L_thaw);
+  __ cmpwi(CR0, reg_is_cont, 0);
+  __ bne(CR0, L_thaw);

  // --- call Continuation.enter(Continuation c, boolean isContinue)

@@ -1869,7 +1869,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
  // Pop frame and return
  DEBUG_ONLY(__ ld_ptr(R0, 0, R1_SP));
  __ addi(R1_SP, R1_SP, framesize_words*wordSize);
-  DEBUG_ONLY(__ cmpd(CCR0, R0, R1_SP));
+  DEBUG_ONLY(__ cmpd(CR0, R0, R1_SP));
  __ asm_assert_eq(FILE_AND_LINE ": inconsistent frame size");
  __ ld(R0, _abi0(lr), R1_SP); // Return pc
  __ mtlr(R0);
@@ -1937,8 +1937,8 @@ static void gen_continuation_yield(MacroAssembler* masm,

  Label L_pinned;

-  __ cmpwi(CCR0, R3_RET, 0);
-  __ bne(CCR0, L_pinned);
+  __ cmpwi(CR0, R3_RET, 0);
+  __ bne(CR0, L_pinned);

  // yield succeeded

@@ -1961,8 +1961,8 @@ static void gen_continuation_yield(MacroAssembler* masm,

  // handle pending exception thrown by freeze
  __ ld(tmp, in_bytes(JavaThread::pending_exception_offset()), R16_thread);
-  __ cmpdi(CCR0, tmp, 0);
-  __ beq(CCR0, L_return); // return if no exception is pending
+  __ cmpdi(CR0, tmp, 0);
+  __ beq(CR0, L_return); // return if no exception is pending
  __ pop_frame();
  __ ld(R0, _abi0(lr), R1_SP); // Return pc
  __ mtlr(R0);
@@ -2398,12 +2398,12 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
    if (LockingMode == LM_LIGHTWEIGHT) {
      // fast_lock kills r_temp_1, r_temp_2, r_temp_3.
      Register r_temp_3_or_noreg = UseObjectMonitorTable ? r_temp_3 : noreg;
-      __ compiler_fast_lock_lightweight_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3_or_noreg);
+      __ compiler_fast_lock_lightweight_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3_or_noreg);
    } else {
      // fast_lock kills r_temp_1, r_temp_2, r_temp_3.
-      __ compiler_fast_lock_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
+      __ compiler_fast_lock_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
    }
-    __ beq(CCR0, locked);
+    __ beq(CR0, locked);

    // None of the above fast optimizations worked so we have to get into the
    // slow case of monitor enter. Inline a special case of call_VM that
@@ -2538,8 +2538,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
  // Not suspended.
  // TODO: PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
  __ lwz(suspend_flags, thread_(suspend_flags));
-  __ cmpwi(CCR1, suspend_flags, 0);
-  __ beq(CCR1, no_block);
+  __ cmpwi(CR1, suspend_flags, 0);
+  __ beq(CR1, no_block);

  // Block. Save any potential method result value before the operation and
  // use a leaf call to leave the last_Java_frame setup undisturbed. Doing this
@@ -2572,8 +2572,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
  if (LockingMode != LM_LEGACY && method->is_object_wait0()) {
    Label not_preempted;
    __ ld(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread);
-    __ cmpdi(CCR0, R0, 0);
-    __ beq(CCR0, not_preempted);
+    __ cmpdi(CR0, R0, 0);
+    __ beq(CR0, not_preempted);
    __ mtlr(R0);
    __ li(R0, 0);
    __ std(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread);
@@ -2591,8 +2591,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,

  Label no_reguard;
  __ lwz(r_temp_1, thread_(stack_guard_state));
-  __ cmpwi(CCR0, r_temp_1, StackOverflow::stack_guard_yellow_reserved_disabled);
-  __ bne(CCR0, no_reguard);
+  __ cmpwi(CR0, r_temp_1, StackOverflow::stack_guard_yellow_reserved_disabled);
+  __ bne(CR0, no_reguard);

  save_native_result(masm, ret_type, workspace_slot_offset);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages));
@@ -2622,11 +2622,11 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,

    // Try fastpath for unlocking.
    if (LockingMode == LM_LIGHTWEIGHT) {
-      __ compiler_fast_unlock_lightweight_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
+      __ compiler_fast_unlock_lightweight_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
    } else {
-      __ compiler_fast_unlock_object(CCR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
+      __ compiler_fast_unlock_object(CR0, r_oop, r_box, r_temp_1, r_temp_2, r_temp_3);
    }
-    __ beq(CCR0, done);
+    __ beq(CR0, done);

    // Save and restore any potential method result value around the unlocking operation.
    save_native_result(masm, ret_type, workspace_slot_offset);
@@ -2693,8 +2693,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
|
||||
// Check for pending exceptions.
|
||||
// --------------------------------------------------------------------------
|
||||
__ ld(r_temp_2, thread_(pending_exception));
|
||||
__ cmpdi(CCR0, r_temp_2, 0);
|
||||
__ bne(CCR0, handle_pending_exception);
|
||||
__ cmpdi(CR0, r_temp_2, 0);
|
||||
__ bne(CR0, handle_pending_exception);
|
||||
|
||||
// Return
|
||||
// --------------------------------------------------------------------------
|
||||
@@ -2851,7 +2851,7 @@ static void push_skeleton_frames(MacroAssembler* masm, bool deopt,
|
||||
|
||||
#ifdef ASSERT
|
||||
// Make sure that there is at least one entry in the array.
|
||||
__ cmpdi(CCR0, number_of_frames_reg, 0);
|
||||
__ cmpdi(CR0, number_of_frames_reg, 0);
|
||||
__ asm_assert_ne("array_size must be > 0");
|
||||
#endif
|
||||
|
||||
@@ -2866,8 +2866,8 @@ static void push_skeleton_frames(MacroAssembler* masm, bool deopt,
|
||||
pcs_reg,
|
||||
frame_size_reg,
|
||||
pc_reg);
|
||||
__ cmpdi(CCR0, number_of_frames_reg, 0);
|
||||
__ bne(CCR0, loop);
|
||||
__ cmpdi(CR0, number_of_frames_reg, 0);
|
||||
__ bne(CR0, loop);
|
||||
|
||||
// Get the return address pointing into the frame manager.
|
||||
__ ld(R0, 0, pcs_reg);
|
||||
@@ -3014,8 +3014,8 @@ void SharedRuntime::generate_deopt_blob() {
|
||||
// stored in the thread during exception entry above. The exception
|
||||
// oop will be the return value of this stub.
|
||||
Label skip_restore_excp;
|
||||
__ cmpdi(CCR0, exec_mode_reg, Deoptimization::Unpack_exception);
|
||||
__ bne(CCR0, skip_restore_excp);
|
||||
__ cmpdi(CR0, exec_mode_reg, Deoptimization::Unpack_exception);
|
||||
__ bne(CR0, skip_restore_excp);
|
||||
__ ld(R3_RET, in_bytes(JavaThread::exception_oop_offset()), R16_thread);
|
||||
__ ld(R4_ARG2, in_bytes(JavaThread::exception_pc_offset()), R16_thread);
|
||||
__ li(R0, 0);
|
||||
@@ -3165,7 +3165,7 @@ void OptoRuntime::generate_uncommon_trap_blob() {
|
||||
|
||||
#ifdef ASSERT
|
||||
__ lwz(R22_tmp2, in_bytes(Deoptimization::UnrollBlock::unpack_kind_offset()), unroll_block_reg);
|
||||
__ cmpdi(CCR0, R22_tmp2, (unsigned)Deoptimization::Unpack_uncommon_trap);
|
||||
__ cmpdi(CR0, R22_tmp2, (unsigned)Deoptimization::Unpack_uncommon_trap);
|
||||
__ asm_assert_eq("OptoRuntime::generate_uncommon_trap_blob: expected Unpack_uncommon_trap");
|
||||
#endif
|
||||
|
||||
@@ -3295,8 +3295,8 @@ SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address cal
|
||||
BLOCK_COMMENT(" Check pending exception.");
|
||||
const Register pending_exception = R0;
|
||||
__ ld(pending_exception, thread_(pending_exception));
|
||||
__ cmpdi(CCR0, pending_exception, 0);
|
||||
__ beq(CCR0, noException);
|
||||
__ cmpdi(CR0, pending_exception, 0);
|
||||
__ beq(CR0, noException);
|
||||
|
||||
// Exception pending
|
||||
RegisterSaver::restore_live_registers_and_pop_frame(masm,
|
||||
@@ -3315,8 +3315,8 @@ SafepointBlob* SharedRuntime::generate_handler_blob(SharedStubId id, address cal
|
||||
Label no_adjust;
|
||||
// If our stashed return pc was modified by the runtime we avoid touching it
|
||||
__ ld(R0, frame_size_in_bytes + _abi0(lr), R1_SP);
|
||||
__ cmpd(CCR0, R0, R31);
|
||||
__ bne(CCR0, no_adjust);
|
||||
__ cmpd(CR0, R0, R31);
|
||||
__ bne(CR0, no_adjust);
|
||||
|
||||
// Adjust return pc forward to step over the safepoint poll instruction
|
||||
__ addi(R31, R31, 4);
|
||||
@@ -3395,8 +3395,8 @@ RuntimeStub* SharedRuntime::generate_resolve_blob(SharedStubId id, address desti
|
||||
BLOCK_COMMENT("Check for pending exceptions.");
|
||||
Label pending;
|
||||
__ ld(R11_scratch1, thread_(pending_exception));
|
||||
__ cmpdi(CCR0, R11_scratch1, 0);
|
||||
__ bne(CCR0, pending);
|
||||
__ cmpdi(CR0, R11_scratch1, 0);
|
||||
__ bne(CR0, pending);
|
||||
|
||||
__ mtctr(R3_RET); // Ctr will not be touched by restore_live_registers_and_pop_frame.
|
||||
|
||||
@@ -3499,8 +3499,8 @@ RuntimeStub* SharedRuntime::generate_throw_exception(SharedStubId id, address ru
|
||||
__ ld(R0,
|
||||
in_bytes(Thread::pending_exception_offset()),
|
||||
R16_thread);
|
||||
__ cmpdi(CCR0, R0, 0);
|
||||
__ bne(CCR0, L);
|
||||
__ cmpdi(CR0, R0, 0);
|
||||
__ bne(CR0, L);
|
||||
__ stop("SharedRuntime::throw_exception: no pending exception");
|
||||
__ bind(L);
|
||||
}
|
||||
|
||||
src/hotspot/cpu/ppc/stubDeclarations_ppc.hpp (new file, 57 lines)
@@ -0,0 +1,57 @@
/*
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2025, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_PPC_STUBDECLARATIONS_HPP
#define CPU_PPC_STUBDECLARATIONS_HPP

#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub,                 \
                                      do_arch_blob,            \
                                      do_arch_entry,           \
                                      do_arch_entry_init)      \
  do_arch_blob(initial, 20000)                                 \


#define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub,            \
                                           do_arch_blob,       \
                                           do_arch_entry,      \
                                           do_arch_entry_init) \
  do_arch_blob(continuation, 2000)                             \


#define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub,                \
                                       do_arch_blob,           \
                                       do_arch_entry,          \
                                       do_arch_entry_init)     \
  do_arch_blob(compiler, 24000)                                \


#define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub,                   \
                                    do_arch_blob,              \
                                    do_arch_entry,             \
                                    do_arch_entry_init)        \
  do_arch_blob(final, 24000)                                   \


#endif // CPU_PPC_STUBDECLARATIONS_HPP
File diff suppressed because it is too large
@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2019 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@@ -32,14 +32,17 @@

static bool returns_to_call_stub(address return_pc) { return return_pc == _call_stub_return_address; }

// emit enum used to size per-blob code buffers

#define DEFINE_BLOB_SIZE(blob_name, size) \
  _ ## blob_name ## _code_size = size,

enum platform_dependent_constants {
  // simply increase sizes if too small (assembler will crash if too small)
  _initial_stubs_code_size = 20000,
  _continuation_stubs_code_size = 2000,
  _compiler_stubs_code_size = 24000,
  _final_stubs_code_size = 24000
  STUBGEN_ARCH_BLOBS_DO(DEFINE_BLOB_SIZE)
};

#undef DEFINE_BLOB_SIZE

// CRC32 Intrinsics.
#define CRC32_TABLE_SIZE (4 * 256)
#define REVERSE_CRC32_POLY 0xEDB88320
@@ -1,6 +1,6 @@
/*
 * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2024 SAP SE. All rights reserved.
 * Copyright (c) 2015, 2025 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -147,8 +147,8 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
|
||||
assert(sizeof(AccessFlags) == 2, "wrong size");
|
||||
__ lhz(R11_scratch1/*access_flags*/, method_(access_flags));
|
||||
// testbit with condition register.
|
||||
__ testbitdi(CCR0, R0, R11_scratch1/*access_flags*/, JVM_ACC_STATIC_BIT);
|
||||
__ btrue(CCR0, L);
|
||||
__ testbitdi(CR0, R0, R11_scratch1/*access_flags*/, JVM_ACC_STATIC_BIT);
|
||||
__ btrue(CR0, L);
|
||||
// For non-static functions, pass "this" in R4_ARG2 and copy it
|
||||
// to 2nd C-arg slot.
|
||||
// We need to box the Java object here, so we use arg_java
|
||||
@@ -175,8 +175,8 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
|
||||
// signature points to '(' at entry
|
||||
#ifdef ASSERT
|
||||
__ lbz(sig_byte, 0, signature);
|
||||
__ cmplwi(CCR0, sig_byte, '(');
|
||||
__ bne(CCR0, do_dontreachhere);
|
||||
__ cmplwi(CR0, sig_byte, '(');
|
||||
__ bne(CR0, do_dontreachhere);
|
||||
#endif
|
||||
|
||||
__ bind(loop_start);
|
||||
@@ -184,41 +184,41 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
|
||||
__ addi(argcnt, argcnt, 1);
|
||||
__ lbzu(sig_byte, 1, signature);
|
||||
|
||||
__ cmplwi(CCR0, sig_byte, ')'); // end of signature
|
||||
__ beq(CCR0, loop_end);
|
||||
__ cmplwi(CR0, sig_byte, ')'); // end of signature
|
||||
__ beq(CR0, loop_end);
|
||||
|
||||
__ cmplwi(CCR0, sig_byte, 'B'); // byte
|
||||
__ beq(CCR0, do_int);
|
||||
__ cmplwi(CR0, sig_byte, 'B'); // byte
|
||||
__ beq(CR0, do_int);
|
||||
|
||||
__ cmplwi(CCR0, sig_byte, 'C'); // char
|
||||
__ beq(CCR0, do_int);
|
||||
__ cmplwi(CR0, sig_byte, 'C'); // char
|
||||
__ beq(CR0, do_int);
|
||||
|
||||
__ cmplwi(CCR0, sig_byte, 'D'); // double
|
||||
__ beq(CCR0, do_double);
|
||||
__ cmplwi(CR0, sig_byte, 'D'); // double
|
||||
__ beq(CR0, do_double);
|
||||
|
||||
__ cmplwi(CCR0, sig_byte, 'F'); // float
|
||||
__ beq(CCR0, do_float);
|
||||
__ cmplwi(CR0, sig_byte, 'F'); // float
|
||||
__ beq(CR0, do_float);
|
||||
|
||||
__ cmplwi(CCR0, sig_byte, 'I'); // int
|
||||
__ beq(CCR0, do_int);
|
||||
__ cmplwi(CR0, sig_byte, 'I'); // int
|
||||
__ beq(CR0, do_int);
|
||||
|
||||
__ cmplwi(CCR0, sig_byte, 'J'); // long
|
||||
__ beq(CCR0, do_long);
|
||||
__ cmplwi(CR0, sig_byte, 'J'); // long
|
||||
__ beq(CR0, do_long);
|
||||
|
||||
__ cmplwi(CCR0, sig_byte, 'S'); // short
|
||||
__ beq(CCR0, do_int);
|
||||
__ cmplwi(CR0, sig_byte, 'S'); // short
|
||||
__ beq(CR0, do_int);
|
||||
|
||||
__ cmplwi(CCR0, sig_byte, 'Z'); // boolean
|
||||
__ beq(CCR0, do_int);
|
||||
__ cmplwi(CR0, sig_byte, 'Z'); // boolean
|
||||
__ beq(CR0, do_int);
|
||||
|
||||
__ cmplwi(CCR0, sig_byte, 'L'); // object
|
||||
__ beq(CCR0, do_object);
|
||||
__ cmplwi(CR0, sig_byte, 'L'); // object
|
||||
__ beq(CR0, do_object);
|
||||
|
||||
__ cmplwi(CCR0, sig_byte, '['); // array
|
||||
__ beq(CCR0, do_array);
|
||||
__ cmplwi(CR0, sig_byte, '['); // array
|
||||
__ beq(CR0, do_array);
|
||||
|
||||
// __ cmplwi(CCR0, sig_byte, 'V'); // void cannot appear since we do not parse the return type
|
||||
// __ beq(CCR0, do_void);
|
||||
// __ cmplwi(CR0, sig_byte, 'V'); // void cannot appear since we do not parse the return type
|
||||
// __ beq(CR0, do_void);
|
||||
|
||||
__ bind(do_dontreachhere);
|
||||
|
||||
@@ -231,16 +231,16 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
|
||||
|
||||
__ bind(start_skip);
|
||||
__ lbzu(sig_byte, 1, signature);
|
||||
__ cmplwi(CCR0, sig_byte, '[');
|
||||
__ beq(CCR0, start_skip); // skip further brackets
|
||||
__ cmplwi(CCR0, sig_byte, '9');
|
||||
__ bgt(CCR0, end_skip); // no optional size
|
||||
__ cmplwi(CCR0, sig_byte, '0');
|
||||
__ bge(CCR0, start_skip); // skip optional size
|
||||
__ cmplwi(CR0, sig_byte, '[');
|
||||
__ beq(CR0, start_skip); // skip further brackets
|
||||
__ cmplwi(CR0, sig_byte, '9');
|
||||
__ bgt(CR0, end_skip); // no optional size
|
||||
__ cmplwi(CR0, sig_byte, '0');
|
||||
__ bge(CR0, start_skip); // skip optional size
|
||||
__ bind(end_skip);
|
||||
|
||||
__ cmplwi(CCR0, sig_byte, 'L');
|
||||
__ beq(CCR0, do_object); // for arrays of objects, the name of the object must be skipped
|
||||
__ cmplwi(CR0, sig_byte, 'L');
|
||||
__ beq(CR0, do_object); // for arrays of objects, the name of the object must be skipped
|
||||
__ b(do_boxed); // otherwise, go directly to do_boxed
|
||||
}
|
||||
|
||||
@@ -249,8 +249,8 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
|
||||
Label L;
|
||||
__ bind(L);
|
||||
__ lbzu(sig_byte, 1, signature);
|
||||
__ cmplwi(CCR0, sig_byte, ';');
|
||||
__ bne(CCR0, L);
|
||||
__ cmplwi(CR0, sig_byte, ';');
|
||||
__ bne(CR0, L);
|
||||
}
|
||||
// Need to box the Java object here, so we use arg_java (address of
|
||||
// current Java stack slot) as argument and don't dereference it as
|
||||
@@ -258,16 +258,16 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
|
||||
Label do_null;
|
||||
__ bind(do_boxed);
|
||||
__ ld(R0,0, arg_java);
|
||||
__ cmpdi(CCR0, R0, 0);
|
||||
__ cmpdi(CR0, R0, 0);
|
||||
__ li(intSlot,0);
|
||||
__ beq(CCR0, do_null);
|
||||
__ beq(CR0, do_null);
|
||||
__ mr(intSlot, arg_java);
|
||||
__ bind(do_null);
|
||||
__ std(intSlot, 0, arg_c);
|
||||
__ addi(arg_java, arg_java, -BytesPerWord);
|
||||
__ addi(arg_c, arg_c, BytesPerWord);
|
||||
__ cmplwi(CCR0, argcnt, max_int_register_arguments);
|
||||
__ blt(CCR0, move_intSlot_to_ARG);
|
||||
__ cmplwi(CR0, argcnt, max_int_register_arguments);
|
||||
__ blt(CR0, move_intSlot_to_ARG);
|
||||
__ b(loop_start);
|
||||
|
||||
__ bind(do_int);
|
||||
@@ -275,8 +275,8 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
|
||||
__ std(intSlot, 0, arg_c);
|
||||
__ addi(arg_java, arg_java, -BytesPerWord);
|
||||
__ addi(arg_c, arg_c, BytesPerWord);
|
||||
__ cmplwi(CCR0, argcnt, max_int_register_arguments);
|
||||
__ blt(CCR0, move_intSlot_to_ARG);
|
||||
__ cmplwi(CR0, argcnt, max_int_register_arguments);
|
||||
__ blt(CR0, move_intSlot_to_ARG);
|
||||
__ b(loop_start);
|
||||
|
||||
__ bind(do_long);
|
||||
@@ -284,8 +284,8 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
|
||||
__ std(intSlot, 0, arg_c);
|
||||
__ addi(arg_java, arg_java, - 2 * BytesPerWord);
|
||||
__ addi(arg_c, arg_c, BytesPerWord);
|
||||
__ cmplwi(CCR0, argcnt, max_int_register_arguments);
|
||||
__ blt(CCR0, move_intSlot_to_ARG);
|
||||
__ cmplwi(CR0, argcnt, max_int_register_arguments);
|
||||
__ blt(CR0, move_intSlot_to_ARG);
|
||||
__ b(loop_start);
|
||||
|
||||
__ bind(do_float);
|
||||
@@ -293,8 +293,8 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
|
||||
__ stfs(floatSlot, Argument::float_on_stack_offset_in_bytes_c, arg_c);
|
||||
__ addi(arg_java, arg_java, -BytesPerWord);
|
||||
__ addi(arg_c, arg_c, BytesPerWord);
|
||||
__ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
|
||||
__ blt(CCR0, move_floatSlot_to_FARG);
|
||||
__ cmplwi(CR0, fpcnt, max_fp_register_arguments);
|
||||
__ blt(CR0, move_floatSlot_to_FARG);
|
||||
__ b(loop_start);
|
||||
|
||||
__ bind(do_double);
|
||||
@@ -302,8 +302,8 @@ address TemplateInterpreterGenerator::generate_slow_signature_handler() {
|
||||
__ stfd(floatSlot, 0, arg_c);
|
||||
__ addi(arg_java, arg_java, - 2 * BytesPerWord);
|
||||
__ addi(arg_c, arg_c, BytesPerWord);
|
||||
__ cmplwi(CCR0, fpcnt, max_fp_register_arguments);
|
||||
__ blt(CCR0, move_floatSlot_to_FARG);
|
||||
__ cmplwi(CR0, fpcnt, max_fp_register_arguments);
|
||||
__ blt(CR0, move_floatSlot_to_FARG);
|
||||
__ b(loop_start);
|
||||
|
||||
__ bind(loop_end);
|
||||
@@ -510,8 +510,8 @@ address TemplateInterpreterGenerator::generate_Reference_get_entry(void) {
|
||||
__ ld(R3_RET, Interpreter::stackElementSize, R15_esp); // get receiver
|
||||
|
||||
// Check if receiver == nullptr and go the slow path.
|
||||
__ cmpdi(CCR0, R3_RET, 0);
|
||||
__ beq(CCR0, slow_path);
|
||||
__ cmpdi(CR0, R3_RET, 0);
|
||||
__ beq(CR0, slow_path);
|
||||
|
||||
__ load_heap_oop(R3_RET, referent_offset, R3_RET,
|
||||
/* non-volatile temp */ R31, R11_scratch1,
|
||||
@@ -725,8 +725,8 @@ void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
|
||||
if (ProfileInterpreter) {
|
||||
const Register Rmdo = R3_counters;
|
||||
__ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
|
||||
__ cmpdi(CCR0, Rmdo, 0);
|
||||
__ beq(CCR0, no_mdo);
|
||||
__ cmpdi(CR0, Rmdo, 0);
|
||||
__ beq(CR0, no_mdo);
|
||||
|
||||
// Increment invocation counter in the MDO.
|
||||
const int mdo_ic_offs = in_bytes(MethodData::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
|
||||
@@ -735,7 +735,7 @@ void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
|
||||
__ addi(Rscratch2, Rscratch2, increment);
|
||||
__ stw(Rscratch2, mdo_ic_offs, Rmdo);
|
||||
__ and_(Rscratch1, Rscratch2, Rscratch1);
|
||||
__ bne(CCR0, done);
|
||||
__ bne(CR0, done);
|
||||
__ b(*overflow);
|
||||
}
|
||||
|
||||
@@ -748,7 +748,7 @@ void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
|
||||
__ addi(Rscratch2, Rscratch2, increment);
|
||||
__ stw(Rscratch2, mo_ic_offs, R3_counters);
|
||||
__ and_(Rscratch1, Rscratch2, Rscratch1);
|
||||
__ beq(CCR0, *overflow);
|
||||
__ beq(CR0, *overflow);
|
||||
|
||||
__ bind(done);
|
||||
}
|
||||
@@ -789,8 +789,8 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_f
|
||||
BLOCK_COMMENT("stack_overflow_check_with_compare {");
|
||||
__ sub(Rmem_frame_size, R1_SP, Rmem_frame_size);
|
||||
__ ld(Rscratch1, thread_(stack_overflow_limit));
|
||||
__ cmpld(CCR0/*is_stack_overflow*/, Rmem_frame_size, Rscratch1);
|
||||
__ bgt(CCR0/*is_stack_overflow*/, done);
|
||||
__ cmpld(CR0/*is_stack_overflow*/, Rmem_frame_size, Rscratch1);
|
||||
__ bgt(CR0/*is_stack_overflow*/, done);
|
||||
|
||||
// The stack overflows. Load target address of the runtime stub and call it.
|
||||
assert(SharedRuntime::throw_StackOverflowError_entry() != nullptr, "generated in wrong order");
|
||||
@@ -799,13 +799,13 @@ void TemplateInterpreterGenerator::generate_stack_overflow_check(Register Rmem_f
|
||||
// Restore caller_sp (c2i adapter may exist, but no shrinking of interpreted caller frame).
|
||||
#ifdef ASSERT
|
||||
Label frame_not_shrunk;
|
||||
__ cmpld(CCR0, R1_SP, R21_sender_SP);
|
||||
__ ble(CCR0, frame_not_shrunk);
|
||||
__ cmpld(CR0, R1_SP, R21_sender_SP);
|
||||
__ ble(CR0, frame_not_shrunk);
|
||||
__ stop("frame shrunk");
|
||||
__ bind(frame_not_shrunk);
|
||||
__ ld(Rscratch1, 0, R1_SP);
|
||||
__ ld(R0, 0, R21_sender_SP);
|
||||
__ cmpd(CCR0, R0, Rscratch1);
|
||||
__ cmpd(CR0, R0, Rscratch1);
|
||||
__ asm_assert_eq("backlink");
|
||||
#endif // ASSERT
|
||||
__ mr(R1_SP, R21_sender_SP);
|
||||
@@ -829,8 +829,8 @@ void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratc
|
||||
// Check if methods needs synchronization.
|
||||
{
|
||||
Label Lok;
|
||||
__ testbitdi(CCR0, R0, Rflags, JVM_ACC_SYNCHRONIZED_BIT);
|
||||
__ btrue(CCR0,Lok);
|
||||
__ testbitdi(CR0, R0, Rflags, JVM_ACC_SYNCHRONIZED_BIT);
|
||||
__ btrue(CR0,Lok);
|
||||
__ stop("method doesn't need synchronization");
|
||||
__ bind(Lok);
|
||||
}
|
||||
@@ -842,8 +842,8 @@ void TemplateInterpreterGenerator::lock_method(Register Rflags, Register Rscratc
|
||||
Label Lstatic;
|
||||
Label Ldone;
|
||||
|
||||
__ testbitdi(CCR0, R0, Rflags, JVM_ACC_STATIC_BIT);
|
||||
__ btrue(CCR0, Lstatic);
|
||||
__ testbitdi(CR0, R0, Rflags, JVM_ACC_STATIC_BIT);
|
||||
__ btrue(CR0, Lstatic);
|
||||
|
||||
// Non-static case: load receiver obj from stack and we're done.
|
||||
__ ld(Robj_to_lock, R18_locals);
|
||||
@@ -950,8 +950,8 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Regist
|
||||
// environment and one for a possible native mirror.
|
||||
Label skip_native_calculate_max_stack;
|
||||
__ addi(Rtop_frame_size, Rsize_of_parameters, 2);
|
||||
__ cmpwi(CCR0, Rtop_frame_size, Argument::n_int_register_parameters_c);
|
||||
__ bge(CCR0, skip_native_calculate_max_stack);
|
||||
__ cmpwi(CR0, Rtop_frame_size, Argument::n_int_register_parameters_c);
|
||||
__ bge(CR0, skip_native_calculate_max_stack);
|
||||
__ li(Rtop_frame_size, Argument::n_int_register_parameters_c);
|
||||
__ bind(skip_native_calculate_max_stack);
|
||||
__ sldi(Rsize_of_parameters, Rsize_of_parameters, Interpreter::logStackElementSize);
|
||||
@@ -999,8 +999,8 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call, Regist
|
||||
if (ProfileInterpreter) {
|
||||
Label zero_continue;
|
||||
__ ld(R28_mdx, method_(method_data));
|
||||
__ cmpdi(CCR0, R28_mdx, 0);
|
||||
__ beq(CCR0, zero_continue);
|
||||
__ cmpdi(CR0, R28_mdx, 0);
|
||||
__ beq(CR0, zero_continue);
|
||||
__ addi(R28_mdx, R28_mdx, in_bytes(MethodData::data_offset()));
|
||||
__ bind(zero_continue);
|
||||
}
|
||||
@@ -1330,8 +1330,8 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
__ ld(signature_handler_fd, method_(signature_handler));
|
||||
Label call_signature_handler;
|
||||
|
||||
__ cmpdi(CCR0, signature_handler_fd, 0);
|
||||
__ bne(CCR0, call_signature_handler);
|
||||
__ cmpdi(CR0, signature_handler_fd, 0);
|
||||
__ bne(CR0, call_signature_handler);
|
||||
|
||||
// Method has never been called. Either generate a specialized
|
||||
// handler or point to the slow one.
|
||||
@@ -1342,8 +1342,8 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
// Check for an exception while looking up the target method. If we
|
||||
// incurred one, bail.
|
||||
__ ld(pending_exception, thread_(pending_exception));
|
||||
__ cmpdi(CCR0, pending_exception, 0);
|
||||
__ bne(CCR0, exception_return_sync_check); // Has pending exception.
|
||||
__ cmpdi(CR0, pending_exception, 0);
|
||||
__ bne(CR0, exception_return_sync_check); // Has pending exception.
|
||||
|
||||
// Reload signature handler, it may have been created/assigned in the meanwhile.
|
||||
__ ld(signature_handler_fd, method_(signature_handler));
|
||||
@@ -1398,8 +1398,8 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
// Access_flags is non-volatile and still, no need to restore it.
|
||||
|
||||
// Restore access flags.
|
||||
__ testbitdi(CCR0, R0, access_flags, JVM_ACC_STATIC_BIT);
|
||||
__ bfalse(CCR0, method_is_not_static);
|
||||
__ testbitdi(CR0, R0, access_flags, JVM_ACC_STATIC_BIT);
|
||||
__ bfalse(CR0, method_is_not_static);
|
||||
|
||||
// Load mirror from interpreter frame (FP in R11_scratch1)
|
||||
__ ld(R21_tmp1, _ijava_state_neg(mirror), R11_scratch1);
|
||||
@@ -1508,8 +1508,8 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
// Not suspended.
|
||||
// TODO PPC port assert(4 == Thread::sz_suspend_flags(), "unexpected field size");
|
||||
__ lwz(suspend_flags, thread_(suspend_flags));
|
||||
__ cmpwi(CCR1, suspend_flags, 0);
|
||||
__ beq(CCR1, sync_check_done);
|
||||
__ cmpwi(CR1, suspend_flags, 0);
|
||||
__ beq(CR1, sync_check_done);
|
||||
|
||||
__ bind(do_safepoint);
|
||||
__ isync();
|
||||
@@ -1552,8 +1552,8 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
// Check preemption for Object.wait()
|
||||
Label not_preempted;
|
||||
__ ld(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread);
|
||||
__ cmpdi(CCR0, R0, 0);
|
||||
__ beq(CCR0, not_preempted);
|
||||
__ cmpdi(CR0, R0, 0);
|
||||
__ beq(CR0, not_preempted);
|
||||
__ mtlr(R0);
|
||||
__ li(R0, 0);
|
||||
__ std(R0, in_bytes(JavaThread::preempt_alternate_return_offset()), R16_thread);
|
||||
@@ -1611,8 +1611,8 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
|
||||
Label exception_return_sync_check_already_unlocked;
|
||||
__ ld(R0/*pending_exception*/, thread_(pending_exception));
|
||||
__ cmpdi(CCR0, R0/*pending_exception*/, 0);
|
||||
__ bne(CCR0, exception_return_sync_check_already_unlocked);
|
||||
__ cmpdi(CR0, R0/*pending_exception*/, 0);
|
||||
__ bne(CR0, exception_return_sync_check_already_unlocked);
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// No exception pending.
|
||||
@@ -1706,7 +1706,7 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
|
||||
__ subf(Rnum, Rsize_of_parameters, Rsize_of_locals);
|
||||
__ subf(Rslot_addr, Rsize_of_parameters, R18_locals);
|
||||
__ srdi_(Rnum, Rnum, Interpreter::logStackElementSize);
|
||||
__ beq(CCR0, Lno_locals);
|
||||
__ beq(CR0, Lno_locals);
|
||||
__ li(R0, 0);
|
||||
__ mtctr(Rnum);
|
||||
|
||||
@@ -2080,8 +2080,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
|
||||
__ ld(return_pc, 0, R1_SP);
|
||||
__ ld(return_pc, _abi0(lr), return_pc);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), return_pc);
|
||||
__ cmpdi(CCR0, R3_RET, 0);
|
||||
__ bne(CCR0, Lcaller_not_deoptimized);
|
||||
__ cmpdi(CR0, R3_RET, 0);
|
||||
__ bne(CR0, Lcaller_not_deoptimized);
|
||||
|
||||
// The deoptimized case.
|
||||
// In this case, we can't call dispatch_next() after the frame is
|
||||
@@ -2127,16 +2127,16 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
|
||||
Label L_done;
|
||||
|
||||
__ lbz(R11_scratch1, 0, R14_bcp);
|
||||
__ cmpwi(CCR0, R11_scratch1, Bytecodes::_invokestatic);
|
||||
__ bne(CCR0, L_done);
|
||||
__ cmpwi(CR0, R11_scratch1, Bytecodes::_invokestatic);
|
||||
__ bne(CR0, L_done);
|
||||
|
||||
// The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
|
||||
// Detect such a case in the InterpreterRuntime function and return the member name argument, or null.
|
||||
__ ld(R4_ARG2, 0, R18_locals);
|
||||
__ call_VM(R4_ARG2, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp);
|
||||
|
||||
__ cmpdi(CCR0, R4_ARG2, 0);
|
||||
__ beq(CCR0, L_done);
|
||||
__ cmpdi(CR0, R4_ARG2, 0);
|
||||
__ beq(CR0, L_done);
|
||||
__ std(R4_ARG2, wordSize, R15_esp);
|
||||
__ bind(L_done);
|
||||
#endif // INCLUDE_JVMTI
|
||||
@@ -2321,8 +2321,8 @@ address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
|
||||
int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
|
||||
__ ld(R11_scratch1, offs1, R11_scratch1);
|
||||
__ lwa(R12_scratch2, offs2, R12_scratch2);
|
||||
__ cmpd(CCR0, R12_scratch2, R11_scratch1);
|
||||
__ blt(CCR0, Lskip_vm_call);
|
||||
__ cmpd(CR0, R12_scratch2, R11_scratch1);
|
||||
__ blt(CR0, Lskip_vm_call);
|
||||
}
|
||||
|
||||
__ push(state);
|
||||
@@ -2396,8 +2396,8 @@ void TemplateInterpreterGenerator::stop_interpreter_at() {
|
||||
int offs2 = __ load_const_optimized(R12_scratch2, (address) &BytecodeCounter::_counter_value, R0, true);
|
||||
__ ld(R11_scratch1, offs1, R11_scratch1);
|
||||
__ lwa(R12_scratch2, offs2, R12_scratch2);
|
||||
__ cmpd(CCR0, R12_scratch2, R11_scratch1);
|
||||
__ bne(CCR0, L);
|
||||
__ cmpd(CR0, R12_scratch2, R11_scratch1);
|
||||
__ bne(CR0, L);
|
||||
__ illtrap();
|
||||
__ bind(L);
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
/*
 * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2024 SAP SE. All rights reserved.
 * Copyright (c) 2013, 2025 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -123,9 +123,9 @@ void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Reg
|
||||
int code_offset = (byte_no == f1_byte) ? in_bytes(ResolvedFieldEntry::get_code_offset())
|
||||
: in_bytes(ResolvedFieldEntry::put_code_offset());
|
||||
__ lbz(Rnew_bc, code_offset, Rtemp);
|
||||
__ cmpwi(CCR0, Rnew_bc, 0);
|
||||
__ cmpwi(CR0, Rnew_bc, 0);
|
||||
__ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
|
||||
__ beq(CCR0, L_patch_done);
|
||||
__ beq(CR0, L_patch_done);
|
||||
// __ isync(); // acquire not needed
|
||||
break;
|
||||
}
|
||||
@@ -140,8 +140,8 @@ void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Reg
|
||||
if (JvmtiExport::can_post_breakpoint()) {
|
||||
Label L_fast_patch;
|
||||
__ lbz(Rtemp, 0, R14_bcp);
|
||||
__ cmpwi(CCR0, Rtemp, (unsigned int)(unsigned char)Bytecodes::_breakpoint);
|
||||
__ bne(CCR0, L_fast_patch);
|
||||
__ cmpwi(CR0, Rtemp, (unsigned int)(unsigned char)Bytecodes::_breakpoint);
|
||||
__ bne(CR0, L_fast_patch);
|
||||
// Perform the quickening, slowly, in the bowels of the breakpoint table.
|
||||
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R19_method, R14_bcp, Rnew_bc);
|
||||
__ b(L_patch_done);
|
||||
@@ -261,14 +261,14 @@ void TemplateTable::ldc(LdcType type) {
|
||||
__ addi(Rscratch2, Rscratch2, tags_offset);
|
||||
__ lbzx(Rscratch2, Rscratch2, Rscratch1);
|
||||
|
||||
__ cmpwi(CCR0, Rscratch2, JVM_CONSTANT_UnresolvedClass); // Unresolved class?
|
||||
__ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_UnresolvedClassInError); // Unresolved class in error state?
|
||||
__ cror(CCR0, Assembler::equal, CCR1, Assembler::equal);
|
||||
__ cmpwi(CR0, Rscratch2, JVM_CONSTANT_UnresolvedClass); // Unresolved class?
|
||||
__ cmpwi(CR1, Rscratch2, JVM_CONSTANT_UnresolvedClassInError); // Unresolved class in error state?
|
||||
__ cror(CR0, Assembler::equal, CR1, Assembler::equal);
|
||||
|
||||
// Resolved class - need to call vm to get java mirror of the class.
|
||||
__ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_Class);
|
||||
__ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal); // Neither resolved class nor unresolved case from above?
|
||||
__ beq(CCR0, notClass);
|
||||
__ cmpwi(CR1, Rscratch2, JVM_CONSTANT_Class);
|
||||
__ crnor(CR0, Assembler::equal, CR1, Assembler::equal); // Neither resolved class nor unresolved case from above?
|
||||
__ beq(CR0, notClass);
|
||||
|
||||
__ li(R4, is_ldc_wide(type) ? 1 : 0);
|
||||
call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R4);
|
||||
@@ -279,16 +279,16 @@ void TemplateTable::ldc(LdcType type) {
|
||||
__ bind(notClass);
|
||||
__ addi(Rcpool, Rcpool, base_offset);
|
||||
__ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
|
||||
__ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
|
||||
__ bne(CCR0, notInt);
|
||||
__ cmpdi(CR0, Rscratch2, JVM_CONSTANT_Integer);
|
||||
__ bne(CR0, notInt);
|
||||
__ lwax(R17_tos, Rcpool, Rscratch1);
|
||||
__ push(itos);
|
||||
__ b(exit);
|
||||
|
||||
__ align(32, 12);
|
||||
__ bind(notInt);
|
||||
__ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
|
||||
__ bne(CCR0, notFloat);
|
||||
__ cmpdi(CR0, Rscratch2, JVM_CONSTANT_Float);
|
||||
__ bne(CR0, notFloat);
|
||||
__ lfsx(F15_ftos, Rcpool, Rscratch1);
|
||||
__ push(ftos);
|
||||
__ b(exit);
|
||||
@@ -318,12 +318,12 @@ void TemplateTable::fast_aldc(LdcType type) {
|
||||
int simm16_rest = __ load_const_optimized(R11_scratch1, Universe::the_null_sentinel_addr(), R0, true);
|
||||
__ ld(R31, simm16_rest, R11_scratch1);
|
||||
__ resolve_oop_handle(R31, R11_scratch1, R12_scratch2, MacroAssembler::PRESERVATION_NONE);
|
||||
__ cmpld(CCR0, R17_tos, R31);
|
||||
__ cmpld(CR0, R17_tos, R31);
|
||||
if (VM_Version::has_isel()) {
|
||||
__ isel_0(R17_tos, CCR0, Assembler::equal);
|
||||
__ isel_0(R17_tos, CR0, Assembler::equal);
|
||||
} else {
|
||||
Label not_sentinel;
|
||||
__ bne(CCR0, not_sentinel);
|
||||
__ bne(CR0, not_sentinel);
|
||||
__ li(R17_tos, 0);
|
||||
__ bind(not_sentinel);
|
||||
}
|
||||
@@ -359,15 +359,15 @@ void TemplateTable::ldc2_w() {
|
||||
__ lbzx(Rtag, Rtag, Rindex);
|
||||
__ sldi(Rindex, Rindex, LogBytesPerWord);
|
||||
|
||||
__ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double);
|
||||
__ bne(CCR0, not_double);
|
||||
__ cmpdi(CR0, Rtag, JVM_CONSTANT_Double);
|
||||
__ bne(CR0, not_double);
|
||||
__ lfdx(F15_ftos, Rcpool, Rindex);
|
||||
__ push(dtos);
|
||||
__ b(exit);
|
||||
|
||||
__ bind(not_double);
|
||||
__ cmpdi(CCR0, Rtag, JVM_CONSTANT_Long);
|
||||
__ bne(CCR0, not_long);
|
||||
__ cmpdi(CR0, Rtag, JVM_CONSTANT_Long);
|
||||
__ bne(CR0, not_long);
|
||||
__ ldx(R17_tos, Rcpool, Rindex);
|
||||
__ push(ltos);
|
||||
__ b(exit);
|
||||
@@ -401,32 +401,32 @@ void TemplateTable::condy_helper(Label& Done) {
|
||||
{
|
||||
// tos in (itos, ftos, stos, btos, ctos, ztos)
|
||||
Label notInt, notFloat, notShort, notByte, notChar, notBool;
|
||||
__ cmplwi(CCR0, flags, itos);
|
||||
__ bne(CCR0, notInt);
|
||||
__ cmplwi(CR0, flags, itos);
|
||||
__ bne(CR0, notInt);
|
||||
// itos
|
||||
__ lwax(R17_tos, obj, off);
|
||||
__ push(itos);
|
||||
__ b(Done);
|
||||
|
||||
__ bind(notInt);
|
||||
__ cmplwi(CCR0, flags, ftos);
|
||||
__ bne(CCR0, notFloat);
|
||||
__ cmplwi(CR0, flags, ftos);
|
||||
__ bne(CR0, notFloat);
|
||||
// ftos
|
||||
__ lfsx(F15_ftos, obj, off);
|
||||
__ push(ftos);
|
||||
__ b(Done);
|
||||
|
||||
__ bind(notFloat);
|
||||
__ cmplwi(CCR0, flags, stos);
|
||||
__ bne(CCR0, notShort);
|
||||
__ cmplwi(CR0, flags, stos);
|
||||
__ bne(CR0, notShort);
|
||||
// stos
|
||||
__ lhax(R17_tos, obj, off);
|
||||
__ push(stos);
|
||||
__ b(Done);
|
||||
|
||||
__ bind(notShort);
|
||||
__ cmplwi(CCR0, flags, btos);
|
||||
__ bne(CCR0, notByte);
|
||||
__ cmplwi(CR0, flags, btos);
|
||||
__ bne(CR0, notByte);
|
||||
// btos
|
||||
__ lbzx(R17_tos, obj, off);
|
||||
__ extsb(R17_tos, R17_tos);
|
||||
@@ -434,16 +434,16 @@ void TemplateTable::condy_helper(Label& Done) {
|
||||
__ b(Done);
|
||||
|
||||
__ bind(notByte);
|
||||
__ cmplwi(CCR0, flags, ctos);
|
||||
__ bne(CCR0, notChar);
|
||||
__ cmplwi(CR0, flags, ctos);
|
||||
__ bne(CR0, notChar);
|
||||
// ctos
|
||||
__ lhzx(R17_tos, obj, off);
|
||||
__ push(ctos);
|
||||
__ b(Done);
|
||||
|
||||
__ bind(notChar);
|
||||
__ cmplwi(CCR0, flags, ztos);
|
||||
__ bne(CCR0, notBool);
|
||||
__ cmplwi(CR0, flags, ztos);
|
||||
__ bne(CR0, notBool);
|
||||
// ztos
|
||||
__ lbzx(R17_tos, obj, off);
|
||||
__ push(ztos);
|
||||
@@ -456,16 +456,16 @@ void TemplateTable::condy_helper(Label& Done) {
|
||||
case Bytecodes::_ldc2_w:
|
||||
{
|
||||
Label notLong, notDouble;
|
||||
__ cmplwi(CCR0, flags, ltos);
|
||||
__ bne(CCR0, notLong);
|
||||
__ cmplwi(CR0, flags, ltos);
|
||||
__ bne(CR0, notLong);
|
||||
// ltos
|
||||
__ ldx(R17_tos, obj, off);
|
||||
__ push(ltos);
|
||||
__ b(Done);
|
||||
|
||||
__ bind(notLong);
|
||||
__ cmplwi(CCR0, flags, dtos);
|
||||
__ bne(CCR0, notDouble);
|
||||
__ cmplwi(CR0, flags, dtos);
|
||||
__ bne(CR0, notDouble);
|
||||
// dtos
|
||||
__ lfdx(F15_ftos, obj, off);
|
||||
__ push(dtos);
|
||||
@@ -517,16 +517,16 @@ void TemplateTable::iload_internal(RewriteControl rc) {
|
||||
// last two iloads in a pair. Comparing against fast_iload means that
|
||||
// the next bytecode is neither an iload or a caload, and therefore
|
||||
// an iload pair.
|
||||
__ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
|
||||
__ beq(CCR0, Ldone);
|
||||
__ cmpwi(CR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
|
||||
__ beq(CR0, Ldone);
|
||||
|
||||
__ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
|
||||
__ cmpwi(CR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
|
||||
__ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2);
|
||||
__ beq(CCR1, Lrewrite);
|
||||
__ beq(CR1, Lrewrite);
|
||||
|
||||
__ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
|
||||
__ cmpwi(CR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
|
||||
__ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_icaload);
|
||||
__ beq(CCR0, Lrewrite);
|
||||
__ beq(CR0, Lrewrite);
|
||||
|
||||
__ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
|
||||
|
||||
@@ -812,20 +812,20 @@ void TemplateTable::aload_0_internal(RewriteControl rc) {
|
||||
__ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp);
|
||||
|
||||
// If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair.
|
||||
__ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
|
||||
__ beq(CCR0, Ldont_rewrite);
|
||||
__ cmpwi(CR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
|
||||
__ beq(CR0, Ldont_rewrite);
|
||||
|
||||
__ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
|
||||
__ cmpwi(CR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
|
||||
__ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0);
|
||||
__ beq(CCR1, Lrewrite);
|
||||
__ beq(CR1, Lrewrite);
|
||||
|
||||
__ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
|
||||
__ cmpwi(CR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
|
||||
__ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0);
|
||||
__ beq(CCR0, Lrewrite);
|
||||
__ beq(CR0, Lrewrite);
|
||||
|
||||
__ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_fgetfield);
|
||||
__ cmpwi(CR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_fgetfield);
|
||||
__ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_faccess_0);
|
||||
__ beq(CCR1, Lrewrite);
|
||||
__ beq(CR1, Lrewrite);
|
||||
|
||||
__ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aload_0);
|
||||
|
||||
@@ -997,8 +997,8 @@ void TemplateTable::aastore() {
|
||||
Register Rscratch3 = Rindex;
|
||||
|
||||
// Do array store check - check for null value first.
|
||||
__ cmpdi(CCR0, R17_tos, 0);
|
||||
__ beq(CCR0, Lis_null);
|
||||
__ cmpdi(CR0, R17_tos, 0);
|
||||
__ beq(CR0, Lis_null);
|
||||
|
||||
__ load_klass(Rarray_klass, Rarray);
|
||||
__ load_klass(Rvalue_klass, R17_tos);
|
||||
@@ -1045,9 +1045,9 @@ void TemplateTable::bastore() {
|
||||
__ load_klass(Rscratch, Rarray);
|
||||
__ lwz(Rscratch, in_bytes(Klass::layout_helper_offset()), Rscratch);
|
||||
int diffbit = exact_log2(Klass::layout_helper_boolean_diffbit());
|
||||
__ testbitdi(CCR0, R0, Rscratch, diffbit);
|
||||
__ testbitdi(CR0, R0, Rscratch, diffbit);
|
||||
Label L_skip;
|
||||
__ bfalse(CCR0, L_skip);
|
||||
__ bfalse(CR0, L_skip);
|
||||
__ andi(R17_tos, R17_tos, 1); // if it is a T_BOOLEAN array, mask the stored value to 0/1
|
||||
__ bind(L_skip);
|
||||
|
||||
@@ -1262,11 +1262,11 @@ void TemplateTable::idiv() {
|
||||
Register Rdividend = R11_scratch1; // Used by irem.
|
||||
|
||||
__ addi(R0, R17_tos, 1);
|
||||
__ cmplwi(CCR0, R0, 2);
|
||||
__ bgt(CCR0, Lnormal); // divisor <-1 or >1
|
||||
__ cmplwi(CR0, R0, 2);
|
||||
__ bgt(CR0, Lnormal); // divisor <-1 or >1
|
||||
|
||||
__ cmpwi(CCR1, R17_tos, 0);
|
||||
__ beq(CCR1, Lexception); // divisor == 0
|
||||
__ cmpwi(CR1, R17_tos, 0);
|
||||
__ beq(CR1, Lexception); // divisor == 0
|
||||
|
||||
__ pop_i(Rdividend);
|
||||
__ mullw(R17_tos, Rdividend, R17_tos); // div by +/-1
|
||||
@@ -1307,11 +1307,11 @@ void TemplateTable::ldiv() {
|
||||
Register Rdividend = R11_scratch1; // Used by lrem.
|
||||
|
||||
__ addi(R0, R17_tos, 1);
|
||||
__ cmpldi(CCR0, R0, 2);
|
||||
__ bgt(CCR0, Lnormal); // divisor <-1 or >1
|
||||
__ cmpldi(CR0, R0, 2);
|
||||
__ bgt(CR0, Lnormal); // divisor <-1 or >1
|
||||
|
||||
__ cmpdi(CCR1, R17_tos, 0);
|
||||
__ beq(CCR1, Lexception); // divisor == 0
|
||||
__ cmpdi(CR1, R17_tos, 0);
|
||||
__ beq(CR1, Lexception); // divisor == 0
|
||||
|
||||
__ pop_l(Rdividend);
|
||||
__ mulld(R17_tos, Rdividend, R17_tos); // div by +/-1
|
||||
@@ -1565,18 +1565,18 @@ void TemplateTable::convert() {
|
||||
|
||||
case Bytecodes::_d2i:
|
||||
case Bytecodes::_f2i:
|
||||
__ fcmpu(CCR0, F15_ftos, F15_ftos);
|
||||
__ fcmpu(CR0, F15_ftos, F15_ftos);
|
||||
__ li(R17_tos, 0); // 0 in case of NAN
|
||||
__ bso(CCR0, done);
|
||||
__ bso(CR0, done);
|
||||
__ fctiwz(F15_ftos, F15_ftos);
|
||||
__ move_d_to_l();
|
||||
break;
|
||||
|
||||
case Bytecodes::_d2l:
|
||||
case Bytecodes::_f2l:
|
||||
__ fcmpu(CCR0, F15_ftos, F15_ftos);
|
||||
__ fcmpu(CR0, F15_ftos, F15_ftos);
|
||||
__ li(R17_tos, 0); // 0 in case of NAN
|
||||
__ bso(CCR0, done);
|
||||
__ bso(CR0, done);
|
||||
__ fctidz(F15_ftos, F15_ftos);
|
||||
__ move_d_to_l();
|
||||
break;
|
||||
@@ -1593,7 +1593,7 @@ void TemplateTable::lcmp() {
|
||||
const Register Rscratch = R11_scratch1;
|
||||
__ pop_l(Rscratch); // first operand, deeper in stack
|
||||
|
||||
__ cmpd(CCR0, Rscratch, R17_tos); // compare
|
||||
__ cmpd(CR0, Rscratch, R17_tos); // compare
|
||||
__ set_cmp3(R17_tos); // set result as follows: <: -1, =: 0, >: 1
|
||||
}
|
||||
|
||||
@@ -1611,7 +1611,7 @@ void TemplateTable::float_cmp(bool is_float, int unordered_result) {
|
||||
__ pop_d(Rfirst);
|
||||
}
|
||||
|
||||
__ fcmpu(CCR0, Rfirst, Rsecond); // compare
|
||||
__ fcmpu(CR0, Rfirst, Rsecond); // compare
|
||||
// if unordered_result is 1, treat unordered_result like 'greater than'
|
||||
assert(unordered_result == 1 || unordered_result == -1, "unordered_result can be either 1 or -1");
|
||||
__ set_cmpu3(R17_tos, unordered_result != 1);
|
||||
@@ -1683,8 +1683,8 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
|
||||
Label Lforward;
|
||||
|
||||
// Check branch direction.
|
||||
__ cmpdi(CCR0, Rdisp, 0);
|
||||
__ bgt(CCR0, Lforward);
|
||||
__ cmpdi(CR0, Rdisp, 0);
|
||||
__ bgt(CR0, Lforward);
|
||||
|
||||
__ get_method_counters(R19_method, R4_counters, Lforward);
|
||||
|
||||
@@ -1695,8 +1695,8 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
|
||||
|
||||
// If no method data exists, go to profile_continue.
|
||||
__ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
|
||||
__ cmpdi(CCR0, Rmdo, 0);
|
||||
__ beq(CCR0, Lno_mdo);
|
||||
__ cmpdi(CR0, Rmdo, 0);
|
||||
__ beq(CR0, Lno_mdo);
|
||||
|
||||
// Increment backedge counter in the MDO.
|
||||
const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
|
||||
@@ -1706,7 +1706,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
|
||||
__ stw(Rscratch2, mdo_bc_offs, Rmdo);
|
||||
if (UseOnStackReplacement) {
|
||||
__ and_(Rscratch3, Rscratch2, Rscratch3);
|
||||
__ bne(CCR0, Lforward);
|
||||
__ bne(CR0, Lforward);
|
||||
__ b(Loverflow);
|
||||
} else {
|
||||
__ b(Lforward);
|
||||
@@ -1722,7 +1722,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
|
||||
__ stw(Rscratch2, mo_bc_offs, R4_counters);
|
||||
if (UseOnStackReplacement) {
|
||||
__ and_(Rscratch3, Rscratch2, Rscratch3);
|
||||
__ bne(CCR0, Lforward);
|
||||
__ bne(CR0, Lforward);
|
||||
} else {
|
||||
__ b(Lforward);
|
||||
}
|
||||
@@ -1733,13 +1733,13 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
|
||||
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);
|
||||
|
||||
// Was an OSR adapter generated?
|
||||
__ cmpdi(CCR0, R3_RET, 0);
|
||||
__ beq(CCR0, Lforward);
|
||||
__ cmpdi(CR0, R3_RET, 0);
|
||||
__ beq(CR0, Lforward);
|
||||
|
||||
// Has the nmethod been invalidated already?
|
||||
__ lbz(R0, in_bytes(nmethod::state_offset()), R3_RET);
|
||||
__ cmpwi(CCR0, R0, nmethod::in_use);
|
||||
__ bne(CCR0, Lforward);
|
||||
__ cmpwi(CR0, R0, nmethod::in_use);
|
||||
__ bne(CR0, Lforward);
|
||||
|
||||
// Migrate the interpreter frame off of the stack.
|
||||
// We can use all registers because we will not return to interpreter from this point.
|
||||
@@ -1775,18 +1775,18 @@ void TemplateTable::if_cmp_common(Register Rfirst, Register Rsecond, Register Rs
|
||||
|
||||
if (is_jint) {
|
||||
if (cmp0) {
|
||||
__ cmpwi(CCR0, Rfirst, 0);
|
||||
__ cmpwi(CR0, Rfirst, 0);
|
||||
} else {
|
||||
__ cmpw(CCR0, Rfirst, Rsecond);
|
||||
__ cmpw(CR0, Rfirst, Rsecond);
|
||||
}
|
||||
} else {
|
||||
if (cmp0) {
|
||||
__ cmpdi(CCR0, Rfirst, 0);
|
||||
__ cmpdi(CR0, Rfirst, 0);
|
||||
} else {
|
||||
__ cmpd(CCR0, Rfirst, Rsecond);
|
||||
__ cmpd(CR0, Rfirst, Rsecond);
|
||||
}
|
||||
}
|
||||
branch_conditional(CCR0, cc, Lnot_taken, /*invert*/ true);
|
||||
branch_conditional(CR0, cc, Lnot_taken, /*invert*/ true);
|
||||
|
||||
// Conition is false => Jump!
|
||||
branch(false, false);
|
||||
@@ -1885,10 +1885,10 @@ void TemplateTable::tableswitch() {
|
||||
__ get_u4(Rhigh_byte, Rdef_offset_addr, 2 *BytesPerInt, InterpreterMacroAssembler::Unsigned);
|
||||
|
||||
// Check for default case (=index outside [low,high]).
|
||||
__ cmpw(CCR0, R17_tos, Rlow_byte);
|
||||
__ cmpw(CCR1, R17_tos, Rhigh_byte);
|
||||
__ blt(CCR0, Ldefault_case);
|
||||
__ bgt(CCR1, Ldefault_case);
|
||||
__ cmpw(CR0, R17_tos, Rlow_byte);
|
||||
__ cmpw(CR1, R17_tos, Rhigh_byte);
|
||||
__ blt(CR0, Ldefault_case);
|
||||
__ bgt(CR1, Ldefault_case);
|
||||
|
||||
// Lookup dispatch offset.
|
||||
__ sub(Rindex, R17_tos, Rlow_byte);
|
||||
@@ -1944,8 +1944,8 @@ void TemplateTable::fast_linearswitch() {
|
||||
__ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair.
|
||||
|
||||
__ mtctr(Rcount);
|
||||
__ cmpwi(CCR0, Rcount, 0);
|
||||
__ bne(CCR0, Lloop_entry);
|
||||
__ cmpwi(CR0, Rcount, 0);
|
||||
__ bne(CR0, Lloop_entry);
|
||||
|
||||
// Default case
|
||||
__ bind(Ldefault_case);
|
||||
@@ -1961,8 +1961,8 @@ void TemplateTable::fast_linearswitch() {
|
||||
__ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt);
|
||||
__ bind(Lloop_entry);
|
||||
__ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned);
|
||||
__ cmpw(CCR0, Rvalue, Rcmp_value);
|
||||
__ bne(CCR0, Lsearch_loop);
|
||||
__ cmpw(CR0, Rvalue, Rcmp_value);
|
||||
__ bne(CR0, Lsearch_loop);
|
||||
|
||||
// Found, load offset.
|
||||
__ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed);
|
||||
@@ -2057,8 +2057,8 @@ void TemplateTable::fast_binaryswitch() {
|
||||
// else
|
||||
// Rh = Ri
|
||||
Label Lgreater;
|
||||
__ cmpw(CCR0, Rkey, Rscratch);
|
||||
__ bge(CCR0, Lgreater);
|
||||
__ cmpw(CR0, Rkey, Rscratch);
|
||||
__ bge(CR0, Lgreater);
|
||||
__ mr(Rj, Rh);
|
||||
__ b(entry);
|
||||
__ bind(Lgreater);
|
||||
@@ -2067,10 +2067,10 @@ void TemplateTable::fast_binaryswitch() {
|
||||
// while (i+1 < j)
|
||||
__ bind(entry);
|
||||
__ addi(Rscratch, Ri, 1);
|
||||
__ cmpw(CCR0, Rscratch, Rj);
|
||||
__ cmpw(CR0, Rscratch, Rj);
|
||||
__ add(Rh, Ri, Rj); // start h = i + j >> 1;
|
||||
|
||||
__ blt(CCR0, loop);
|
||||
__ blt(CR0, loop);
|
||||
}
|
||||
|
||||
// End of binary search, result index is i (must check again!).
|
||||
@@ -2086,8 +2086,8 @@ void TemplateTable::fast_binaryswitch() {
|
||||
|
||||
Label not_found;
|
||||
// Ri = offset offset
|
||||
__ cmpw(CCR0, Rkey, Rscratch);
|
||||
__ beq(CCR0, not_found);
|
||||
__ cmpw(CR0, Rkey, Rscratch);
|
||||
__ beq(CR0, not_found);
|
||||
// entry not found -> j = default offset
|
||||
__ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
|
||||
__ b(default_case);
|
||||
@@ -2130,8 +2130,8 @@ void TemplateTable::_return(TosState state) {
|
||||
// Load klass of this obj.
|
||||
__ load_klass(Rklass, R17_tos);
|
||||
__ lbz(Rklass_flags, in_bytes(Klass::misc_flags_offset()), Rklass);
|
||||
__ testbitdi(CCR0, R0, Rklass_flags, exact_log2(KlassFlags::_misc_has_finalizer));
|
||||
__ bfalse(CCR0, Lskip_register_finalizer);
|
||||
__ testbitdi(CR0, R0, Rklass_flags, exact_log2(KlassFlags::_misc_has_finalizer));
|
||||
__ bfalse(CR0, Lskip_register_finalizer);
|
||||
|
||||
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R17_tos /* obj */);
|
||||
|
||||
@@ -2143,7 +2143,7 @@ void TemplateTable::_return(TosState state) {
|
||||
Label no_safepoint;
|
||||
__ ld(R11_scratch1, in_bytes(JavaThread::polling_word_offset()), R16_thread);
|
||||
__ andi_(R11_scratch1, R11_scratch1, SafepointMechanism::poll_bit());
|
||||
__ beq(CCR0, no_safepoint);
|
||||
__ beq(CR0, no_safepoint);
|
||||
__ push(state);
|
||||
__ push_cont_fastpath();
|
||||
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::at_safepoint));
|
||||
@@ -2214,8 +2214,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no, Register Rca
|
||||
// Load-acquire the bytecode to match store-release in InterpreterRuntime
|
||||
__ lbz(Rscratch, bytecode_offset, Rcache);
|
||||
// Acquire by cmp-br-isync (see below).
|
||||
__ cmpdi(CCR0, Rscratch, (int)code);
|
||||
__ beq(CCR0, Lresolved);
|
||||
__ cmpdi(CR0, Rscratch, (int)code);
|
||||
__ beq(CR0, Lresolved);
|
||||
|
||||
// Class initialization barrier slow path lands here as well.
|
||||
__ bind(L_clinit_barrier_slow);
|
||||
@@ -2263,8 +2263,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
|
||||
int code_offset = (byte_no == f1_byte) ? in_bytes(ResolvedFieldEntry::get_code_offset())
|
||||
: in_bytes(ResolvedFieldEntry::put_code_offset());
|
||||
__ lbz(R0, code_offset, Rcache);
|
||||
__ cmpwi(CCR0, R0, (int)code); // have we resolved this bytecode?
|
||||
__ beq(CCR0, resolved);
|
||||
__ cmpwi(CR0, R0, (int)code); // have we resolved this bytecode?
|
||||
__ beq(CR0, resolved);
|
||||
|
||||
// resolve first time through
|
||||
address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
|
||||
@@ -2332,8 +2332,8 @@ void TemplateTable::load_resolved_method_entry_handle(Register cache,
|
||||
|
||||
// maybe push appendix to arguments (just before return address)
|
||||
Label L_no_push;
|
||||
__ testbitdi(CCR0, R0, flags, ResolvedMethodEntry::has_appendix_shift);
|
||||
__ bfalse(CCR0, L_no_push);
|
||||
__ testbitdi(CR0, R0, flags, ResolvedMethodEntry::has_appendix_shift);
|
||||
__ bfalse(CR0, L_no_push);
|
||||
// invokehandle uses an index into the resolved references array
|
||||
__ lhz(ref_index, in_bytes(ResolvedMethodEntry::resolved_references_index_offset()), cache);
|
||||
// Push the appendix as a trailing parameter.
|
||||
@@ -2395,8 +2395,8 @@ void TemplateTable::load_invokedynamic_entry(Register method) {
|
||||
__ ld_ptr(method, in_bytes(ResolvedIndyEntry::method_offset()), cache);
|
||||
|
||||
// The invokedynamic is unresolved iff method is null
|
||||
__ cmpdi(CCR0, method, 0);
|
||||
__ bne(CCR0, resolved);
|
||||
__ cmpdi(CR0, method, 0);
|
||||
__ bne(CR0, resolved);
|
||||
|
||||
Bytecodes::Code code = bytecode();
|
||||
|
||||
@@ -2408,7 +2408,7 @@ void TemplateTable::load_invokedynamic_entry(Register method) {
|
||||
__ load_resolved_indy_entry(cache, index);
|
||||
__ ld_ptr(method, in_bytes(ResolvedIndyEntry::method_offset()), cache);
|
||||
|
||||
DEBUG_ONLY(__ cmpdi(CCR0, method, 0));
|
||||
DEBUG_ONLY(__ cmpdi(CR0, method, 0));
|
||||
__ asm_assert_ne("Should be resolved by now");
|
||||
__ bind(resolved);
|
||||
__ isync(); // Order load wrt. succeeding loads.
|
||||
@@ -2417,7 +2417,7 @@ void TemplateTable::load_invokedynamic_entry(Register method) {
|
||||
// Check if there is an appendix
|
||||
__ lbz(index, in_bytes(ResolvedIndyEntry::flags_offset()), cache);
|
||||
__ rldicl_(R0, index, 64-ResolvedIndyEntry::has_appendix_shift, 63);
|
||||
__ beq(CCR0, L_no_push);
|
||||
__ beq(CR0, L_no_push);
|
||||
|
||||
// Get appendix
|
||||
__ lhz(index, in_bytes(ResolvedIndyEntry::resolved_references_index_offset()), cache);
|
||||
@@ -2489,8 +2489,8 @@ void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch,
|
||||
int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_access_count_addr(), R0, true);
|
||||
__ lwz(Rscratch, offs, Rscratch);
|
||||
|
||||
__ cmpwi(CCR0, Rscratch, 0);
|
||||
__ beq(CCR0, Lno_field_access_post);
|
||||
__ cmpwi(CR0, Rscratch, 0);
|
||||
__ beq(CR0, Lno_field_access_post);
|
||||
|
||||
// Post access enabled - do it!
|
||||
if (is_static) {
|
||||
@@ -2574,13 +2574,13 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
|
||||
|
||||
#ifdef ASSERT
|
||||
Label LFlagInvalid;
|
||||
__ cmpldi(CCR0, Rtos_state, number_of_states);
|
||||
__ bge(CCR0, LFlagInvalid);
|
||||
__ cmpldi(CR0, Rtos_state, number_of_states);
|
||||
__ bge(CR0, LFlagInvalid);
|
||||
#endif
|
||||
|
||||
// Load from branch table and dispatch (volatile case: one instruction ahead).
|
||||
__ sldi(Rtos_state, Rtos_state, LogBytesPerWord);
|
||||
__ cmpwi(CCR2, Rscratch, 1); // Volatile?
|
||||
__ cmpwi(CR2, Rscratch, 1); // Volatile?
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
||||
__ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
|
||||
}
|
||||
@@ -2631,12 +2631,12 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
|
||||
}
|
||||
{
|
||||
Label acquire_double;
|
||||
__ beq(CCR2, acquire_double); // Volatile?
|
||||
__ beq(CR2, acquire_double); // Volatile?
|
||||
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
|
||||
|
||||
__ bind(acquire_double);
|
||||
__ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
|
||||
__ beq_predict_taken(CCR0, Lisync);
|
||||
__ fcmpu(CR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
|
||||
__ beq_predict_taken(CR0, Lisync);
|
||||
__ b(Lisync); // In case of NAN.
|
||||
}
|
||||
|
||||
@@ -2652,12 +2652,12 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
|
||||
}
|
||||
{
|
||||
Label acquire_float;
|
||||
__ beq(CCR2, acquire_float); // Volatile?
|
||||
__ beq(CR2, acquire_float); // Volatile?
|
||||
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
|
||||
|
||||
__ bind(acquire_float);
|
||||
__ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
|
||||
__ beq_predict_taken(CCR0, Lisync);
|
||||
__ fcmpu(CR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
|
||||
__ beq_predict_taken(CR0, Lisync);
|
||||
__ b(Lisync); // In case of NAN.
|
||||
}
|
||||
|
||||
@@ -2671,7 +2671,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
|
||||
if (!is_static && rc == may_rewrite) {
|
||||
patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch);
|
||||
}
|
||||
__ beq(CCR2, Lacquire); // Volatile?
|
||||
__ beq(CR2, Lacquire); // Volatile?
|
||||
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
|
||||
|
||||
__ align(32, 28, 28); // Align load.
|
||||
@@ -2684,7 +2684,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
|
||||
if (!is_static && rc == may_rewrite) {
|
||||
patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch);
|
||||
}
|
||||
__ beq(CCR2, Lacquire); // Volatile?
|
||||
__ beq(CR2, Lacquire); // Volatile?
|
||||
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
|
||||
|
||||
__ align(32, 28, 28); // Align load.
|
||||
@@ -2698,7 +2698,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
|
||||
if (!is_static && rc == may_rewrite) {
|
||||
patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
|
||||
}
|
||||
__ beq(CCR2, Lacquire); // Volatile?
|
||||
__ beq(CR2, Lacquire); // Volatile?
|
||||
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
|
||||
|
||||
__ align(32, 28, 28); // Align load.
|
||||
@@ -2712,7 +2712,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
|
||||
// use btos rewriting, no truncating to t/f bit is needed for getfield.
|
||||
patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
|
||||
}
|
||||
__ beq(CCR2, Lacquire); // Volatile?
|
||||
__ beq(CR2, Lacquire); // Volatile?
|
||||
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
|
||||
|
||||
__ align(32, 28, 28); // Align load.
|
||||
@@ -2725,7 +2725,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
|
||||
if (!is_static && rc == may_rewrite) {
|
||||
patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch);
|
||||
}
|
||||
__ beq(CCR2, Lacquire); // Volatile?
|
||||
__ beq(CR2, Lacquire); // Volatile?
|
||||
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
|
||||
|
||||
__ align(32, 28, 28); // Align load.
|
||||
@@ -2738,7 +2738,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
|
||||
if (!is_static && rc == may_rewrite) {
|
||||
patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch);
|
||||
}
|
||||
__ beq(CCR2, Lacquire); // Volatile?
|
||||
__ beq(CR2, Lacquire); // Volatile?
|
||||
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
|
||||
|
||||
__ align(32, 28, 28); // Align load.
|
||||
@@ -2753,7 +2753,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
|
||||
if (!is_static && rc == may_rewrite) {
|
||||
patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
|
||||
}
|
||||
__ beq(CCR2, Lacquire); // Volatile?
|
||||
__ beq(CR2, Lacquire); // Volatile?
|
||||
__ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
|
||||
|
||||
__ align(32, 12);
|
||||
@@ -2796,8 +2796,8 @@ void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, boo
|
||||
int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true);
|
||||
__ lwz(Rscratch, offs, Rscratch);
|
||||
|
||||
__ cmpwi(CCR0, Rscratch, 0);
|
||||
__ beq(CCR0, Lno_field_mod_post);
|
||||
__ cmpwi(CR0, Rscratch, 0);
|
||||
__ beq(CR0, Lno_field_mod_post);
|
||||
|
||||
// Do the post
|
||||
const Register Robj = Rscratch;
|
||||
@@ -2830,11 +2830,11 @@ void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, boo
|
||||
// the type to determine where the object is.
|
||||
__ lbz(Rtos_state, in_bytes(ResolvedFieldEntry::type_offset()), Rcache);
|
||||
|
||||
__ cmpwi(CCR0, Rtos_state, ltos);
|
||||
__ cmpwi(CCR1, Rtos_state, dtos);
|
||||
__ cmpwi(CR0, Rtos_state, ltos);
|
||||
__ cmpwi(CR1, Rtos_state, dtos);
|
||||
__ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(1));
|
||||
__ crnor(CCR0, Assembler::equal, CCR1, Assembler::equal);
|
||||
__ beq(CCR0, is_one_slot);
|
||||
__ crnor(CR0, Assembler::equal, CR1, Assembler::equal);
|
||||
__ beq(CR0, is_one_slot);
|
||||
__ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(2));
|
||||
__ bind(is_one_slot);
|
||||
break;
|
||||
@@ -2881,7 +2881,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
|
||||
Rscratch2 = R12_scratch2, // used by load_field_cp_cache_entry
|
||||
Rscratch3 = R6_ARG4,
|
||||
Rbc = Rscratch3;
|
||||
const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).
|
||||
const ConditionRegister CR_is_vol = CR2; // Non-volatile condition register (survives runtime call in do_oop_store).
|
||||
|
||||
static address field_rw_branch_table[number_of_states],
|
||||
field_norw_branch_table[number_of_states],
|
||||
@@ -2907,8 +2907,8 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteContr
|
||||
|
||||
#ifdef ASSERT
|
||||
Label LFlagInvalid;
|
||||
__ cmpldi(CCR0, Rtos_state, number_of_states);
|
||||
__ bge(CCR0, LFlagInvalid);
|
||||
__ cmpldi(CR0, Rtos_state, number_of_states);
|
||||
__ bge(CR0, LFlagInvalid);
|
||||
#endif
|
||||
|
||||
// Load from branch table and dispatch (volatile case: one instruction ahead).
|
||||
@@ -3124,7 +3124,7 @@ void TemplateTable::fast_storefield(TosState state) {
|
||||
Rscratch = R11_scratch1, // used by load_field_cp_cache_entry
|
||||
Rscratch2 = R12_scratch2, // used by load_field_cp_cache_entry
|
||||
Rscratch3 = R4_ARG2;
|
||||
const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).
|
||||
const ConditionRegister CR_is_vol = CR2; // Non-volatile condition register (survives runtime call in do_oop_store).
|
||||
|
||||
// Constant pool already resolved => Load flags and offset of field.
|
||||
__ load_field_entry(Rcache, Rscratch);
|
||||
@@ -3139,7 +3139,7 @@ void TemplateTable::fast_storefield(TosState state) {
|
||||
if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpdi(CR_is_vol, Rscratch, 1); }
|
||||
{
|
||||
Label LnotVolatile;
|
||||
__ beq(CCR0, LnotVolatile);
|
||||
__ beq(CR0, LnotVolatile);
|
||||
__ release();
|
||||
__ align(32, 12);
|
||||
__ bind(LnotVolatile);
|
||||
@@ -3219,7 +3219,7 @@ void TemplateTable::fast_accessfield(TosState state) {
|
||||
|
||||
// Get volatile flag.
|
||||
__ rldicl_(Rscratch, Rflags, 64-ResolvedFieldEntry::is_volatile_shift, 63); // Extract volatile bit.
|
||||
__ bne(CCR0, LisVolatile);
|
||||
__ bne(CR0, LisVolatile);
|
||||
|
||||
switch(bytecode()) {
|
||||
case Bytecodes::_fast_agetfield:
|
||||
@@ -3307,8 +3307,8 @@ void TemplateTable::fast_accessfield(TosState state) {
|
||||
Label Ldummy;
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
|
||||
__ lfsx(F15_ftos, Rclass_or_obj, Roffset);
|
||||
__ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
|
||||
__ bne_predict_not_taken(CCR0, Ldummy);
|
||||
__ fcmpu(CR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
|
||||
__ bne_predict_not_taken(CR0, Ldummy);
|
||||
__ bind(Ldummy);
|
||||
__ isync();
|
||||
break;
|
||||
@@ -3322,8 +3322,8 @@ void TemplateTable::fast_accessfield(TosState state) {
|
||||
Label Ldummy;
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
|
||||
__ lfdx(F15_ftos, Rclass_or_obj, Roffset);
|
||||
__ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
|
||||
__ bne_predict_not_taken(CCR0, Ldummy);
|
||||
__ fcmpu(CR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
|
||||
__ bne_predict_not_taken(CR0, Ldummy);
|
||||
__ bind(Ldummy);
|
||||
__ isync();
|
||||
break;
|
||||
@@ -3360,7 +3360,7 @@ void TemplateTable::fast_xaccess(TosState state) {
|
||||
|
||||
// Get volatile flag.
|
||||
__ rldicl_(Rscratch, Rflags, 64-ResolvedFieldEntry::is_volatile_shift, 63); // Extract volatile bit.
|
||||
__ bne(CCR0, LisVolatile);
|
||||
__ bne(CR0, LisVolatile);
|
||||
|
||||
switch(state) {
|
||||
case atos:
|
||||
@@ -3398,8 +3398,8 @@ void TemplateTable::fast_xaccess(TosState state) {
|
||||
Label Ldummy;
|
||||
if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
|
||||
__ lfsx(F15_ftos, Rclass_or_obj, Roffset);
|
||||
__ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
|
||||
__ bne_predict_not_taken(CCR0, Ldummy);
|
||||
__ fcmpu(CR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
|
||||
__ bne_predict_not_taken(CR0, Ldummy);
|
||||
__ bind(Ldummy);
|
||||
__ isync();
|
||||
break;
|
||||
@@ -3480,8 +3480,8 @@ void TemplateTable::invokevirtual(int byte_no) {
|
||||
load_resolved_method_entry_virtual(Rcache, noreg, Rflags);
|
||||
|
||||
// Handle final method separately.
|
||||
__ testbitdi(CCR0, R0, Rflags, ResolvedMethodEntry::is_vfinal_shift);
|
||||
__ bfalse(CCR0, LnotFinal);
|
||||
__ testbitdi(CR0, R0, Rflags, ResolvedMethodEntry::is_vfinal_shift);
|
||||
__ bfalse(CR0, LnotFinal);
|
||||
|
||||
if (RewriteBytecodes && !CDSConfig::is_using_archive() && !CDSConfig::is_dumping_static_archive()) {
|
||||
patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
|
||||
@@ -3587,8 +3587,8 @@ void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
|
||||
Label LnotFinal;
|
||||
|
||||
// Check for vfinal.
|
||||
__ testbitdi(CCR0, R0, Rflags, ResolvedMethodEntry::is_vfinal_shift);
|
||||
__ bfalse(CCR0, LnotFinal);
|
||||
__ testbitdi(CR0, R0, Rflags, ResolvedMethodEntry::is_vfinal_shift);
|
||||
__ bfalse(CR0, LnotFinal);
|
||||
|
||||
Register Rscratch = Rflags, // Rflags is dead now.
|
||||
Rmethod = Rtemp2,
|
||||
@@ -3641,8 +3641,8 @@ void TemplateTable::invokeinterface(int byte_no) {
|
||||
// to handle this corner case.
|
||||
|
||||
Label LnotObjectMethod, Lthrow_ame;
|
||||
__ testbitdi(CCR0, R0, Rflags, ResolvedMethodEntry::is_forced_virtual_shift);
|
||||
__ bfalse(CCR0, LnotObjectMethod);
|
||||
__ testbitdi(CR0, R0, Rflags, ResolvedMethodEntry::is_forced_virtual_shift);
|
||||
__ bfalse(CR0, LnotObjectMethod);
|
||||
invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rcache, Rscratch1, Rscratch2);
|
||||
__ bind(LnotObjectMethod);
|
||||
|
||||
@@ -3652,8 +3652,8 @@ void TemplateTable::invokeinterface(int byte_no) {
|
||||
// Check for private method invocation - indicated by vfinal
|
||||
Label LnotVFinal, L_no_such_interface, L_subtype;
|
||||
|
||||
__ testbitdi(CCR0, R0, Rflags, ResolvedMethodEntry::is_vfinal_shift);
|
||||
__ bfalse(CCR0, LnotVFinal);
|
||||
__ testbitdi(CR0, R0, Rflags, ResolvedMethodEntry::is_vfinal_shift);
|
||||
__ bfalse(CR0, LnotVFinal);
|
||||
|
||||
__ check_klass_subtype(Rrecv_klass, Rinterface_klass, Rscratch1, Rscratch2, L_subtype);
|
||||
// If we get here the typecheck failed
|
||||
@@ -3687,8 +3687,8 @@ void TemplateTable::invokeinterface(int byte_no) {
|
||||
__ lookup_interface_method(Rrecv_klass, Rinterface_klass, Rindex, Rmethod2, Rscratch1, Rscratch2,
|
||||
L_no_such_interface);
|
||||
|
||||
__ cmpdi(CCR0, Rmethod2, 0);
|
||||
__ beq(CCR0, Lthrow_ame);
|
||||
__ cmpdi(CR0, Rmethod2, 0);
|
||||
__ beq(CR0, Lthrow_ame);
|
||||
// Found entry. Jump off!
|
||||
// Argument and return type profiling.
|
||||
__ profile_arguments_type(Rmethod2, Rscratch1, Rscratch2, true);
|
||||
@@ -3795,8 +3795,8 @@ void TemplateTable::_new() {
|
||||
__ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
|
||||
__ lbzx(Rtags, Rindex, Rtags);
|
||||
|
||||
__ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
|
||||
__ bne(CCR0, Lslow_case);
|
||||
__ cmpdi(CR0, Rtags, JVM_CONSTANT_Class);
|
||||
__ bne(CR0, Lslow_case);
|
||||
|
||||
// Get instanceKlass
|
||||
__ sldi(Roffset, Rindex, LogBytesPerWord);
|
||||
@@ -3810,7 +3810,7 @@ void TemplateTable::_new() {
|
||||
|
||||
// Make sure klass is not abstract, or interface or java/lang/Class.
|
||||
__ andi_(R0, Rinstance_size, Klass::_lh_instance_slow_path_bit); // slow path bit equals 0?
|
||||
__ bne(CCR0, Lslow_case);
|
||||
__ bne(CR0, Lslow_case);
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Fast case:
|
||||
@@ -3829,8 +3829,8 @@ void TemplateTable::_new() {
|
||||
__ add(RnewTopValue, Rinstance_size, RoldTopValue);
|
||||
|
||||
// If there is enough space, we do not CAS and do not clear.
|
||||
__ cmpld(CCR0, RnewTopValue, RendValue);
|
||||
__ bgt(CCR0, Lslow_case);
|
||||
__ cmpld(CR0, RnewTopValue, RendValue);
|
||||
__ bgt(CR0, Lslow_case);
|
||||
|
||||
__ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
|
||||
|
||||
@@ -3947,8 +3947,8 @@ void TemplateTable::checkcast() {
|
||||
Rtags = R12_scratch2;
|
||||
|
||||
// Null does not pass.
|
||||
__ cmpdi(CCR0, R17_tos, 0);
|
||||
__ beq(CCR0, Lis_null);
|
||||
__ cmpdi(CR0, R17_tos, 0);
|
||||
__ beq(CR0, Lis_null);
|
||||
|
||||
// Get constant pool tag to find out if the bytecode has already been "quickened".
|
||||
__ get_cpool_and_tags(Rcpool, Rtags);
|
||||
@@ -3958,8 +3958,8 @@ void TemplateTable::checkcast() {
|
||||
__ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
|
||||
__ lbzx(Rtags, Rtags, Roffset);
|
||||
|
||||
__ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
|
||||
__ beq(CCR0, Lquicked);
|
||||
__ cmpdi(CR0, Rtags, JVM_CONSTANT_Class);
|
||||
__ beq(CR0, Lquicked);
|
||||
|
||||
// Call into the VM to "quicken" instanceof.
|
||||
__ push_ptr(); // for GC
|
||||
@@ -4009,8 +4009,8 @@ void TemplateTable::instanceof() {
|
||||
Rtags = R12_scratch2;
|
||||
|
||||
// Null does not pass.
|
||||
__ cmpdi(CCR0, R17_tos, 0);
|
||||
__ beq(CCR0, Lis_null);
|
||||
__ cmpdi(CR0, R17_tos, 0);
|
||||
__ beq(CR0, Lis_null);
|
||||
|
||||
// Get constant pool tag to find out if the bytecode has already been "quickened".
|
||||
__ get_cpool_and_tags(Rcpool, Rtags);
|
||||
@@ -4020,8 +4020,8 @@ void TemplateTable::instanceof() {
|
||||
__ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
|
||||
__ lbzx(Rtags, Rtags, Roffset);
|
||||
|
||||
__ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
|
||||
__ beq(CCR0, Lquicked);
|
||||
__ cmpdi(CR0, Rtags, JVM_CONSTANT_Class);
|
||||
__ beq(CR0, Lquicked);
|
||||
|
||||
// Call into the VM to "quicken" instanceof.
|
||||
__ push_ptr(); // for GC
|
||||
@@ -4127,8 +4127,8 @@ void TemplateTable::monitorenter() {
|
||||
__ null_check_throw(Robj_to_lock, -1, Rscratch1);
|
||||
|
||||
// Check if any slot is present => short cut to allocation if not.
|
||||
__ cmpld(CCR0, Rcurrent_monitor, Rbot);
|
||||
__ beq(CCR0, Lallocate_new);
|
||||
__ cmpld(CR0, Rcurrent_monitor, Rbot);
|
||||
__ beq(CR0, Lallocate_new);
|
||||
|
||||
// ------------------------------------------------------------------------------
|
||||
// Find a free slot in the monitor block.
|
||||
@@ -4141,24 +4141,24 @@ void TemplateTable::monitorenter() {
|
||||
__ ld(Rcurrent_obj, in_bytes(BasicObjectLock::obj_offset()), Rcurrent_monitor);
|
||||
// Exit if current entry is for same object; this guarantees, that new monitor
|
||||
// used for recursive lock is above the older one.
|
||||
__ cmpd(CCR0, Rcurrent_obj, Robj_to_lock);
|
||||
__ beq(CCR0, Lexit); // recursive locking
|
||||
__ cmpd(CR0, Rcurrent_obj, Robj_to_lock);
|
||||
__ beq(CR0, Lexit); // recursive locking
|
||||
|
||||
__ cmpdi(CCR0, Rcurrent_obj, 0);
|
||||
__ bne(CCR0, LnotFree);
|
||||
__ cmpdi(CR0, Rcurrent_obj, 0);
|
||||
__ bne(CR0, LnotFree);
|
||||
__ mr(Rfree_slot, Rcurrent_monitor); // remember free slot closest to the bottom
|
||||
__ bind(LnotFree);
|
||||
|
||||
__ addi(Rcurrent_monitor, Rcurrent_monitor, frame::interpreter_frame_monitor_size_in_bytes());
|
||||
__ cmpld(CCR0, Rcurrent_monitor, Rbot);
|
||||
__ bne(CCR0, Lloop);
|
||||
__ cmpld(CR0, Rcurrent_monitor, Rbot);
|
||||
__ bne(CR0, Lloop);
|
||||
__ bind(Lexit);
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------------------
|
||||
// Check if we found a free slot.
|
||||
__ cmpdi(CCR0, Rfree_slot, 0);
|
||||
__ bne(CCR0, Lfound);
|
||||
__ cmpdi(CR0, Rfree_slot, 0);
|
||||
__ bne(CR0, Lfound);
|
||||
|
||||
// We didn't find a free BasicObjLock => allocate one.
|
||||
__ bind(Lallocate_new);
|
||||
@@ -4206,8 +4206,8 @@ void TemplateTable::monitorexit() {
|
||||
__ null_check_throw(Robj_to_lock, -1, Rscratch);
|
||||
|
||||
// Check corner case: unbalanced monitorEnter / Exit.
|
||||
__ cmpld(CCR0, Rcurrent_monitor, Rbot);
|
||||
__ beq(CCR0, Lillegal_monitor_state);
|
||||
__ cmpld(CR0, Rcurrent_monitor, Rbot);
|
||||
__ beq(CR0, Lillegal_monitor_state);
|
||||
|
||||
// Find the corresponding slot in the monitors stack section.
|
||||
{
|
||||
@@ -4216,12 +4216,12 @@ void TemplateTable::monitorexit() {
|
||||
__ bind(Lloop);
|
||||
__ ld(Rcurrent_obj, in_bytes(BasicObjectLock::obj_offset()), Rcurrent_monitor);
|
||||
// Is this entry for same obj?
|
||||
__ cmpd(CCR0, Rcurrent_obj, Robj_to_lock);
|
||||
__ beq(CCR0, Lfound);
|
||||
__ cmpd(CR0, Rcurrent_obj, Robj_to_lock);
|
||||
__ beq(CR0, Lfound);
|
||||
|
||||
__ addi(Rcurrent_monitor, Rcurrent_monitor, frame::interpreter_frame_monitor_size_in_bytes());
|
||||
__ cmpld(CCR0, Rcurrent_monitor, Rbot);
|
||||
__ bne(CCR0, Lloop);
|
||||
__ cmpld(CR0, Rcurrent_monitor, Rbot);
|
||||
__ bne(CR0, Lloop);
|
||||
}
|
||||
|
||||
// Fell through without finding the basic obj lock => throw up!
|
||||
|
||||
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2024 SAP SE. All rights reserved.
+ * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -91,8 +91,8 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
|
||||
// Check offset vs vtable length.
|
||||
const Register vtable_len = R12_scratch2;
|
||||
__ lwz(vtable_len, in_bytes(Klass::vtable_length_offset()), rcvr_klass);
|
||||
__ cmpwi(CCR0, vtable_len, vtable_index*vtableEntry::size());
|
||||
__ bge(CCR0, L);
|
||||
__ cmpwi(CR0, vtable_len, vtable_index*vtableEntry::size());
|
||||
__ bge(CR0, L);
|
||||
__ li(R12_scratch2, vtable_index);
|
||||
__ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), R3_ARG1, R12_scratch2, false);
|
||||
__ bind(L);
|
||||
@@ -108,8 +108,8 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
|
||||
#ifndef PRODUCT
|
||||
if (DebugVtables) {
|
||||
Label L;
|
||||
__ cmpdi(CCR0, R19_method, 0);
|
||||
__ bne(CCR0, L);
|
||||
__ cmpdi(CR0, R19_method, 0);
|
||||
__ bne(CR0, L);
|
||||
__ stop("Vtable entry is ZERO");
|
||||
__ bind(L);
|
||||
}
|
||||
@@ -194,8 +194,8 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
|
||||
#ifndef PRODUCT
|
||||
if (DebugVtables) {
|
||||
Label ok;
|
||||
__ cmpdi(CCR0, R19_method, 0);
|
||||
__ bne(CCR0, ok);
|
||||
__ cmpdi(CR0, R19_method, 0);
|
||||
__ bne(CR0, ok);
|
||||
__ stop("method is null");
|
||||
__ bind(ok);
|
||||
}
|
||||
|
||||
@@ -330,7 +330,104 @@ class InternalAddress: public Address {
 };

 class Assembler : public AbstractAssembler {
- public:
+ protected:

static int zfa_zli_lookup_double(uint64_t value) {
|
||||
switch(value) {
|
||||
case 0xbff0000000000000 : return 0;
|
||||
case 0x0010000000000000 : return 1;
|
||||
case 0x3ef0000000000000 : return 2;
|
||||
case 0x3f00000000000000 : return 3;
|
||||
case 0x3f70000000000000 : return 4;
|
||||
case 0x3f80000000000000 : return 5;
|
||||
case 0x3fb0000000000000 : return 6;
|
||||
case 0x3fc0000000000000 : return 7;
|
||||
case 0x3fd0000000000000 : return 8;
|
||||
case 0x3fd4000000000000 : return 9;
|
||||
case 0x3fd8000000000000 : return 10;
|
||||
case 0x3fdc000000000000 : return 11;
|
||||
case 0x3fe0000000000000 : return 12;
|
||||
case 0x3fe4000000000000 : return 13;
|
||||
case 0x3fe8000000000000 : return 14;
|
||||
case 0x3fec000000000000 : return 15;
|
||||
case 0x3ff0000000000000 : return 16;
|
||||
case 0x3ff4000000000000 : return 17;
|
||||
case 0x3ff8000000000000 : return 18;
|
||||
case 0x3ffc000000000000 : return 19;
|
||||
case 0x4000000000000000 : return 20;
|
||||
case 0x4004000000000000 : return 21;
|
||||
case 0x4008000000000000 : return 22;
|
||||
case 0x4010000000000000 : return 23;
|
||||
case 0x4020000000000000 : return 24;
|
||||
case 0x4030000000000000 : return 25;
|
||||
case 0x4060000000000000 : return 26;
|
||||
case 0x4070000000000000 : return 27;
|
||||
case 0x40e0000000000000 : return 28;
|
||||
case 0x40f0000000000000 : return 29;
|
||||
case 0x7ff0000000000000 : return 30;
|
||||
case 0x7ff8000000000000 : return 31;
|
||||
default: break;
|
||||
}
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
||||
static int zfa_zli_lookup_float(uint32_t value) {
|
||||
switch(value) {
|
||||
case 0xbf800000 : return 0;
|
||||
case 0x00800000 : return 1;
|
||||
case 0x37800000 : return 2;
|
||||
case 0x38000000 : return 3;
|
||||
case 0x3b800000 : return 4;
|
||||
case 0x3c000000 : return 5;
|
||||
case 0x3d800000 : return 6;
|
||||
case 0x3e000000 : return 7;
|
||||
case 0x3e800000 : return 8;
|
||||
case 0x3ea00000 : return 9;
|
||||
case 0x3ec00000 : return 10;
|
||||
case 0x3ee00000 : return 11;
|
||||
case 0x3f000000 : return 12;
|
||||
case 0x3f200000 : return 13;
|
||||
case 0x3f400000 : return 14;
|
||||
case 0x3f600000 : return 15;
|
||||
case 0x3f800000 : return 16;
|
||||
case 0x3fa00000 : return 17;
|
||||
case 0x3fc00000 : return 18;
|
||||
case 0x3fe00000 : return 19;
|
||||
case 0x40000000 : return 20;
|
||||
case 0x40200000 : return 21;
|
||||
case 0x40400000 : return 22;
|
||||
case 0x40800000 : return 23;
|
||||
case 0x41000000 : return 24;
|
||||
case 0x41800000 : return 25;
|
||||
case 0x43000000 : return 26;
|
||||
case 0x43800000 : return 27;
|
||||
case 0x47000000 : return 28;
|
||||
case 0x47800000 : return 29;
|
||||
case 0x7f800000 : return 30;
|
||||
case 0x7fc00000 : return 31;
|
||||
default: break;
|
||||
}
|
||||
return -1;
|
||||
}
+
+ public:
+
+  static bool can_zfa_zli_float(jfloat f) {
+    if (!UseZfa) {
+      return false;
+    }
+    uint32_t f_bits = (uint32_t)jint_cast(f);
+    return zfa_zli_lookup_float(f_bits) != -1;
+  }
+
+  static bool can_zfa_zli_double(jdouble d) {
+    if (!UseZfa) {
+      return false;
+    }
+    uint64_t d_bits = (uint64_t)julong_cast(d);
+    return zfa_zli_lookup_double(d_bits) != -1;
+  }
+
 enum {
   instruction_size = 4,
@@ -972,6 +1069,13 @@ enum operand_size { int8, int16, int32, uint32, int64 };
     fp_base<Fmt, funct5>(Rd->raw_encoding(), Rs1->raw_encoding(), Rs2, (RoundingMode)rm);
   }

+  template <FmtPrecision Fmt, uint8_t funct5>
+  void fp_base(FloatRegister Rd, uint8_t Rs1, uint8_t Rs2, int8_t rm) {
+    guarantee(is_uimm5(Rs1), "Rs1 is out of validity");
+    guarantee(is_uimm5(Rs2), "Rs2 is out of validity");
+    fp_base<Fmt, funct5>(Rd->raw_encoding(), Rs1, Rs2, (RoundingMode)rm);
+  }
+
 public:

   enum FClassBits {
@@ -1293,6 +1397,18 @@ enum operand_size { int8, int16, int32, uint32, int64 };
     fp_base<H_16_hp, 0b11100>(Rd, Rs1, 0b00000, 0b000);
   }

+  // -------------- ZFA Instruction Definitions --------------
+  // Zfa Extension for Additional Floating-Point Instructions
+  void _fli_s(FloatRegister Rd, uint8_t Rs1) {
+    assert_cond(UseZfa);
+    fp_base<S_32_sp, 0b11110>(Rd, Rs1, 0b00001, 0b000);
+  }
+
+  void _fli_d(FloatRegister Rd, uint8_t Rs1) {
+    assert_cond(UseZfa);
+    fp_base<D_64_dp, 0b11110>(Rd, Rs1, 0b00001, 0b000);
+  }
+
   // ==========================
   // RISC-V Vector Extension
   // ==========================

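The _fli_s/_fli_d encodings above take a 5-bit table index rather than an immediate bit pattern, which is why the zfa_zli_lookup_* helpers map an exact IEEE-754 bit pattern to an index, or -1 when the value is not one of the 32 table entries. Below is a standalone sketch of that mapping using a handful of entries copied from the float table above; the bit-reinterpretation helper stands in for HotSpot's jint_cast and the rest of the table is omitted.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Reinterpret a float's bits, standing in for HotSpot's jint_cast.
static uint32_t float_bits(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  return bits;
}

// A few entries from the fli.s table above: exact bit pattern -> 5-bit index.
static int fli_s_index(uint32_t bits) {
  switch (bits) {
    case 0xbf800000: return 0;   // -1.0f
    case 0x3f000000: return 12;  //  0.5f
    case 0x3f800000: return 16;  //  1.0f
    case 0x40000000: return 20;  //  2.0f
    default:         return -1;  // not encodable with fli.s
  }
}

int main() {
  const float candidates[] = { 1.0f, 0.5f, 0.3f };
  for (float f : candidates) {
    int idx = fli_s_index(float_bits(f));
    if (idx >= 0) {
      std::printf("%f -> fli.s index %d\n", f, idx);
    } else {
      std::printf("%f -> needs a constant-table load\n", f);
    }
  }
  return 0;
}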
@@ -425,6 +425,8 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
   assert(dest->is_register(), "should not call otherwise");
   LIR_Const* c = src->as_constant_ptr();
   address const_addr = nullptr;
+  jfloat fconst;
+  jdouble dconst;

   switch (c->type()) {
     case T_INT:
@@ -460,15 +462,25 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
       break;

     case T_FLOAT:
-      const_addr = float_constant(c->as_jfloat());
-      assert(const_addr != nullptr, "must create float constant in the constant table");
-      __ flw(dest->as_float_reg(), InternalAddress(const_addr));
+      fconst = c->as_jfloat();
+      if (MacroAssembler::can_fp_imm_load(fconst)) {
+        __ fli_s(dest->as_float_reg(), fconst);
+      } else {
+        const_addr = float_constant(fconst);
+        assert(const_addr != nullptr, "must create float constant in the constant table");
+        __ flw(dest->as_float_reg(), InternalAddress(const_addr));
+      }
       break;

     case T_DOUBLE:
-      const_addr = double_constant(c->as_jdouble());
-      assert(const_addr != nullptr, "must create double constant in the constant table");
-      __ fld(dest->as_double_reg(), InternalAddress(const_addr));
+      dconst = c->as_jdouble();
+      if (MacroAssembler::can_dp_imm_load(dconst)) {
+        __ fli_d(dest->as_double_reg(), dconst);
+      } else {
+        const_addr = double_constant(c->as_jdouble());
+        assert(const_addr != nullptr, "must create double constant in the constant table");
+        __ fld(dest->as_double_reg(), InternalAddress(const_addr));
+      }
       break;

     default:

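The T_FLOAT/T_DOUBLE cases now try an in-register materialization first and only fall back to the constant table when the value is not encodable. The sketch below mirrors the can_dp_imm_load-style decision as plain, runnable C++; only a few table entries are included, since the full table lives in the assembler header above, and the helper names are local to this sketch.

#include <cassert>
#include <cstdint>
#include <cstring>

// Bit-exact reinterpretation, standing in for HotSpot's julong_cast.
static uint64_t double_bits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return bits;
}

// Subset of the fli.d table from the assembler header above.
static int fli_d_index(uint64_t bits) {
  switch (bits) {
    case 0x3fe0000000000000ULL: return 12;  // 0.5
    case 0x3ff0000000000000ULL: return 16;  // 1.0
    case 0x4000000000000000ULL: return 20;  // 2.0
    default:                    return -1;
  }
}

// Mirrors the can_dp_imm_load() decision: zero goes through fmv.d.x with the zero
// register, everything else must hit the fli.d table exactly.
static bool can_load_without_memory(double d) {
  uint64_t bits = double_bits(d);
  return bits == 0 || fli_d_index(bits) != -1;
}

int main() {
  assert(can_load_without_memory(0.0));    // all-zero bit pattern
  assert(!can_load_without_memory(-0.0));  // sign bit set, not in the table -> constant pool
  assert(can_load_without_memory(1.0));    // fli.d index 16
  assert(!can_load_without_memory(0.1));   // not an exact table entry -> constant pool
  return 0;
}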
@@ -1409,6 +1409,14 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
load_chr_insn str1_load_chr = str1_isL ? (load_chr_insn)&MacroAssembler::lbu : (load_chr_insn)&MacroAssembler::lhu;
|
||||
load_chr_insn str2_load_chr = str2_isL ? (load_chr_insn)&MacroAssembler::lbu : (load_chr_insn)&MacroAssembler::lhu;
|
||||
|
||||
int base_offset1 = arrayOopDesc::base_offset_in_bytes(T_BYTE);
|
||||
int base_offset2 = arrayOopDesc::base_offset_in_bytes(T_CHAR);
|
||||
|
||||
assert((base_offset1 % (UseCompactObjectHeaders ? 4 :
|
||||
(UseCompressedClassPointers ? 8 : 4))) == 0, "Must be");
|
||||
assert((base_offset2 % (UseCompactObjectHeaders ? 4 :
|
||||
(UseCompressedClassPointers ? 8 : 4))) == 0, "Must be");
|
||||
|
||||
BLOCK_COMMENT("string_compare {");
|
||||
|
||||
// Bizarrely, the counts are passed in bytes, regardless of whether they
|
||||
@@ -1426,6 +1434,24 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
     mv(cnt2, cnt1);
     bind(L);

+  // Load 4 bytes once to compare for alignment before main loop. Note that this
+  // is only possible for LL/UU case. We need to resort to load_long_misaligned
+  // for both LU and UL cases.
+  if (str1_isL == str2_isL) { // LL or UU
+    beq(str1, str2, DONE);
+    int base_offset = isLL ? base_offset1 : base_offset2;
+    if (AvoidUnalignedAccesses && (base_offset % 8) != 0) {
+      mv(t0, minCharsInWord / 2);
+      ble(cnt2, t0, SHORT_STRING);
+      lwu(tmp1, Address(str1));
+      lwu(tmp2, Address(str2));
+      bne(tmp1, tmp2, DIFFERENCE);
+      addi(str1, str1, 4);
+      addi(str2, str2, 4);
+      subi(cnt2, cnt2, minCharsInWord / 2);
+    }
+  }
+
   // A very short string
   mv(t0, minCharsInWord);
   ble(cnt2, t0, SHORT_STRING);
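The block added above consumes one 4-byte chunk when the array base offset leaves the data only 4-byte aligned, so the main loop can then compare 8 bytes per iteration on aligned addresses. Below is a standalone sketch of that pattern on plain byte buffers; the buffer names and sizes are illustrative, and, like the real code, it assumes both inputs share the same misalignment.

#include <cstdint>
#include <cstring>

// Compare two equally sized, equally misaligned byte buffers: consume one 4-byte
// word first if needed, then fall into an 8-byte-per-iteration loop, then a tail.
static bool buffers_equal(const uint8_t* a, const uint8_t* b, size_t len) {
  size_t i = 0;
  if (len >= 4 && (reinterpret_cast<uintptr_t>(a) % 8) != 0) {
    uint32_t wa, wb;
    std::memcpy(&wa, a, 4);
    std::memcpy(&wb, b, 4);
    if (wa != wb) return false;
    i += 4;
  }
  for (; i + 8 <= len; i += 8) {
    uint64_t wa, wb;
    std::memcpy(&wa, a + i, 8);
    std::memcpy(&wb, b + i, 8);
    if (wa != wb) return false;
  }
  for (; i < len; i++) {
    if (a[i] != b[i]) return false;
  }
  return true;
}

int main() {
  uint8_t x[20], y[20];
  std::memset(x, 7, sizeof(x));
  std::memset(y, 7, sizeof(y));
  return buffers_equal(x, y, sizeof(x)) ? 0 : 1;
}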
@@ -1434,8 +1460,14 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
// load first parts of strings and finish initialization while loading
|
||||
{
|
||||
if (str1_isL == str2_isL) { // LL or UU
|
||||
// check if str1 and str2 is same pointer
|
||||
beq(str1, str2, DONE);
|
||||
#ifdef ASSERT
|
||||
Label align_ok;
|
||||
orr(t0, str1, str2);
|
||||
andi(t0, t0, 0x7);
|
||||
beqz(t0, align_ok);
|
||||
stop("bad alignment");
|
||||
bind(align_ok);
|
||||
#endif
|
||||
// load 8 bytes once to compare
|
||||
ld(tmp1, Address(str1));
|
||||
ld(tmp2, Address(str2));
|
||||
@@ -1452,7 +1484,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
sub(cnt2, zr, cnt2);
|
||||
} else if (isLU) { // LU case
|
||||
lwu(tmp1, Address(str1));
|
||||
ld(tmp2, Address(str2));
|
||||
load_long_misaligned(tmp2, Address(str2), tmp3, (base_offset2 % 8) != 0 ? 4 : 8);
|
||||
mv(t0, STUB_THRESHOLD);
|
||||
bge(cnt2, t0, STUB);
|
||||
subi(cnt2, cnt2, 4);
|
||||
@@ -1465,11 +1497,11 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
sub(cnt2, zr, cnt2);
|
||||
addi(cnt1, cnt1, 4);
|
||||
} else { // UL case
|
||||
ld(tmp1, Address(str1));
|
||||
load_long_misaligned(tmp1, Address(str1), tmp3, (base_offset2 % 8) != 0 ? 4 : 8);
|
||||
lwu(tmp2, Address(str2));
|
||||
mv(t0, STUB_THRESHOLD);
|
||||
bge(cnt2, t0, STUB);
|
||||
addi(cnt2, cnt2, -4);
|
||||
subi(cnt2, cnt2, 4);
|
||||
slli(t0, cnt2, 1);
|
||||
sub(cnt1, zr, t0);
|
||||
add(str1, str1, t0);
|
||||
@@ -1486,6 +1518,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
// main loop
|
||||
bind(NEXT_WORD);
|
||||
if (str1_isL == str2_isL) { // LL or UU
|
||||
// both of the two loads are 8-byte aligned
|
||||
add(t0, str1, cnt2);
|
||||
ld(tmp1, Address(t0));
|
||||
add(t0, str2, cnt2);
|
||||
@@ -1495,7 +1528,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
add(t0, str1, cnt1);
|
||||
lwu(tmp1, Address(t0));
|
||||
add(t0, str2, cnt2);
|
||||
ld(tmp2, Address(t0));
|
||||
load_long_misaligned(tmp2, Address(t0), tmp3, (base_offset2 % 8) != 0 ? 4 : 8);
|
||||
addi(cnt1, cnt1, 4);
|
||||
inflate_lo32(tmp3, tmp1);
|
||||
mv(tmp1, tmp3);
|
||||
@@ -1504,7 +1537,7 @@ void C2_MacroAssembler::string_compare(Register str1, Register str2,
|
||||
add(t0, str2, cnt2);
|
||||
lwu(tmp2, Address(t0));
|
||||
add(t0, str1, cnt1);
|
||||
ld(tmp1, Address(t0));
|
||||
load_long_misaligned(tmp1, Address(t0), tmp3, (base_offset2 % 8) != 0 ? 4 : 8);
|
||||
inflate_lo32(tmp3, tmp2);
|
||||
mv(tmp2, tmp3);
|
||||
addi(cnt1, cnt1, 8);
|
||||
@@ -1637,6 +1670,9 @@ void C2_MacroAssembler::arrays_equals(Register a1, Register a2,
|
||||
int length_offset = arrayOopDesc::length_offset_in_bytes();
|
||||
int base_offset = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE);
|
||||
|
||||
assert((base_offset % (UseCompactObjectHeaders ? 4 :
|
||||
(UseCompressedClassPointers ? 8 : 4))) == 0, "Must be");
|
||||
|
||||
Register cnt1 = tmp3;
|
||||
Register cnt2 = tmp1; // cnt2 only used in array length compare
|
||||
Label DONE, SAME, NEXT_WORD, SHORT, TAIL03, TAIL01;
|
||||
@@ -1660,10 +1696,31 @@ void C2_MacroAssembler::arrays_equals(Register a1, Register a2,
|
||||
|
||||
la(a1, Address(a1, base_offset));
|
||||
la(a2, Address(a2, base_offset));
|
||||
|
||||
// Load 4 bytes once to compare for alignment before main loop.
|
||||
if (AvoidUnalignedAccesses && (base_offset % 8) != 0) {
|
||||
subi(cnt1, cnt1, elem_per_word / 2);
|
||||
bltz(cnt1, TAIL03);
|
||||
lwu(tmp1, Address(a1));
|
||||
lwu(tmp2, Address(a2));
|
||||
addi(a1, a1, 4);
|
||||
addi(a2, a2, 4);
|
||||
bne(tmp1, tmp2, DONE);
|
||||
}
|
||||
|
||||
// Check for short strings, i.e. smaller than wordSize.
|
||||
subi(cnt1, cnt1, elem_per_word);
|
||||
bltz(cnt1, SHORT);
|
||||
|
||||
#ifdef ASSERT
|
||||
Label align_ok;
|
||||
orr(t0, a1, a2);
|
||||
andi(t0, t0, 0x7);
|
||||
beqz(t0, align_ok);
|
||||
stop("bad alignment");
|
||||
bind(align_ok);
|
||||
#endif
|
||||
|
||||
// Main 8 byte comparison loop.
|
||||
bind(NEXT_WORD); {
|
||||
ld(tmp1, Address(a1));
|
||||
@@ -1729,20 +1786,45 @@ void C2_MacroAssembler::arrays_equals(Register a1, Register a2,
|
||||
void C2_MacroAssembler::string_equals(Register a1, Register a2,
|
||||
Register result, Register cnt1)
|
||||
{
|
||||
Label SAME, DONE, SHORT, NEXT_WORD;
|
||||
Label SAME, DONE, SHORT, NEXT_WORD, TAIL03, TAIL01;
|
||||
Register tmp1 = t0;
|
||||
Register tmp2 = t1;
|
||||
|
||||
assert_different_registers(a1, a2, result, cnt1, tmp1, tmp2);
|
||||
|
||||
int base_offset = arrayOopDesc::base_offset_in_bytes(T_BYTE);
|
||||
|
||||
assert((base_offset % (UseCompactObjectHeaders ? 4 :
|
||||
(UseCompressedClassPointers ? 8 : 4))) == 0, "Must be");
|
||||
|
||||
BLOCK_COMMENT("string_equals {");
|
||||
|
||||
mv(result, false);
|
||||
|
||||
// Load 4 bytes once to compare for alignment before main loop.
|
||||
if (AvoidUnalignedAccesses && (base_offset % 8) != 0) {
|
||||
subi(cnt1, cnt1, 4);
|
||||
bltz(cnt1, TAIL03);
|
||||
lwu(tmp1, Address(a1));
|
||||
lwu(tmp2, Address(a2));
|
||||
addi(a1, a1, 4);
|
||||
addi(a2, a2, 4);
|
||||
bne(tmp1, tmp2, DONE);
|
||||
}
|
||||
|
||||
// Check for short strings, i.e. smaller than wordSize.
|
||||
subi(cnt1, cnt1, wordSize);
|
||||
bltz(cnt1, SHORT);
|
||||
|
||||
#ifdef ASSERT
|
||||
Label align_ok;
|
||||
orr(t0, a1, a2);
|
||||
andi(t0, t0, 0x7);
|
||||
beqz(t0, align_ok);
|
||||
stop("bad alignment");
|
||||
bind(align_ok);
|
||||
#endif
|
||||
|
||||
// Main 8 byte comparison loop.
|
||||
bind(NEXT_WORD); {
|
||||
ld(tmp1, Address(a1));
|
||||
@@ -1757,8 +1839,6 @@ void C2_MacroAssembler::string_equals(Register a1, Register a2,
|
||||
beqz(tmp1, SAME);
|
||||
|
||||
bind(SHORT);
|
||||
Label TAIL03, TAIL01;
|
||||
|
||||
// 0-7 bytes left.
|
||||
test_bit(tmp1, cnt1, 2);
|
||||
beqz(tmp1, TAIL03);
|
||||
@@ -2512,6 +2592,9 @@ void C2_MacroAssembler::arrays_equals_v(Register a1, Register a2, Register resul
|
||||
int length_offset = arrayOopDesc::length_offset_in_bytes();
|
||||
int base_offset = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE);
|
||||
|
||||
assert((base_offset % (UseCompactObjectHeaders ? 4 :
|
||||
(UseCompressedClassPointers ? 8 : 4))) == 0, "Must be");
|
||||
|
||||
BLOCK_COMMENT("arrays_equals_v {");
|
||||
|
||||
// if (a1 == a2), return true
|
||||
|
||||
@@ -103,6 +103,7 @@ define_pd_global(intx, InlineSmallCode, 1000);
   product(bool, UseZba, false, DIAGNOSTIC, "Use Zba instructions")         \
   product(bool, UseZbb, false, DIAGNOSTIC, "Use Zbb instructions")         \
   product(bool, UseZbs, false, DIAGNOSTIC, "Use Zbs instructions")         \
+  product(bool, UseZfa, false, EXPERIMENTAL, "Use Zfa instructions")       \
   product(bool, UseZfh, false, DIAGNOSTIC, "Use Zfh instructions")         \
   product(bool, UseZfhmin, false, DIAGNOSTIC, "Use Zfhmin instructions")   \
   product(bool, UseZacas, false, EXPERIMENTAL, "Use Zacas instructions")   \

@@ -2593,6 +2593,45 @@ void MacroAssembler::movptr2(Register Rd, uint64_t addr, int32_t &offset, Regist
   offset = lower12;
 }

+// floating point imm move
+bool MacroAssembler::can_fp_imm_load(float imm) {
+  jint f_bits = jint_cast(imm);
+  if (f_bits == 0) {
+    return true;
+  }
+  return can_zfa_zli_float(imm);
+}
+
+bool MacroAssembler::can_dp_imm_load(double imm) {
+  julong d_bits = julong_cast(imm);
+  if (d_bits == 0) {
+    return true;
+  }
+  return can_zfa_zli_double(imm);
+}
+
+void MacroAssembler::fli_s(FloatRegister Rd, float imm) {
+  jint f_bits = jint_cast(imm);
+  if (f_bits == 0) {
+    fmv_w_x(Rd, zr);
+    return;
+  }
+  int Rs = zfa_zli_lookup_float(f_bits);
+  assert(Rs != -1, "Must be");
+  _fli_s(Rd, Rs);
+}
+
+void MacroAssembler::fli_d(FloatRegister Rd, double imm) {
+  uint64_t d_bits = (uint64_t)julong_cast(imm);
+  if (d_bits == 0) {
+    fmv_d_x(Rd, zr);
+    return;
+  }
+  int Rs = zfa_zli_lookup_double(d_bits);
+  assert(Rs != -1, "Must be");
+  _fli_d(Rd, Rs);
+}
+
 void MacroAssembler::add(Register Rd, Register Rn, int64_t increment, Register tmp) {
   if (is_simm12(increment)) {
     addi(Rd, Rn, increment);
@@ -4268,7 +4307,7 @@ void MacroAssembler::population_count(Register dst, Register src,
   {
     bind(loop);
     addi(dst, dst, 1);
-    addi(tmp2, tmp1, -1);
+    subi(tmp2, tmp1, 1);
     andr(tmp1, tmp1, tmp2);
     bnez(tmp1, loop);
   }

@@ -920,6 +920,11 @@ public:
|
||||
void movptr1(Register Rd, uintptr_t addr, int32_t &offset);
|
||||
void movptr2(Register Rd, uintptr_t addr, int32_t &offset, Register tmp);
|
||||
public:
|
||||
// float imm move
|
||||
static bool can_fp_imm_load(float imm);
|
||||
static bool can_dp_imm_load(double imm);
|
||||
void fli_s(FloatRegister Rd, float imm);
|
||||
void fli_d(FloatRegister Rd, double imm);
|
||||
|
||||
// arith
|
||||
void add (Register Rd, Register Rn, int64_t increment, Register tmp = t0);
|
||||
|
||||
@@ -4920,7 +4920,11 @@ instruct loadConF(fRegF dst, immF con) %{
|
||||
%}
|
||||
|
||||
ins_encode %{
|
||||
__ flw(as_FloatRegister($dst$$reg), $constantaddress($con));
|
||||
if (MacroAssembler::can_fp_imm_load($con$$constant)) {
|
||||
__ fli_s(as_FloatRegister($dst$$reg), $con$$constant);
|
||||
} else {
|
||||
__ flw(as_FloatRegister($dst$$reg), $constantaddress($con));
|
||||
}
|
||||
%}
|
||||
|
||||
ins_pipe(fp_load_constant_s);
|
||||
@@ -4950,7 +4954,11 @@ instruct loadConD(fRegD dst, immD con) %{
|
||||
%}
|
||||
|
||||
ins_encode %{
|
||||
__ fld(as_FloatRegister($dst$$reg), $constantaddress($con));
|
||||
if (MacroAssembler::can_dp_imm_load($con$$constant)) {
|
||||
__ fli_d(as_FloatRegister($dst$$reg), $con$$constant);
|
||||
} else {
|
||||
__ fld(as_FloatRegister($dst$$reg), $constantaddress($con));
|
||||
}
|
||||
%}
|
||||
|
||||
ins_pipe(fp_load_constant_d);
|
||||
|
||||
87
src/hotspot/cpu/riscv/stubDeclarations_riscv.hpp
Normal file
@@ -0,0 +1,87 @@
|
||||
/*
|
||||
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2025, Red Hat, Inc. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_RISCV_STUBDECLARATIONS_HPP
|
||||
#define CPU_RISCV_STUBDECLARATIONS_HPP
|
||||
|
||||
#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(initial, 10000) \
|
||||
|
||||
|
||||
#define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(continuation, 2000) \
|
||||
|
||||
|
||||
#define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(compiler, 45000) \
|
||||
do_stub(compiler, compare_long_string_LL) \
|
||||
do_arch_entry(riscv, compiler, compare_long_string_LL, \
|
||||
compare_long_string_LL, compare_long_string_LL) \
|
||||
do_stub(compiler, compare_long_string_UU) \
|
||||
do_arch_entry(riscv, compiler, compare_long_string_UU, \
|
||||
compare_long_string_UU, compare_long_string_UU) \
|
||||
do_stub(compiler, compare_long_string_LU) \
|
||||
do_arch_entry(riscv, compiler, compare_long_string_LU, \
|
||||
compare_long_string_LU, compare_long_string_LU) \
|
||||
do_stub(compiler, compare_long_string_UL) \
|
||||
do_arch_entry(riscv, compiler, compare_long_string_UL, \
|
||||
compare_long_string_UL, compare_long_string_UL) \
|
||||
do_stub(compiler, string_indexof_linear_ll) \
|
||||
do_arch_entry(riscv, compiler, string_indexof_linear_ll, \
|
||||
string_indexof_linear_ll, string_indexof_linear_ll) \
|
||||
do_stub(compiler, string_indexof_linear_uu) \
|
||||
do_arch_entry(riscv, compiler, string_indexof_linear_uu, \
|
||||
string_indexof_linear_uu, string_indexof_linear_uu) \
|
||||
do_stub(compiler, string_indexof_linear_ul) \
|
||||
do_arch_entry(riscv, compiler, string_indexof_linear_ul, \
|
||||
string_indexof_linear_ul, string_indexof_linear_ul) \
|
||||
|
||||
|
||||
#define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(final, 20000 ZGC_ONLY(+10000)) \
|
||||
do_stub(final, copy_byte_f) \
|
||||
do_arch_entry(riscv, final, copy_byte_f, copy_byte_f, \
|
||||
copy_byte_f) \
|
||||
do_stub(final, copy_byte_b) \
|
||||
do_arch_entry(riscv, final, copy_byte_b, copy_byte_b, \
|
||||
copy_byte_b) \
|
||||
do_stub(final, zero_blocks) \
|
||||
do_arch_entry(riscv, final, zero_blocks, zero_blocks, \
|
||||
zero_blocks) \
|
||||
|
||||
|
||||
#endif // CPU_RISCV_STUBDECLARATIONS_HPP
|
||||
File diff suppressed because it is too large
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2014, 2025, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -33,14 +33,19 @@
|
||||
// Implementation of the platform-specific part of StubRoutines - for
|
||||
// a description of how to extend it, see the stubRoutines.hpp file.
|
||||
|
||||
address StubRoutines::riscv::_zero_blocks = nullptr;
|
||||
address StubRoutines::riscv::_compare_long_string_LL = nullptr;
|
||||
address StubRoutines::riscv::_compare_long_string_UU = nullptr;
|
||||
address StubRoutines::riscv::_compare_long_string_LU = nullptr;
|
||||
address StubRoutines::riscv::_compare_long_string_UL = nullptr;
|
||||
address StubRoutines::riscv::_string_indexof_linear_ll = nullptr;
|
||||
address StubRoutines::riscv::_string_indexof_linear_uu = nullptr;
|
||||
address StubRoutines::riscv::_string_indexof_linear_ul = nullptr;
|
||||
|
||||
// define fields for arch-specific entries
|
||||
|
||||
#define DEFINE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \
|
||||
address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = nullptr;
|
||||
|
||||
#define DEFINE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
|
||||
address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function);
|
||||
|
||||
STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT)
|
||||
|
||||
#undef DEFINE_ARCH_ENTRY_INIT
|
||||
#undef DEFINE_ARCH_ENTRY
|
||||
|
||||
bool StubRoutines::riscv::_completed = false;
|
||||
|
||||
|
||||
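The new stubDeclarations headers and the rewritten stubRoutines_riscv files use an X-macro scheme: one listing of stubs is expanded several times with different do_… callbacks to declare the fields, define them, and emit getters. Below is a minimal self-contained illustration of that expansion technique, with made-up entry names and a plain struct standing in for the StubRoutines class; it is a sketch of the pattern, not the HotSpot macros themselves.

#include <cstdio>

// One listing of entries, expanded several times with different callback macros,
// mirroring how STUBGEN_ARCH_ENTRIES_DO is consumed in the files above.
#define STUB_ENTRIES_DO(do_entry) \
  do_entry(compare_long_string_LL) \
  do_entry(zero_blocks)

struct StubTable {
  // Expansion 1: declare one address field per entry.
#define DECLARE_ENTRY(name) void* _##name = nullptr;
  STUB_ENTRIES_DO(DECLARE_ENTRY)
#undef DECLARE_ENTRY

  // Expansion 2: emit a getter per entry.
#define DEFINE_GETTER(name) void* name() const { return _##name; }
  STUB_ENTRIES_DO(DEFINE_GETTER)
#undef DEFINE_GETTER
};

int main() {
  StubTable stubs;
  std::printf("compare_long_string_LL stub at %p\n", stubs.compare_long_string_LL());
  std::printf("zero_blocks stub at %p\n", stubs.zero_blocks());
  return 0;
}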
@@ -35,63 +35,53 @@ static bool returns_to_call_stub(address return_pc) {
|
||||
return return_pc == _call_stub_return_address;
|
||||
}
|
||||
|
||||
// emit enum used to size per-blob code buffers
|
||||
|
||||
#define DEFINE_BLOB_SIZE(blob_name, size) \
|
||||
_ ## blob_name ## _code_size = size,
|
||||
|
||||
enum platform_dependent_constants {
|
||||
// simply increase sizes if too small (assembler will crash if too small)
|
||||
_initial_stubs_code_size = 10000,
|
||||
_continuation_stubs_code_size = 2000,
|
||||
_compiler_stubs_code_size = 45000,
|
||||
_final_stubs_code_size = 20000 ZGC_ONLY(+10000)
|
||||
STUBGEN_ARCH_BLOBS_DO(DEFINE_BLOB_SIZE)
|
||||
};
|
||||
|
||||
#undef DEFINE_BLOB_SIZE
|
||||
|
||||
class riscv {
|
||||
friend class StubGenerator;
|
||||
#if INCLUDE_JVMCI
|
||||
friend class JVMCIVMStructs;
|
||||
#endif
|
||||
|
||||
private:
|
||||
static address _zero_blocks;
|
||||
// declare fields for arch-specific entries
|
||||
|
||||
static address _compare_long_string_LL;
|
||||
static address _compare_long_string_LU;
|
||||
static address _compare_long_string_UL;
|
||||
static address _compare_long_string_UU;
|
||||
static address _string_indexof_linear_ll;
|
||||
static address _string_indexof_linear_uu;
|
||||
static address _string_indexof_linear_ul;
|
||||
#define DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \
|
||||
static address STUB_FIELD_NAME(field_name) ;
|
||||
|
||||
#define DECLARE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
|
||||
DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name)
|
||||
|
||||
private:
|
||||
STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT)
|
||||
|
||||
#undef DECLARE_ARCH_ENTRY_INIT
|
||||
#undef DECLARE_ARCH_ENTRY
|
||||
|
||||
static bool _completed;
|
||||
|
||||
public:
|
||||
|
||||
static address zero_blocks() {
|
||||
return _zero_blocks;
|
||||
}
|
||||
// declare getters for arch-specific entries
|
||||
|
||||
static address compare_long_string_LL() {
|
||||
return _compare_long_string_LL;
|
||||
}
|
||||
#define DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name) \
|
||||
static address getter_name() { return STUB_FIELD_NAME(field_name) ; }
|
||||
|
||||
static address compare_long_string_LU() {
|
||||
return _compare_long_string_LU;
|
||||
}
|
||||
#define DEFINE_ARCH_ENTRY_GETTER_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
|
||||
DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name)
|
||||
|
||||
static address compare_long_string_UL() {
|
||||
return _compare_long_string_UL;
|
||||
}
|
||||
STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT)
|
||||
|
||||
static address compare_long_string_UU() {
|
||||
return _compare_long_string_UU;
|
||||
}
|
||||
|
||||
static address string_indexof_linear_ul() {
|
||||
return _string_indexof_linear_ul;
|
||||
}
|
||||
|
||||
static address string_indexof_linear_ll() {
|
||||
return _string_indexof_linear_ll;
|
||||
}
|
||||
|
||||
static address string_indexof_linear_uu() {
|
||||
return _string_indexof_linear_uu;
|
||||
}
|
||||
#undef DEFINE_ARCH_ENTRY_GETTER_INIT
|
||||
#undef DEFINE_ARCH_ENTRY_GETTER
|
||||
|
||||
static bool complete() {
|
||||
return _completed;
|
||||
|
||||
@@ -157,6 +157,7 @@ class VM_Version : public Abstract_VM_Version {
|
||||
decl(ext_Zbc , "Zbc" , RV_NO_FLAG_BIT, true , NO_UPDATE_DEFAULT) \
|
||||
decl(ext_Zbs , "Zbs" , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZbs)) \
|
||||
decl(ext_Zcb , "Zcb" , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZcb)) \
|
||||
decl(ext_Zfa , "Zfa" , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZfa)) \
|
||||
decl(ext_Zfh , "Zfh" , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZfh)) \
|
||||
decl(ext_Zfhmin , "Zfhmin" , RV_NO_FLAG_BIT, true , UPDATE_DEFAULT(UseZfhmin)) \
|
||||
decl(ext_Zicsr , "Zicsr" , RV_NO_FLAG_BIT, true , NO_UPDATE_DEFAULT) \
|
||||
@@ -226,6 +227,7 @@ class VM_Version : public Abstract_VM_Version {
|
||||
RV_ENABLE_EXTENSION(UseZbb) \
|
||||
RV_ENABLE_EXTENSION(UseZbs) \
|
||||
RV_ENABLE_EXTENSION(UseZcb) \
|
||||
RV_ENABLE_EXTENSION(UseZfa) \
|
||||
RV_ENABLE_EXTENSION(UseZfhmin) \
|
||||
RV_ENABLE_EXTENSION(UseZic64b) \
|
||||
RV_ENABLE_EXTENSION(UseZicbom) \
|
||||
|
||||
60
src/hotspot/cpu/s390/stubDeclarations_s390.hpp
Normal file
@@ -0,0 +1,60 @@
|
||||
/*
|
||||
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2025, Red Hat, Inc. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_S390_STUBDECLARATIONS_HPP
|
||||
#define CPU_S390_STUBDECLARATIONS_HPP
|
||||
|
||||
#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(initial, 20000) \
|
||||
|
||||
|
||||
#define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(continuation, 2000) \
|
||||
|
||||
|
||||
#define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(compiler, 20000 ) \
|
||||
do_stub(compiler, partial_subtype_check) \
|
||||
do_arch_entry(zarch, compiler, partial_subtype_check, \
|
||||
partial_subtype_check, partial_subtype_check) \
|
||||
|
||||
|
||||
#define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(final, 20000) \
|
||||
|
||||
|
||||
#endif // CPU_S390_STUBDECLARATIONS_HPP
|
||||
@@ -118,7 +118,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// Set up a new C frame, copy Java arguments, call frame manager
|
||||
// or native_entry, and process result.
|
||||
|
||||
StubCodeMark mark(this, "StubRoutines", "call_stub");
|
||||
StubGenStubId stub_id = StubGenStubId::call_stub_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
address start = __ pc();
|
||||
|
||||
Register r_arg_call_wrapper_addr = Z_ARG1;
|
||||
@@ -458,7 +459,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// pending exception stored in JavaThread that can be tested from
|
||||
// within the VM.
|
||||
address generate_catch_exception() {
|
||||
StubCodeMark mark(this, "StubRoutines", "catch_exception");
|
||||
StubGenStubId stub_id = StubGenStubId::catch_exception_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
|
||||
address start = __ pc();
|
||||
|
||||
@@ -509,7 +511,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// (Z_R14 is unchanged and is live out).
|
||||
//
|
||||
address generate_forward_exception() {
|
||||
StubCodeMark mark(this, "StubRoutines", "forward_exception");
|
||||
StubGenStubId stub_id = StubGenStubId::forward_exception_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
address start = __ pc();
|
||||
|
||||
#define pending_exception_offset in_bytes(Thread::pending_exception_offset())
|
||||
@@ -589,7 +592,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// raddr: Z_R14, blown by call
|
||||
//
|
||||
address generate_partial_subtype_check() {
|
||||
StubCodeMark mark(this, "StubRoutines", "partial_subtype_check");
|
||||
StubGenStubId stub_id = StubGenStubId::partial_subtype_check_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
Label miss;
|
||||
|
||||
address start = __ pc();
|
||||
@@ -621,8 +625,9 @@ class StubGenerator: public StubCodeGenerator {
|
||||
return start;
|
||||
}
|
||||
|
||||
address generate_lookup_secondary_supers_table_stub(u1 super_klass_index) {
|
||||
StubCodeMark mark(this, "StubRoutines", "lookup_secondary_supers_table");
|
||||
void generate_lookup_secondary_supers_table_stub() {
|
||||
StubGenStubId stub_id = StubGenStubId::lookup_secondary_supers_table_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
|
||||
const Register
|
||||
r_super_klass = Z_ARG1,
|
||||
@@ -632,20 +637,20 @@ class StubGenerator: public StubCodeGenerator {
|
||||
r_array_base = Z_ARG5,
|
||||
r_bitmap = Z_R10,
|
||||
r_result = Z_R11;
|
||||
address start = __ pc();
|
||||
for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) {
|
||||
StubRoutines::_lookup_secondary_supers_table_stubs[slot] = __ pc();
|
||||
__ lookup_secondary_supers_table_const(r_sub_klass, r_super_klass,
|
||||
r_array_base, r_array_length, r_array_index,
|
||||
r_bitmap, r_result, slot);
|
||||
|
||||
__ lookup_secondary_supers_table_const(r_sub_klass, r_super_klass,
|
||||
r_array_base, r_array_length, r_array_index,
|
||||
r_bitmap, r_result, super_klass_index);
|
||||
|
||||
__ z_br(Z_R14);
|
||||
|
||||
return start;
|
||||
__ z_br(Z_R14);
|
||||
}
|
||||
}
|
||||
|
||||
// Slow path implementation for UseSecondarySupersTable.
|
||||
address generate_lookup_secondary_supers_table_slow_path_stub() {
|
||||
StubCodeMark mark(this, "StubRoutines", "lookup_secondary_supers_table_slow_path");
|
||||
StubGenStubId stub_id = StubGenStubId::lookup_secondary_supers_table_slow_path_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
|
||||
address start = __ pc();
|
||||
|
||||
@@ -1260,51 +1265,75 @@ class StubGenerator: public StubCodeGenerator {
|
||||
}
|
||||
}
|
||||
|
||||
// Generate stub for disjoint byte copy. If "aligned" is true, the
|
||||
// "from" and "to" addresses are assumed to be heapword aligned.
|
||||
address generate_disjoint_byte_copy(bool aligned, const char * name) {
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
|
||||
// This is the zarch specific stub generator for byte array copy.
|
||||
// Refer to generate_disjoint_copy for a list of prereqs and features:
|
||||
address generate_disjoint_nonoop_copy(StubGenStubId stub_id) {
|
||||
bool aligned;
|
||||
int element_size;
|
||||
switch (stub_id) {
|
||||
case jbyte_disjoint_arraycopy_id:
|
||||
aligned = false;
|
||||
element_size = 1;
|
||||
break;
|
||||
case arrayof_jbyte_disjoint_arraycopy_id:
|
||||
aligned = true;
|
||||
element_size = 1;
|
||||
break;
|
||||
case jshort_disjoint_arraycopy_id:
|
||||
aligned = false;
|
||||
element_size = 2;
|
||||
break;
|
||||
case arrayof_jshort_disjoint_arraycopy_id:
|
||||
aligned = true;
|
||||
element_size = 2;
|
||||
break;
|
||||
case jint_disjoint_arraycopy_id:
|
||||
aligned = false;
|
||||
element_size = 4;
|
||||
break;
|
||||
case arrayof_jint_disjoint_arraycopy_id:
|
||||
aligned = true;
|
||||
element_size = 4;
|
||||
break;
|
||||
case jlong_disjoint_arraycopy_id:
|
||||
aligned = false;
|
||||
element_size = 8;
|
||||
break;
|
||||
case arrayof_jlong_disjoint_arraycopy_id:
|
||||
aligned = true;
|
||||
element_size = 8;
|
||||
break;
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
StubCodeMark mark(this, stub_id);
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
generate_disjoint_copy(aligned, 1, false, false);
|
||||
generate_disjoint_copy(aligned, element_size, false, false);
|
||||
return __ addr_at(start_off);
|
||||
}
|
||||
|
||||
|
||||
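generate_disjoint_nonoop_copy above replaces a family of generators that took (bool aligned, const char* name) with a single routine that decodes everything it needs from the stub id. Below is a standalone sketch of that id-to-parameters decoding; the enum values are invented stand-ins for StubGenStubId, and the assert stands in for ShouldNotReachHere().

#include <cassert>
#include <cstdio>

// Made-up subset of stub ids for illustration.
enum StubId { jbyte_disjoint, arrayof_jbyte_disjoint, jint_disjoint, arrayof_jint_disjoint };

struct CopyParams { bool aligned; int element_size; };

// Decode the stub id once, close to where the stub would be generated.
static CopyParams decode(StubId id) {
  switch (id) {
    case jbyte_disjoint:         return { false, 1 };
    case arrayof_jbyte_disjoint: return { true,  1 };
    case jint_disjoint:          return { false, 4 };
    case arrayof_jint_disjoint:  return { true,  4 };
  }
  assert(false && "unknown stub id");  // stands in for ShouldNotReachHere()
  return { false, 0 };
}

int main() {
  CopyParams p = decode(arrayof_jint_disjoint);
  std::printf("aligned=%d element_size=%d\n", p.aligned, p.element_size);
  return 0;
}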
address generate_disjoint_short_copy(bool aligned, const char * name) {
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
// This is the zarch specific stub generator for short array copy.
|
||||
// Refer to generate_disjoint_copy for a list of prereqs and features:
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
generate_disjoint_copy(aligned, 2, false, false);
|
||||
return __ addr_at(start_off);
|
||||
}
|
||||
|
||||
|
||||
address generate_disjoint_int_copy(bool aligned, const char * name) {
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
// This is the zarch specific stub generator for int array copy.
|
||||
// Refer to generate_disjoint_copy for a list of prereqs and features:
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
generate_disjoint_copy(aligned, 4, false, false);
|
||||
return __ addr_at(start_off);
|
||||
}
|
||||
|
||||
|
||||
address generate_disjoint_long_copy(bool aligned, const char * name) {
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
// This is the zarch specific stub generator for long array copy.
|
||||
// Refer to generate_disjoint_copy for a list of prereqs and features:
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
generate_disjoint_copy(aligned, 8, false, false);
|
||||
return __ addr_at(start_off);
|
||||
}
|
||||
|
||||
|
||||
address generate_disjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address generate_disjoint_oop_copy(StubGenStubId stub_id) {
|
||||
bool aligned;
|
||||
bool dest_uninitialized;
|
||||
switch (stub_id) {
|
||||
case oop_disjoint_arraycopy_id:
|
||||
aligned = false;
|
||||
dest_uninitialized = false;
|
||||
break;
|
||||
case arrayof_oop_disjoint_arraycopy_id:
|
||||
aligned = true;
|
||||
dest_uninitialized = false;
|
||||
break;
|
||||
case oop_disjoint_arraycopy_uninit_id:
|
||||
aligned = false;
|
||||
dest_uninitialized = true;
|
||||
break;
|
||||
case arrayof_oop_disjoint_arraycopy_uninit_id:
|
||||
aligned = true;
|
||||
dest_uninitialized = true;
|
||||
break;
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
StubCodeMark mark(this, stub_id);
|
||||
// This is the zarch specific stub generator for oop array copy.
|
||||
// Refer to generate_disjoint_copy for a list of prereqs and features.
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
@@ -1328,77 +1357,96 @@ class StubGenerator: public StubCodeGenerator {
|
||||
return __ addr_at(start_off);
|
||||
}
|
||||
|
||||
|
||||
address generate_conjoint_byte_copy(bool aligned, const char * name) {
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
// This is the zarch specific stub generator for overlapping byte array copy.
|
||||
// Refer to generate_conjoint_copy for a list of prereqs and features:
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
address nooverlap_target = aligned ? StubRoutines::arrayof_jbyte_disjoint_arraycopy()
|
||||
: StubRoutines::jbyte_disjoint_arraycopy();
|
||||
|
||||
array_overlap_test(nooverlap_target, 0); // Branch away to nooverlap_target if disjoint.
|
||||
generate_conjoint_copy(aligned, 1, false);
|
||||
|
||||
address generate_conjoint_nonoop_copy(StubGenStubId stub_id) {
|
||||
bool aligned;
|
||||
int shift; // i.e. log2(element size)
|
||||
address nooverlap_target;
|
||||
switch (stub_id) {
|
||||
case jbyte_arraycopy_id:
|
||||
aligned = false;
|
||||
shift = 0;
|
||||
nooverlap_target = StubRoutines::jbyte_disjoint_arraycopy();
|
||||
break;
|
||||
case arrayof_jbyte_arraycopy_id:
|
||||
aligned = true;
|
||||
shift = 0;
|
||||
nooverlap_target = StubRoutines::arrayof_jbyte_disjoint_arraycopy();
|
||||
break;
|
||||
case jshort_arraycopy_id:
|
||||
aligned = false;
|
||||
shift = 1;
|
||||
nooverlap_target = StubRoutines::jshort_disjoint_arraycopy();
|
||||
break;
|
||||
case arrayof_jshort_arraycopy_id:
|
||||
aligned = true;
|
||||
shift = 1;
|
||||
nooverlap_target = StubRoutines::arrayof_jshort_disjoint_arraycopy();
|
||||
break;
|
||||
case jint_arraycopy_id:
|
||||
aligned = false;
|
||||
shift = 2;
|
||||
nooverlap_target = StubRoutines::jint_disjoint_arraycopy();
|
||||
break;
|
||||
case arrayof_jint_arraycopy_id:
|
||||
aligned = true;
|
||||
shift = 2;
|
||||
nooverlap_target = StubRoutines::arrayof_jint_disjoint_arraycopy();
|
||||
break;
|
||||
case jlong_arraycopy_id:
|
||||
aligned = false;
|
||||
shift = 3;
|
||||
nooverlap_target = StubRoutines::jlong_disjoint_arraycopy();
|
||||
break;
|
||||
case arrayof_jlong_arraycopy_id:
|
||||
aligned = true;
|
||||
shift = 3;
|
||||
nooverlap_target = StubRoutines::arrayof_jlong_disjoint_arraycopy();
|
||||
break;
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
StubCodeMark mark(this, stub_id);
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
array_overlap_test(nooverlap_target, shift); // Branch away to nooverlap_target if disjoint.
|
||||
generate_conjoint_copy(aligned, 1 << shift, false);
|
||||
return __ addr_at(start_off);
|
||||
}
|
||||
|
||||
|
||||
address generate_conjoint_short_copy(bool aligned, const char * name) {
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
// This is the zarch specific stub generator for overlapping short array copy.
|
||||
// Refer to generate_conjoint_copy for a list of prereqs and features:
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
address nooverlap_target = aligned ? StubRoutines::arrayof_jshort_disjoint_arraycopy()
|
||||
: StubRoutines::jshort_disjoint_arraycopy();
|
||||
|
||||
array_overlap_test(nooverlap_target, 1); // Branch away to nooverlap_target if disjoint.
|
||||
generate_conjoint_copy(aligned, 2, false);
|
||||
|
||||
return __ addr_at(start_off);
|
||||
}
|
||||
|
||||
address generate_conjoint_int_copy(bool aligned, const char * name) {
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
// This is the zarch specific stub generator for overlapping int array copy.
|
||||
// Refer to generate_conjoint_copy for a list of prereqs and features:
|
||||
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
address nooverlap_target = aligned ? StubRoutines::arrayof_jint_disjoint_arraycopy()
|
||||
: StubRoutines::jint_disjoint_arraycopy();
|
||||
|
||||
array_overlap_test(nooverlap_target, 2); // Branch away to nooverlap_target if disjoint.
|
||||
generate_conjoint_copy(aligned, 4, false);
|
||||
|
||||
return __ addr_at(start_off);
|
||||
}
|
||||
|
||||
address generate_conjoint_long_copy(bool aligned, const char * name) {
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
// This is the zarch specific stub generator for overlapping long array copy.
|
||||
// Refer to generate_conjoint_copy for a list of prereqs and features:
|
||||
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
address nooverlap_target = aligned ? StubRoutines::arrayof_jlong_disjoint_arraycopy()
|
||||
: StubRoutines::jlong_disjoint_arraycopy();
|
||||
|
||||
array_overlap_test(nooverlap_target, 3); // Branch away to nooverlap_target if disjoint.
|
||||
generate_conjoint_copy(aligned, 8, false);
|
||||
|
||||
return __ addr_at(start_off);
|
||||
}
|
||||
|
||||
address generate_conjoint_oop_copy(bool aligned, const char * name, bool dest_uninitialized) {
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
address generate_conjoint_oop_copy(StubGenStubId stub_id) {
|
||||
bool aligned;
|
||||
bool dest_uninitialized;
|
||||
address nooverlap_target;
|
||||
switch (stub_id) {
|
||||
case oop_arraycopy_id:
|
||||
aligned = false;
|
||||
dest_uninitialized = false;
|
||||
nooverlap_target = StubRoutines::oop_disjoint_arraycopy(dest_uninitialized);
|
||||
break;
|
||||
case arrayof_oop_arraycopy_id:
|
||||
aligned = true;
|
||||
dest_uninitialized = false;
|
||||
nooverlap_target = StubRoutines::arrayof_oop_disjoint_arraycopy(dest_uninitialized);
|
||||
break;
|
||||
case oop_arraycopy_uninit_id:
|
||||
aligned = false;
|
||||
dest_uninitialized = true;
|
||||
nooverlap_target = StubRoutines::oop_disjoint_arraycopy(dest_uninitialized);
|
||||
break;
|
||||
case arrayof_oop_arraycopy_uninit_id:
|
||||
aligned = true;
|
||||
dest_uninitialized = true;
|
||||
nooverlap_target = StubRoutines::arrayof_oop_disjoint_arraycopy(dest_uninitialized);
|
||||
break;
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
StubCodeMark mark(this, stub_id);
|
||||
// This is the zarch specific stub generator for overlapping oop array copy.
|
||||
// Refer to generate_conjoint_copy for a list of prereqs and features.
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
unsigned int size = UseCompressedOops ? 4 : 8;
|
||||
unsigned int shift = UseCompressedOops ? 2 : 3;
|
||||
|
||||
address nooverlap_target = aligned ? StubRoutines::arrayof_oop_disjoint_arraycopy(dest_uninitialized)
|
||||
: StubRoutines::oop_disjoint_arraycopy(dest_uninitialized);
|
||||
|
||||
// Branch to disjoint_copy (if applicable) before pre_barrier to avoid double pre_barrier.
|
||||
array_overlap_test(nooverlap_target, shift); // Branch away to nooverlap_target if disjoint.
|
||||
|
||||
@@ -1425,33 +1473,33 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
// Note: the disjoint stubs must be generated first, some of
|
||||
// the conjoint stubs use them.
|
||||
StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy (false, "jbyte_disjoint_arraycopy");
|
||||
StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy");
|
||||
StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy (false, "jint_disjoint_arraycopy");
|
||||
StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy (false, "jlong_disjoint_arraycopy");
|
||||
StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy (false, "oop_disjoint_arraycopy", false);
|
||||
StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy (false, "oop_disjoint_arraycopy_uninit", true);
|
||||
StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubGenStubId::jbyte_disjoint_arraycopy_id);
|
||||
StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_nonoop_copy(StubGenStubId::jshort_disjoint_arraycopy_id);
|
||||
StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubGenStubId::jint_disjoint_arraycopy_id);
|
||||
StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubGenStubId::jlong_disjoint_arraycopy_id);
|
||||
StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy (StubGenStubId::oop_disjoint_arraycopy_id);
|
||||
StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy (StubGenStubId::oop_disjoint_arraycopy_uninit_id);
|
||||
|
||||
StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy (true, "arrayof_jbyte_disjoint_arraycopy");
|
||||
StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy");
|
||||
StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy (true, "arrayof_jint_disjoint_arraycopy");
|
||||
StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy (true, "arrayof_jlong_disjoint_arraycopy");
|
||||
StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy (true, "arrayof_oop_disjoint_arraycopy", false);
|
||||
StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy (true, "arrayof_oop_disjoint_arraycopy_uninit", true);
|
||||
StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubGenStubId::arrayof_jbyte_disjoint_arraycopy_id);
|
||||
StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_nonoop_copy(StubGenStubId::arrayof_jshort_disjoint_arraycopy_id);
|
||||
StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubGenStubId::arrayof_jint_disjoint_arraycopy_id);
|
||||
StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_nonoop_copy (StubGenStubId::arrayof_jlong_disjoint_arraycopy_id);
|
||||
StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy (StubGenStubId::arrayof_oop_disjoint_arraycopy_id);
|
||||
StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy (StubGenStubId::arrayof_oop_disjoint_arraycopy_uninit_id);
|
||||
|
||||
StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy (false, "jbyte_arraycopy");
|
||||
StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, "jshort_arraycopy");
|
||||
StubRoutines::_jint_arraycopy = generate_conjoint_int_copy (false, "jint_arraycopy");
|
||||
StubRoutines::_jlong_arraycopy = generate_conjoint_long_copy (false, "jlong_arraycopy");
|
||||
StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy (false, "oop_arraycopy", false);
|
||||
StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy (false, "oop_arraycopy_uninit", true);
|
||||
StubRoutines::_jbyte_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::jbyte_arraycopy_id);
|
||||
StubRoutines::_jshort_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::jshort_arraycopy_id);
|
||||
StubRoutines::_jint_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::jint_arraycopy_id);
|
||||
StubRoutines::_jlong_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::jlong_arraycopy_id);
|
||||
StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(StubGenStubId::oop_arraycopy_id);
|
||||
StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(StubGenStubId::oop_arraycopy_uninit_id);
|
||||
|
||||
StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy (true, "arrayof_jbyte_arraycopy");
|
||||
StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy");
|
||||
StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy (true, "arrayof_jint_arraycopy");
|
||||
StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_long_copy (true, "arrayof_jlong_arraycopy");
|
||||
StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy (true, "arrayof_oop_arraycopy", false);
|
||||
StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy (true, "arrayof_oop_arraycopy_uninit", true);
|
||||
StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::arrayof_jbyte_arraycopy_id);
|
||||
StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::arrayof_jshort_arraycopy_id);
|
||||
StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_nonoop_copy (StubGenStubId::arrayof_jint_arraycopy_id);
|
||||
StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_nonoop_copy(StubGenStubId::arrayof_jlong_arraycopy_id);
|
||||
StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy(StubGenStubId::arrayof_oop_arraycopy_id);
|
||||
StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(StubGenStubId::arrayof_oop_arraycopy_uninit_id);
|
||||
}
|
||||
|
||||
// Call interface for AES_encryptBlock, AES_decryptBlock stubs.
|
||||
@@ -1733,9 +1781,10 @@ class StubGenerator: public StubCodeGenerator {
|
||||
}
|
||||
|
||||
// Compute AES encrypt function.
|
||||
address generate_AES_encryptBlock(const char* name) {
|
||||
address generate_AES_encryptBlock() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
StubGenStubId stub_id = StubGenStubId::aescrypt_encryptBlock_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
|
||||
generate_AES_cipherBlock(false);
|
||||
@@ -1744,9 +1793,10 @@ class StubGenerator: public StubCodeGenerator {
|
||||
}
|
||||
|
||||
// Compute AES decrypt function.
|
||||
address generate_AES_decryptBlock(const char* name) {
|
||||
address generate_AES_decryptBlock() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
StubGenStubId stub_id = StubGenStubId::aescrypt_decryptBlock_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
|
||||
generate_AES_cipherBlock(true);
|
||||
@@ -1804,9 +1854,10 @@ class StubGenerator: public StubCodeGenerator {
|
||||
}
|
||||
|
||||
// Compute chained AES encrypt function.
|
||||
address generate_cipherBlockChaining_AES_encrypt(const char* name) {
|
||||
address generate_cipherBlockChaining_AES_encrypt() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_encryptAESCrypt_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
|
||||
generate_AES_cipherBlockChaining(false);
|
||||
@@ -1815,9 +1866,10 @@ class StubGenerator: public StubCodeGenerator {
|
||||
}
|
||||
|
||||
// Compute chained AES decrypt function.
|
||||
address generate_cipherBlockChaining_AES_decrypt(const char* name) {
|
||||
address generate_cipherBlockChaining_AES_decrypt() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_decryptAESCrypt_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
|
||||
generate_AES_cipherBlockChaining(true);
|
||||
@@ -2521,9 +2573,10 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
// Compute AES-CTR crypto function.
|
||||
// Encrypt or decrypt is selected via parameters. Only one stub is necessary.
|
||||
address generate_counterMode_AESCrypt(const char* name) {
|
||||
address generate_counterMode_AESCrypt() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
StubGenStubId stub_id = StubGenStubId::counterMode_AESCrypt_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
|
||||
generate_counterMode_AES(false);
|
||||
@@ -2536,7 +2589,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// Compute GHASH function.
|
||||
address generate_ghash_processBlocks() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
|
||||
StubGenStubId stub_id = StubGenStubId::ghash_processBlocks_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
|
||||
const Register state = Z_ARG1;
|
||||
@@ -2613,9 +2667,20 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// provides for a large enough source data buffer.
|
||||
//
|
||||
// Compute SHA-1 function.
|
||||
address generate_SHA1_stub(bool multiBlock, const char* name) {
|
||||
address generate_SHA1_stub(StubGenStubId stub_id) {
|
||||
bool multiBlock;
|
||||
switch (stub_id) {
|
||||
case sha1_implCompress_id:
|
||||
multiBlock = false;
|
||||
break;
|
||||
case sha1_implCompressMB_id:
|
||||
multiBlock = true;
|
||||
break;
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
StubCodeMark mark(this, stub_id);
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
|
||||
const Register srcBuff = Z_ARG1; // Points to first block to process (offset already added).
|
||||
@@ -2695,9 +2760,20 @@ class StubGenerator: public StubCodeGenerator {
|
||||
}
|
||||
|
||||
// Compute SHA-256 function.
|
||||
address generate_SHA256_stub(bool multiBlock, const char* name) {
|
||||
address generate_SHA256_stub(StubGenStubId stub_id) {
|
||||
bool multiBlock;
|
||||
switch (stub_id) {
|
||||
case sha256_implCompress_id:
|
||||
multiBlock = false;
|
||||
break;
|
||||
case sha256_implCompressMB_id:
|
||||
multiBlock = true;
|
||||
break;
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
StubCodeMark mark(this, stub_id);
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
|
||||
const Register srcBuff = Z_ARG1;
|
||||
@@ -2775,9 +2851,20 @@ class StubGenerator: public StubCodeGenerator {
|
||||
}
|
||||
|
||||
// Compute SHA-512 function.
|
||||
address generate_SHA512_stub(bool multiBlock, const char* name) {
|
||||
address generate_SHA512_stub(StubGenStubId stub_id) {
|
||||
bool multiBlock;
|
||||
switch (stub_id) {
|
||||
case sha512_implCompress_id:
|
||||
multiBlock = false;
|
||||
break;
|
||||
case sha512_implCompressMB_id:
|
||||
multiBlock = true;
|
||||
break;
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
StubCodeMark mark(this, stub_id);
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
|
||||
const Register srcBuff = Z_ARG1;
|
||||
@@ -2867,7 +2954,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
* Z_RET - int crc result
|
||||
**/
|
||||
// Compute CRC function (generic, for all polynomials).
|
||||
void generate_CRC_updateBytes(const char* name, Register table, bool invertCRC) {
|
||||
void generate_CRC_updateBytes(Register table, bool invertCRC) {
|
||||
|
||||
// arguments to kernel_crc32:
|
||||
Register crc = Z_ARG1; // Current checksum, preset by caller or result from previous call, int.
|
||||
@@ -2898,18 +2985,19 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
|
||||
// Compute CRC32 function.
|
||||
address generate_CRC32_updateBytes(const char* name) {
|
||||
address generate_CRC32_updateBytes() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
StubGenStubId stub_id = StubGenStubId::updateBytesCRC32_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
|
||||
assert(UseCRC32Intrinsics, "should not generate this stub (%s) with CRC32 intrinsics disabled", name);
|
||||
assert(UseCRC32Intrinsics, "should not generate this stub (%s) with CRC32 intrinsics disabled", StubRoutines::get_stub_name(stub_id));
|
||||
|
||||
BLOCK_COMMENT("CRC32_updateBytes {");
|
||||
Register table = Z_ARG4; // crc32 table address.
|
||||
StubRoutines::zarch::generate_load_crc_table_addr(_masm, table);
|
||||
|
||||
generate_CRC_updateBytes(name, table, true);
|
||||
generate_CRC_updateBytes(table, true);
|
||||
BLOCK_COMMENT("} CRC32_updateBytes");
|
||||
|
||||
return __ addr_at(start_off);
|
||||
@@ -2917,18 +3005,19 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
|
||||
// Compute CRC32C function.
|
||||
address generate_CRC32C_updateBytes(const char* name) {
|
||||
address generate_CRC32C_updateBytes() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", name);
|
||||
StubGenStubId stub_id = StubGenStubId::updateBytesCRC32C_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
|
||||
|
||||
assert(UseCRC32CIntrinsics, "should not generate this stub (%s) with CRC32C intrinsics disabled", name);
|
||||
assert(UseCRC32CIntrinsics, "should not generate this stub (%s) with CRC32C intrinsics disabled", StubRoutines::get_stub_name(stub_id));
|
||||
|
||||
BLOCK_COMMENT("CRC32C_updateBytes {");
|
||||
Register table = Z_ARG4; // crc32c table address.
|
||||
StubRoutines::zarch::generate_load_crc32c_table_addr(_masm, table);
|
||||
|
||||
generate_CRC_updateBytes(name, table, false);
|
||||
generate_CRC_updateBytes(table, false);
|
||||
BLOCK_COMMENT("} CRC32C_updateBytes");
|
||||
|
||||
return __ addr_at(start_off);
|
||||
@@ -2943,7 +3032,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// Z_ARG5 - z address
|
||||
address generate_multiplyToLen() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", "multiplyToLen");
|
||||
StubGenStubId stub_id = StubGenStubId::multiplyToLen_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
|
||||
address start = __ pc();
|
||||
|
||||
@@ -2974,7 +3064,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
address generate_method_entry_barrier() {
|
||||
__ align(CodeEntryAlignment);
|
||||
StubCodeMark mark(this, "StubRoutines", "nmethod_entry_barrier");
|
||||
StubGenStubId stub_id = StubGenStubId::method_entry_barrier_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
|
||||
address start = __ pc();
|
||||
|
||||
@@ -3039,7 +3130,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
// exception handler for upcall stubs
|
||||
address generate_upcall_stub_exception_handler() {
|
||||
StubCodeMark mark(this, "StubRoutines", "upcall stub exception handler");
|
||||
StubGenStubId stub_id = StubGenStubId::upcall_stub_exception_handler_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
address start = __ pc();
|
||||
|
||||
// Native caller has no idea how to handle exceptions,
|
||||
@@ -3056,7 +3148,8 @@ class StubGenerator: public StubCodeGenerator {
|
||||
// Z_ARG1 = jobject receiver
|
||||
// Z_method = Method* result
|
||||
address generate_upcall_stub_load_target() {
|
||||
StubCodeMark mark(this, "StubRoutines", "upcall_stub_load_target");
|
||||
StubGenStubId stub_id = StubGenStubId::upcall_stub_load_target_id;
|
||||
StubCodeMark mark(this, stub_id);
|
||||
address start = __ pc();
|
||||
|
||||
__ resolve_global_jobject(Z_ARG1, Z_tmp_1, Z_tmp_2);
|
||||
@@ -3093,12 +3186,12 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
if (UseCRC32Intrinsics) {
|
||||
StubRoutines::_crc_table_adr = (address)StubRoutines::zarch::_crc_table;
|
||||
StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes("CRC32_updateBytes");
|
||||
StubRoutines::_updateBytesCRC32 = generate_CRC32_updateBytes();
|
||||
}
|
||||
|
||||
if (UseCRC32CIntrinsics) {
|
||||
StubRoutines::_crc32c_table_addr = (address)StubRoutines::zarch::_crc32c_table;
|
||||
StubRoutines::_updateBytesCRC32C = generate_CRC32C_updateBytes("CRC32C_updateBytes");
|
||||
StubRoutines::_updateBytesCRC32C = generate_CRC32C_updateBytes();
|
||||
}
|
||||
|
||||
// Comapct string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
|
||||
@@ -3117,8 +3210,6 @@ class StubGenerator: public StubCodeGenerator {
|
||||
void generate_final_stubs() {
|
||||
// Generates all stubs and initializes the entry points.
|
||||
|
||||
StubRoutines::zarch::_partial_subtype_check = generate_partial_subtype_check();
|
||||
|
||||
// Support for verify_oop (must happen after universe_init).
|
||||
StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop_subroutine();
|
||||
|
||||
@@ -3131,19 +3222,31 @@ class StubGenerator: public StubCodeGenerator {
|
||||
StubRoutines::_method_entry_barrier = generate_method_entry_barrier();
|
||||
}
|
||||
|
||||
#ifdef COMPILER2
|
||||
if (UseSecondarySupersTable) {
|
||||
StubRoutines::_lookup_secondary_supers_table_slow_path_stub = generate_lookup_secondary_supers_table_slow_path_stub();
|
||||
if (!InlineSecondarySupersTest) {
|
||||
generate_lookup_secondary_supers_table_stub();
|
||||
}
|
||||
}
|
||||
#endif // COMPILER2
|
||||
|
||||
StubRoutines::_upcall_stub_exception_handler = generate_upcall_stub_exception_handler();
|
||||
StubRoutines::_upcall_stub_load_target = generate_upcall_stub_load_target();
|
||||
}
|
||||
|
||||
void generate_compiler_stubs() {
|
||||
|
||||
StubRoutines::zarch::_partial_subtype_check = generate_partial_subtype_check();
|
||||
|
||||
#if COMPILER2_OR_JVMCI
|
||||
// Generate AES intrinsics code.
|
||||
if (UseAESIntrinsics) {
|
||||
if (VM_Version::has_Crypto_AES()) {
|
||||
StubRoutines::_aescrypt_encryptBlock = generate_AES_encryptBlock("AES_encryptBlock");
|
||||
StubRoutines::_aescrypt_decryptBlock = generate_AES_decryptBlock("AES_decryptBlock");
|
||||
StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_AES_encrypt("AES_encryptBlock_chaining");
|
||||
StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_AES_decrypt("AES_decryptBlock_chaining");
|
||||
StubRoutines::_aescrypt_encryptBlock = generate_AES_encryptBlock();
|
||||
StubRoutines::_aescrypt_decryptBlock = generate_AES_decryptBlock();
|
||||
StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_AES_encrypt();
|
||||
StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_AES_decrypt();
|
||||
} else {
|
||||
// In PRODUCT builds, the function pointers will keep their initial (null) value.
|
||||
// LibraryCallKit::try_to_inline() will return false then, preventing the intrinsic to be called.
|
||||
@@ -3153,7 +3256,7 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
if (UseAESCTRIntrinsics) {
|
||||
if (VM_Version::has_Crypto_AES_CTR()) {
|
||||
StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt("counterMode_AESCrypt");
|
||||
StubRoutines::_counterMode_AESCrypt = generate_counterMode_AESCrypt();
|
||||
} else {
|
||||
// In PRODUCT builds, the function pointers will keep their initial (null) value.
|
||||
// LibraryCallKit::try_to_inline() will return false then, preventing the intrinsic to be called.
|
||||
@@ -3168,16 +3271,16 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
// Generate SHA1/SHA256/SHA512 intrinsics code.
|
||||
if (UseSHA1Intrinsics) {
|
||||
StubRoutines::_sha1_implCompress = generate_SHA1_stub(false, "SHA1_singleBlock");
|
||||
StubRoutines::_sha1_implCompressMB = generate_SHA1_stub(true, "SHA1_multiBlock");
|
||||
StubRoutines::_sha1_implCompress = generate_SHA1_stub(StubGenStubId::sha1_implCompress_id);
|
||||
StubRoutines::_sha1_implCompressMB = generate_SHA1_stub(StubGenStubId::sha1_implCompressMB_id);
|
||||
}
|
||||
if (UseSHA256Intrinsics) {
|
||||
StubRoutines::_sha256_implCompress = generate_SHA256_stub(false, "SHA256_singleBlock");
|
||||
StubRoutines::_sha256_implCompressMB = generate_SHA256_stub(true, "SHA256_multiBlock");
|
||||
StubRoutines::_sha256_implCompress = generate_SHA256_stub(StubGenStubId::sha256_implCompress_id);
|
||||
StubRoutines::_sha256_implCompressMB = generate_SHA256_stub(StubGenStubId::sha256_implCompressMB_id);
|
||||
}
|
||||
if (UseSHA512Intrinsics) {
|
||||
StubRoutines::_sha512_implCompress = generate_SHA512_stub(false, "SHA512_singleBlock");
|
||||
StubRoutines::_sha512_implCompressMB = generate_SHA512_stub(true, "SHA512_multiBlock");
|
||||
StubRoutines::_sha512_implCompress = generate_SHA512_stub(StubGenStubId::sha512_implCompress_id);
|
||||
StubRoutines::_sha512_implCompressMB = generate_SHA512_stub(StubGenStubId::sha512_implCompressMB_id);
|
||||
}
|
||||
|
||||
#ifdef COMPILER2
|
||||
@@ -3192,35 +3295,27 @@ class StubGenerator: public StubCodeGenerator {
|
||||
StubRoutines::_montgomerySquare
|
||||
= CAST_FROM_FN_PTR(address, SharedRuntime::montgomery_square);
|
||||
}
|
||||
if (UseSecondarySupersTable) {
|
||||
StubRoutines::_lookup_secondary_supers_table_slow_path_stub = generate_lookup_secondary_supers_table_slow_path_stub();
|
||||
if (!InlineSecondarySupersTest) {
|
||||
for (int slot = 0; slot < Klass::SECONDARY_SUPERS_TABLE_SIZE; slot++) {
|
||||
StubRoutines::_lookup_secondary_supers_table_stubs[slot] = generate_lookup_secondary_supers_table_stub(slot);
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
#endif // COMPILER2_OR_JVMCI
|
||||
}
|
||||
|
||||
public:
|
||||
StubGenerator(CodeBuffer* code, StubsKind kind) : StubCodeGenerator(code) {
|
||||
switch(kind) {
|
||||
case Initial_stubs:
|
||||
StubGenerator(CodeBuffer* code, StubGenBlobId blob_id) : StubCodeGenerator(code, blob_id) {
|
||||
switch(blob_id) {
|
||||
case initial_id:
|
||||
generate_initial_stubs();
|
||||
break;
|
||||
case Continuation_stubs:
|
||||
case continuation_id:
|
||||
generate_continuation_stubs();
|
||||
break;
|
||||
case Compiler_stubs:
|
||||
case compiler_id:
|
||||
generate_compiler_stubs();
|
||||
break;
|
||||
case Final_stubs:
|
||||
case final_id:
|
||||
generate_final_stubs();
|
||||
break;
|
||||
default:
|
||||
fatal("unexpected stubs kind: %d", kind);
|
||||
fatal("unexpected blob id: %d", blob_id);
|
||||
break;
|
||||
};
|
||||
}
|
||||
@@ -3259,6 +3354,6 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
};
|
||||
|
||||
void StubGenerator_generate(CodeBuffer* code, StubCodeGenerator::StubsKind kind) {
|
||||
StubGenerator g(code, kind);
|
||||
void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id) {
|
||||
StubGenerator g(code, blob_id);
|
||||
}
|
||||
|
||||
@@ -32,7 +32,18 @@
|
||||
// Implementation of the platform-specific part of StubRoutines - for
|
||||
// a description of how to extend it, see the stubRoutines.hpp file.
|
||||
|
||||
address StubRoutines::zarch::_partial_subtype_check = nullptr;
|
||||
// define fields for arch-specific entries
|
||||
|
||||
#define DEFINE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \
|
||||
address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = nullptr;
|
||||
|
||||
#define DEFINE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
|
||||
address StubRoutines:: arch :: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function);
|
||||
|
||||
STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY, DEFINE_ARCH_ENTRY_INIT)
|
||||
|
||||
#undef DEFINE_ARCH_ENTRY_INIT
|
||||
#undef DEFINE_ARCH_ENTRY
|
||||
|
||||
// Comapct string intrinsics: Translate table for string inflate intrinsic. Used by trot instruction.
|
||||
address StubRoutines::zarch::_trot_table_addr = nullptr;
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2017 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@@ -31,14 +31,17 @@
|
||||
|
||||
static bool returns_to_call_stub(address return_pc) { return return_pc == _call_stub_return_address; }
|
||||
|
||||
enum { // Platform dependent constants.
|
||||
// simply increase sizes if too small (assembler will crash if too small)
|
||||
_initial_stubs_code_size = 20000,
|
||||
_continuation_stubs_code_size = 2000,
|
||||
_compiler_stubs_code_size = 20000,
|
||||
_final_stubs_code_size = 20000
|
||||
// emit enum used to size per-blob code buffers
|
||||
|
||||
#define DEFINE_BLOB_SIZE(blob_name, size) \
|
||||
_ ## blob_name ## _code_size = size,
|
||||
|
||||
enum platform_dependent_constants {
|
||||
STUBGEN_ARCH_BLOBS_DO(DEFINE_BLOB_SIZE)
|
||||
};
|
||||
|
||||
#undef DEFINE_BLOB_SIZE
|
||||
|
||||
// MethodHandles adapters
|
||||
enum method_handles_platform_dependent_constants {
|
||||
method_handles_adapters_code_size = 5000
|
||||
@@ -69,10 +72,24 @@ class zarch {
|
||||
locked = 1
|
||||
};
|
||||
|
||||
// declare fields for arch-specific entries
|
||||
|
||||
#define DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name) \
|
||||
static address STUB_FIELD_NAME(field_name) ;
|
||||
|
||||
#define DECLARE_ARCH_ENTRY_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
|
||||
DECLARE_ARCH_ENTRY(arch, blob_name, stub_name, field_name, getter_name)
|
||||
|
||||
private:
|
||||
STUBGEN_ARCH_ENTRIES_DO(DECLARE_ARCH_ENTRY, DECLARE_ARCH_ENTRY_INIT)
|
||||
|
||||
#undef DECLARE_ARCH_ENTRY_INIT
|
||||
#undef DECLARE_ARCH_ENTRY
|
||||
|
||||
private:
|
||||
|
||||
static int _atomic_memory_operation_lock;
|
||||
|
||||
static address _partial_subtype_check;
|
||||
static juint _crc_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
|
||||
static juint _crc32c_table[CRC32_TABLES][CRC32_COLUMN_SIZE];
|
||||
|
||||
@@ -81,6 +98,20 @@ class zarch {
|
||||
static jlong _trot_table[TROT_COLUMN_SIZE];
|
||||
|
||||
public:
|
||||
|
||||
// declare getters for arch-specific entries
|
||||
|
||||
#define DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name) \
|
||||
static address getter_name() { return STUB_FIELD_NAME(field_name) ; }
|
||||
|
||||
#define DEFINE_ARCH_ENTRY_GETTER_INIT(arch, blob_name, stub_name, field_name, getter_name, init_function) \
|
||||
DEFINE_ARCH_ENTRY_GETTER(arch, blob_name, stub_name, field_name, getter_name)
|
||||
|
||||
STUBGEN_ARCH_ENTRIES_DO(DEFINE_ARCH_ENTRY_GETTER, DEFINE_ARCH_ENTRY_GETTER_INIT)
|
||||
|
||||
#undef DEFINE_ARCH_ENTRY_GETTER_INIT
|
||||
#undef DEFINE_ARCH_ENTRY_GETTER
|
||||
|
||||
// Global lock for everyone who needs to use atomic_compare_and_exchange
|
||||
// or atomic_increment -- should probably use more locks for more
|
||||
// scalability -- for instance one for each eden space or group of.
|
||||
@@ -92,8 +123,6 @@ class zarch {
|
||||
static int atomic_memory_operation_lock() { return _atomic_memory_operation_lock; }
|
||||
static void set_atomic_memory_operation_lock(int value) { _atomic_memory_operation_lock = value; }
|
||||
|
||||
static address partial_subtype_check() { return _partial_subtype_check; }
|
||||
|
||||
static void generate_load_absolute_address(MacroAssembler* masm, Register table, address table_addr, uint64_t table_contents);
|
||||
static void generate_load_crc_table_addr(MacroAssembler* masm, Register table);
|
||||
static void generate_load_crc32c_table_addr(MacroAssembler* masm, Register table);
|
||||
|
||||
@@ -199,13 +199,14 @@ void StubGenerator::generate_string_indexof(address *fnptrs) {
|
||||
|
||||
static void generate_string_indexof_stubs(StubGenerator *stubgen, address *fnptrs,
|
||||
StrIntrinsicNode::ArgEncoding ae, MacroAssembler *_masm) {
|
||||
StubCodeMark mark(stubgen, "StubRoutines", "stringIndexOf");
|
||||
bool isLL = (ae == StrIntrinsicNode::LL);
|
||||
bool isUL = (ae == StrIntrinsicNode::UL);
|
||||
bool isUU = (ae == StrIntrinsicNode::UU);
|
||||
bool isU = isUL || isUU; // At least one is UTF-16
|
||||
assert(isLL || isUL || isUU, "Encoding not recognized");
|
||||
|
||||
StubGenStubId stub_id = (isLL ? StubGenStubId::string_indexof_linear_ll_id : (isUL ? StubGenStubId::string_indexof_linear_ul_id : StubGenStubId::string_indexof_linear_uu_id));
|
||||
StubCodeMark mark(stubgen, stub_id);
|
||||
// Keep track of isUL since we need to generate UU code in the main body
|
||||
// for the case where we expand the needle from bytes to words on the stack.
|
||||
// This is done at L_wcharBegin. The algorithm used is:
|
||||
|
||||
@@ -9136,14 +9136,14 @@ void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Registe
|
||||
Label L_exit;
|
||||
|
||||
if (is_pclmulqdq_supported ) {
|
||||
const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr;
|
||||
const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr+1);
|
||||
const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::crc32c_table_addr();
|
||||
const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 1);
|
||||
|
||||
const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2);
|
||||
const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3);
|
||||
const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 2);
|
||||
const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 3);
|
||||
|
||||
const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4);
|
||||
const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5);
|
||||
const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 4);
|
||||
const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 5);
|
||||
assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1 ) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\"");
|
||||
} else {
|
||||
const_or_pre_comp_const_index[0] = 1;
|
||||
@@ -9216,14 +9216,14 @@ void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Registe
|
||||
Label L_exit;
|
||||
|
||||
if (is_pclmulqdq_supported) {
|
||||
const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr;
|
||||
const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 1);
|
||||
const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::crc32c_table_addr();
|
||||
const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 1);
|
||||
|
||||
const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2);
|
||||
const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3);
|
||||
const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 2);
|
||||
const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 3);
|
||||
|
||||
const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4);
|
||||
const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5);
|
||||
const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 4);
|
||||
const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::crc32c_table_addr() + 5);
|
||||
} else {
|
||||
const_or_pre_comp_const_index[0] = 1;
|
||||
const_or_pre_comp_const_index[1] = 0;
|
||||
|
||||
262
src/hotspot/cpu/x86/stubDeclarations_x86.hpp
Normal file
262
src/hotspot/cpu/x86/stubDeclarations_x86.hpp
Normal file
@@ -0,0 +1,262 @@
|
||||
/*
|
||||
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2025, Red Hat, Inc. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef CPU_X86_STUBDECLARATIONS_HPP
|
||||
#define CPU_X86_STUBDECLARATIONS_HPP
|
||||
|
||||
#define STUBGEN_INITIAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(initial, 20000 WINDOWS_ONLY(+1000)) \
|
||||
do_stub(initial, verify_mxcsr) \
|
||||
do_arch_entry(x86, initial, verify_mxcsr, verify_mxcsr_entry, \
|
||||
verify_mxcsr_entry) \
|
||||
LP64_ONLY( \
|
||||
do_stub(initial, get_previous_sp) \
|
||||
do_arch_entry(x86, initial, get_previous_sp, \
|
||||
get_previous_sp_entry, \
|
||||
get_previous_sp_entry) \
|
||||
do_stub(initial, f2i_fixup) \
|
||||
do_arch_entry(x86, initial, f2i_fixup, f2i_fixup, f2i_fixup) \
|
||||
do_stub(initial, f2l_fixup) \
|
||||
do_arch_entry(x86, initial, f2l_fixup, f2l_fixup, f2l_fixup) \
|
||||
do_stub(initial, d2i_fixup) \
|
||||
do_arch_entry(x86, initial, d2i_fixup, d2i_fixup, d2i_fixup) \
|
||||
do_stub(initial, d2l_fixup) \
|
||||
do_arch_entry(x86, initial, d2l_fixup, d2l_fixup, d2l_fixup) \
|
||||
do_stub(initial, float_sign_mask) \
|
||||
do_arch_entry(x86, initial, float_sign_mask, float_sign_mask, \
|
||||
float_sign_mask) \
|
||||
do_stub(initial, float_sign_flip) \
|
||||
do_arch_entry(x86, initial, float_sign_flip, float_sign_flip, \
|
||||
float_sign_flip) \
|
||||
do_stub(initial, double_sign_mask) \
|
||||
do_arch_entry(x86, initial, double_sign_mask, double_sign_mask, \
|
||||
double_sign_mask) \
|
||||
do_stub(initial, double_sign_flip) \
|
||||
do_arch_entry(x86, initial, double_sign_flip, double_sign_flip, \
|
||||
double_sign_flip) \
|
||||
) \
|
||||
NOT_LP64( \
|
||||
do_stub(initial, verify_fpu_cntrl_word) \
|
||||
do_arch_entry(x86, initial, verify_fpu_cntrl_word, \
|
||||
verify_fpu_cntrl_wrd_entry, \
|
||||
verify_fpu_cntrl_wrd_entry) \
|
||||
do_stub(initial, d2i_wrapper) \
|
||||
do_arch_entry(x86, initial, d2i_wrapper, d2i_wrapper, \
|
||||
d2i_wrapper) \
|
||||
do_stub(initial, d2l_wrapper) \
|
||||
do_arch_entry(x86, initial, d2l_wrapper, d2l_wrapper, \
|
||||
d2l_wrapper) \
|
||||
) \
|
||||
|
||||
|
||||
#define STUBGEN_CONTINUATION_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(continuation, 1000 LP64_ONLY(+2000)) \
|
||||
|
||||
|
||||
#define STUBGEN_COMPILER_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(compiler, 20000 LP64_ONLY(+60000) WINDOWS_ONLY(+2000)) \
|
||||
do_stub(compiler, vector_float_sign_mask) \
|
||||
do_arch_entry(x86, compiler, vector_float_sign_mask, \
|
||||
vector_float_sign_mask, vector_float_sign_mask) \
|
||||
do_stub(compiler, vector_float_sign_flip) \
|
||||
do_arch_entry(x86, compiler, vector_float_sign_flip, \
|
||||
vector_float_sign_flip, vector_float_sign_flip) \
|
||||
do_stub(compiler, vector_double_sign_mask) \
|
||||
do_arch_entry(x86, compiler, vector_double_sign_mask, \
|
||||
vector_double_sign_mask, vector_double_sign_mask) \
|
||||
do_stub(compiler, vector_double_sign_flip) \
|
||||
do_arch_entry(x86, compiler, vector_double_sign_flip, \
|
||||
vector_double_sign_flip, vector_double_sign_flip) \
|
||||
do_stub(compiler, vector_all_bits_set) \
|
||||
do_arch_entry(x86, compiler, vector_all_bits_set, \
|
||||
vector_all_bits_set, vector_all_bits_set) \
|
||||
do_stub(compiler, vector_int_mask_cmp_bits) \
|
||||
do_arch_entry(x86, compiler, vector_int_mask_cmp_bits, \
|
||||
vector_int_mask_cmp_bits, vector_int_mask_cmp_bits) \
|
||||
do_stub(compiler, vector_short_to_byte_mask) \
|
||||
do_arch_entry(x86, compiler, vector_short_to_byte_mask, \
|
||||
vector_short_to_byte_mask, vector_short_to_byte_mask) \
|
||||
do_stub(compiler, vector_byte_perm_mask) \
|
||||
do_arch_entry(x86, compiler,vector_byte_perm_mask, \
|
||||
vector_byte_perm_mask, vector_byte_perm_mask) \
|
||||
do_stub(compiler, vector_int_to_byte_mask) \
|
||||
do_arch_entry(x86, compiler, vector_int_to_byte_mask, \
|
||||
vector_int_to_byte_mask, vector_int_to_byte_mask) \
|
||||
do_stub(compiler, vector_int_to_short_mask) \
|
||||
do_arch_entry(x86, compiler, vector_int_to_short_mask, \
|
||||
vector_int_to_short_mask, vector_int_to_short_mask) \
|
||||
do_stub(compiler, vector_32_bit_mask) \
|
||||
do_arch_entry(x86, compiler, vector_32_bit_mask, \
|
||||
vector_32_bit_mask, vector_32_bit_mask) \
|
||||
do_stub(compiler, vector_64_bit_mask) \
|
||||
do_arch_entry(x86, compiler, vector_64_bit_mask, \
|
||||
vector_64_bit_mask, vector_64_bit_mask) \
|
||||
do_stub(compiler, vector_byte_shuffle_mask) \
|
||||
do_arch_entry(x86, compiler, vector_int_shuffle_mask, \
|
||||
vector_byte_shuffle_mask, vector_byte_shuffle_mask) \
|
||||
do_stub(compiler, vector_short_shuffle_mask) \
|
||||
do_arch_entry(x86, compiler, vector_int_shuffle_mask, \
|
||||
vector_short_shuffle_mask, vector_short_shuffle_mask) \
|
||||
do_stub(compiler, vector_int_shuffle_mask) \
|
||||
do_arch_entry(x86, compiler, vector_int_shuffle_mask, \
|
||||
vector_int_shuffle_mask, vector_int_shuffle_mask) \
|
||||
do_stub(compiler, vector_long_shuffle_mask) \
|
||||
do_arch_entry(x86, compiler, vector_long_shuffle_mask, \
|
||||
vector_long_shuffle_mask, vector_long_shuffle_mask) \
|
||||
do_stub(compiler, vector_long_sign_mask) \
|
||||
do_arch_entry(x86, compiler, vector_long_sign_mask, \
|
||||
vector_long_sign_mask, vector_long_sign_mask) \
|
||||
do_stub(compiler, vector_iota_indices) \
|
||||
do_arch_entry(x86, compiler, vector_iota_indices, \
|
||||
vector_iota_indices, vector_iota_indices) \
|
||||
do_stub(compiler, vector_count_leading_zeros_lut) \
|
||||
do_arch_entry(x86, compiler, vector_count_leading_zeros_lut, \
|
||||
vector_count_leading_zeros_lut, \
|
||||
vector_count_leading_zeros_lut) \
|
||||
do_stub(compiler, vector_reverse_bit_lut) \
|
||||
do_arch_entry(x86, compiler, vector_reverse_bit_lut, \
|
||||
vector_reverse_bit_lut, vector_reverse_bit_lut) \
|
||||
do_stub(compiler, vector_reverse_byte_perm_mask_short) \
|
||||
do_arch_entry(x86, compiler, vector_reverse_byte_perm_mask_short, \
|
||||
vector_reverse_byte_perm_mask_short, \
|
||||
vector_reverse_byte_perm_mask_short) \
|
||||
do_stub(compiler, vector_reverse_byte_perm_mask_int) \
|
||||
do_arch_entry(x86, compiler, vector_reverse_byte_perm_mask_int, \
|
||||
vector_reverse_byte_perm_mask_int, \
|
||||
vector_reverse_byte_perm_mask_int) \
|
||||
do_stub(compiler, vector_reverse_byte_perm_mask_long) \
|
||||
do_arch_entry(x86, compiler, vector_reverse_byte_perm_mask_long, \
|
||||
vector_reverse_byte_perm_mask_long, \
|
||||
vector_reverse_byte_perm_mask_long) \
|
||||
do_stub(compiler, vector_popcount_lut) \
|
||||
do_arch_entry(x86, compiler, vector_popcount_lut, \
|
||||
vector_popcount_lut, vector_popcount_lut) \
|
||||
do_stub(compiler, upper_word_mask) \
|
||||
do_arch_entry(x86, compiler, upper_word_mask, upper_word_mask_addr, \
|
||||
upper_word_mask_addr) \
|
||||
do_stub(compiler, shuffle_byte_flip_mask) \
|
||||
do_arch_entry(x86, compiler, shuffle_byte_flip_mask, \
|
||||
shuffle_byte_flip_mask_addr, \
|
||||
shuffle_byte_flip_mask_addr) \
|
||||
do_stub(compiler, pshuffle_byte_flip_mask) \
|
||||
do_arch_entry(x86, compiler, pshuffle_byte_flip_mask, \
|
||||
pshuffle_byte_flip_mask_addr, \
|
||||
pshuffle_byte_flip_mask_addr) \
|
||||
LP64_ONLY( \
|
||||
/* x86_64 exposes these 3 stubs via a generic entry array */ \
|
||||
/* oher arches use arch-specific entries */ \
|
||||
/* this really needs rationalising */ \
|
||||
do_stub(compiler, string_indexof_linear_ll) \
|
||||
do_stub(compiler, string_indexof_linear_uu) \
|
||||
do_stub(compiler, string_indexof_linear_ul) \
|
||||
do_stub(compiler, pshuffle_byte_flip_mask_sha512) \
|
||||
do_arch_entry(x86, compiler, pshuffle_byte_flip_mask_sha512, \
|
||||
pshuffle_byte_flip_mask_addr_sha512, \
|
||||
pshuffle_byte_flip_mask_addr_sha512) \
|
||||
do_stub(compiler, compress_perm_table32) \
|
||||
do_arch_entry(x86, compiler, compress_perm_table32, \
|
||||
compress_perm_table32, compress_perm_table32) \
|
||||
do_stub(compiler, compress_perm_table64) \
|
||||
do_arch_entry(x86, compiler, compress_perm_table64, \
|
||||
compress_perm_table64, compress_perm_table64) \
|
||||
do_stub(compiler, expand_perm_table32) \
|
||||
do_arch_entry(x86, compiler, expand_perm_table32, \
|
||||
expand_perm_table32, expand_perm_table32) \
|
||||
do_stub(compiler, expand_perm_table64) \
|
||||
do_arch_entry(x86, compiler, expand_perm_table64, \
|
||||
expand_perm_table64, expand_perm_table64) \
|
||||
do_stub(compiler, avx2_shuffle_base64) \
|
||||
do_arch_entry(x86, compiler, avx2_shuffle_base64, \
|
||||
avx2_shuffle_base64, base64_avx2_shuffle_addr) \
|
||||
do_stub(compiler, avx2_input_mask_base64) \
|
||||
do_arch_entry(x86, compiler, avx2_input_mask_base64, \
|
||||
avx2_input_mask_base64, \
|
||||
base64_avx2_input_mask_addr) \
|
||||
do_stub(compiler, avx2_lut_base64) \
|
||||
do_arch_entry(x86, compiler, avx2_lut_base64, \
|
||||
avx2_lut_base64, base64_avx2_lut_addr) \
|
||||
do_stub(compiler, avx2_decode_tables_base64) \
|
||||
do_arch_entry(x86, compiler, avx2_decode_tables_base64, \
|
||||
avx2_decode_tables_base64, \
|
||||
base64_AVX2_decode_tables_addr) \
|
||||
do_stub(compiler, avx2_decode_lut_tables_base64) \
|
||||
do_arch_entry(x86, compiler, avx2_decode_lut_tables_base64, \
|
||||
avx2_decode_lut_tables_base64, \
|
||||
base64_AVX2_decode_LUT_tables_addr) \
|
||||
do_stub(compiler, shuffle_base64) \
|
||||
do_arch_entry(x86, compiler, shuffle_base64, shuffle_base64, \
|
||||
base64_shuffle_addr) \
|
||||
do_stub(compiler, lookup_lo_base64) \
|
||||
do_arch_entry(x86, compiler, lookup_lo_base64, lookup_lo_base64, \
|
||||
base64_vbmi_lookup_lo_addr) \
|
||||
do_stub(compiler, lookup_hi_base64) \
|
||||
do_arch_entry(x86, compiler, lookup_hi_base64, lookup_hi_base64, \
|
||||
base64_vbmi_lookup_hi_addr) \
|
||||
do_stub(compiler, lookup_lo_base64url) \
|
||||
do_arch_entry(x86, compiler, lookup_lo_base64url, \
|
||||
lookup_lo_base64url, \
|
||||
base64_vbmi_lookup_lo_url_addr) \
|
||||
do_stub(compiler, lookup_hi_base64url) \
|
||||
do_arch_entry(x86, compiler, lookup_hi_base64url, \
|
||||
lookup_hi_base64url, \
|
||||
base64_vbmi_lookup_hi_url_addr) \
|
||||
do_stub(compiler, pack_vec_base64) \
|
||||
do_arch_entry(x86, compiler, pack_vec_base64, pack_vec_base64, \
|
||||
base64_vbmi_pack_vec_addr) \
|
||||
do_stub(compiler, join_0_1_base64) \
|
||||
do_arch_entry(x86, compiler, join_0_1_base64, join_0_1_base64, \
|
||||
base64_vbmi_join_0_1_addr) \
|
||||
do_stub(compiler, join_1_2_base64) \
|
||||
do_arch_entry(x86, compiler, join_1_2_base64, join_1_2_base64, \
|
||||
base64_vbmi_join_1_2_addr) \
|
||||
do_stub(compiler, join_2_3_base64) \
|
||||
do_arch_entry(x86, compiler, join_2_3_base64, join_2_3_base64, \
|
||||
base64_vbmi_join_2_3_addr) \
|
||||
do_stub(compiler, encoding_table_base64) \
|
||||
do_arch_entry(x86, compiler, encoding_table_base64, \
|
||||
encoding_table_base64, base64_encoding_table_addr) \
|
||||
do_stub(compiler, decoding_table_base64) \
|
||||
do_arch_entry(x86, compiler, decoding_table_base64, \
|
||||
decoding_table_base64, base64_decoding_table_addr) \
|
||||
) \
|
||||
|
||||
|
||||
#define STUBGEN_FINAL_BLOBS_ARCH_DO(do_stub, \
|
||||
do_arch_blob, \
|
||||
do_arch_entry, \
|
||||
do_arch_entry_init) \
|
||||
do_arch_blob(final, 11000 LP64_ONLY(+20000) \
|
||||
WINDOWS_ONLY(+22000) ZGC_ONLY(+20000)) \
|
||||
|
||||
#endif // CPU_X86_STUBDECLARATIONS_HPP
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -28,6 +28,7 @@
|
||||
#include "code/codeBlob.hpp"
|
||||
#include "runtime/continuation.hpp"
|
||||
#include "runtime/stubCodeGenerator.hpp"
|
||||
#include "runtime/stubRoutines.hpp"
|
||||
|
||||
// Stub Code definitions
|
||||
|
||||
@@ -87,29 +88,29 @@ class StubGenerator: public StubCodeGenerator {
|
||||
address generate_d2i_fixup();
|
||||
address generate_d2l_fixup();
|

address generate_count_leading_zeros_lut(const char *stub_name);
address generate_popcount_avx_lut(const char *stub_name);
address generate_iota_indices(const char *stub_name);
address generate_vector_reverse_bit_lut(const char *stub_name);
address generate_count_leading_zeros_lut();
address generate_popcount_avx_lut();
address generate_iota_indices();
address generate_vector_reverse_bit_lut();

address generate_vector_reverse_byte_perm_mask_long(const char *stub_name);
address generate_vector_reverse_byte_perm_mask_int(const char *stub_name);
address generate_vector_reverse_byte_perm_mask_short(const char *stub_name);
address generate_vector_byte_shuffle_mask(const char *stub_name);
address generate_vector_reverse_byte_perm_mask_long();
address generate_vector_reverse_byte_perm_mask_int();
address generate_vector_reverse_byte_perm_mask_short();
address generate_vector_byte_shuffle_mask();

address generate_fp_mask(const char *stub_name, int64_t mask);
address generate_fp_mask(StubGenStubId stub_id, int64_t mask);

address generate_compress_perm_table(const char *stub_name, int32_t esize);
address generate_compress_perm_table(StubGenStubId stub_id);

address generate_expand_perm_table(const char *stub_name, int32_t esize);
address generate_expand_perm_table(StubGenStubId stub_id);

address generate_vector_mask(const char *stub_name, int64_t mask);
address generate_vector_mask(StubGenStubId stub_id, int64_t mask);

address generate_vector_byte_perm_mask(const char *stub_name);
address generate_vector_byte_perm_mask();

address generate_vector_fp_mask(const char *stub_name, int64_t mask);
address generate_vector_fp_mask(StubGenStubId stub_id, int64_t mask);

address generate_vector_custom_i32(const char *stub_name, Assembler::AvxVectorLen len,
address generate_vector_custom_i32(StubGenStubId stub_id, Assembler::AvxVectorLen len,
int32_t val0, int32_t val1, int32_t val2, int32_t val3,
int32_t val4 = 0, int32_t val5 = 0, int32_t val6 = 0, int32_t val7 = 0,
int32_t val8 = 0, int32_t val9 = 0, int32_t val10 = 0, int32_t val11 = 0,
@@ -179,12 +180,10 @@ class StubGenerator: public StubCodeGenerator {
// - If user sets AVX3Threshold=0, then special cases for small blocks sizes operate over
// 64 byte vector registers (ZMMs).

address generate_disjoint_copy_avx3_masked(address* entry, const char *name, int shift,
bool aligned, bool is_oop, bool dest_uninitialized);
address generate_disjoint_copy_avx3_masked(StubGenStubId stub_id, address* entry);

address generate_conjoint_copy_avx3_masked(address* entry, const char *name, int shift,
address nooverlap_target, bool aligned, bool is_oop,
bool dest_uninitialized);
address generate_conjoint_copy_avx3_masked(StubGenStubId stub_id, address* entry,
address nooverlap_target);

void arraycopy_avx3_special_cases(XMMRegister xmm, KRegister mask, Register from,
Register to, Register count, int shift,
@@ -225,27 +224,21 @@ class StubGenerator: public StubCodeGenerator {
Register temp, int shift = Address::times_1, int offset = 0);
#endif // COMPILER2_OR_JVMCI

address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name);
address generate_disjoint_byte_copy(address* entry);

address generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
address* entry, const char *name);
address generate_conjoint_byte_copy(address nooverlap_target, address* entry);

address generate_disjoint_short_copy(bool aligned, address *entry, const char *name);
address generate_disjoint_short_copy(address *entry);

address generate_fill(BasicType t, bool aligned, const char *name);
address generate_fill(StubGenStubId stub_id);

address generate_conjoint_short_copy(bool aligned, address nooverlap_target,
address *entry, const char *name);
address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry,
const char *name, bool dest_uninitialized = false);
address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target,
address *entry, const char *name,
bool dest_uninitialized = false);
address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry,
const char *name, bool dest_uninitialized = false);
address generate_conjoint_long_oop_copy(bool aligned, bool is_oop,
address nooverlap_target, address *entry,
const char *name, bool dest_uninitialized = false);
address generate_conjoint_short_copy(address nooverlap_target, address *entry);
address generate_disjoint_int_oop_copy(StubGenStubId stub_id, address* entry);
address generate_conjoint_int_oop_copy(StubGenStubId stub_id, address nooverlap_target,
address *entry);
address generate_disjoint_long_oop_copy(StubGenStubId stub_id, address* entry);
address generate_conjoint_long_oop_copy(StubGenStubId stub_id, address nooverlap_target,
address *entry);

// Helper for generating a dynamic type check.
// Smashes no registers.
@@ -255,8 +248,7 @@ class StubGenerator: public StubCodeGenerator {
Label& L_success);

// Generate checkcasting array copy stub
address generate_checkcast_copy(const char *name, address *entry,
bool dest_uninitialized = false);
address generate_checkcast_copy(StubGenStubId stub_id, address *entry);

// Generate 'unsafe' array copy stub
// Though just as safe as the other stubs, it takes an unscaled
@@ -264,8 +256,7 @@ class StubGenerator: public StubCodeGenerator {
//
// Examines the alignment of the operands and dispatches
// to a long, int, short, or byte copy loop.
address generate_unsafe_copy(const char *name,
address byte_copy_entry, address short_copy_entry,
address generate_unsafe_copy(address byte_copy_entry, address short_copy_entry,
address int_copy_entry, address long_copy_entry);

// Generate 'unsafe' set memory stub
@@ -274,7 +265,7 @@ class StubGenerator: public StubCodeGenerator {
//
// Examines the alignment of the operands and dispatches
// to an int, short, or byte copy loop.
address generate_unsafe_setmemory(const char *name, address byte_copy_entry);
address generate_unsafe_setmemory(address byte_copy_entry);

// Perform range checks on the proposed arraycopy.
// Kills temp, but nothing else.
@@ -288,8 +279,7 @@ class StubGenerator: public StubCodeGenerator {
Label& L_failed);

// Generate generic array copy stubs
address generate_generic_copy(const char *name,
address byte_copy_entry, address short_copy_entry,
address generate_generic_copy(address byte_copy_entry, address short_copy_entry,
address int_copy_entry, address oop_copy_entry,
address long_copy_entry, address checkcast_copy_entry);

@@ -304,19 +294,19 @@ class StubGenerator: public StubCodeGenerator {

// ofs and limit are use for multi-block byte array.
// int com.sun.security.provider.MD5.implCompress(byte[] b, int ofs)
address generate_md5_implCompress(bool multi_block, const char *name);
address generate_md5_implCompress(StubGenStubId stub_id);


// SHA stubs

// ofs and limit are use for multi-block byte array.
// int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
address generate_sha1_implCompress(bool multi_block, const char *name);
address generate_sha1_implCompress(StubGenStubId stub_id);

// ofs and limit are use for multi-block byte array.
// int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
address generate_sha256_implCompress(bool multi_block, const char *name);
address generate_sha512_implCompress(bool multi_block, const char *name);
address generate_sha256_implCompress(StubGenStubId stub_id);
address generate_sha512_implCompress(StubGenStubId stub_id);

// Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
address generate_pshuffle_byte_flip_mask_sha512();
@@ -499,7 +489,7 @@ class StubGenerator: public StubCodeGenerator {

// SHA3 stubs
void generate_sha3_stubs();
address generate_sha3_implCompress(bool multiBlock, const char *name);
address generate_sha3_implCompress(StubGenStubId stub_id);

// BASE64 stubs

@@ -595,7 +585,7 @@ class StubGenerator: public StubCodeGenerator {
void generate_string_indexof(address *fnptrs);
#endif

address generate_cont_thaw(const char* label, Continuation::thaw_kind kind);
address generate_cont_thaw(StubGenStubId stub_id);
address generate_cont_thaw();

// TODO: will probably need multiple return barriers depending on return type
@@ -604,6 +594,8 @@ class StubGenerator: public StubCodeGenerator {

address generate_cont_preempt_stub();

// TODO -- delete this as it is not implemented?
//
// Continuation point for throwing of implicit exceptions that are
// not handled in the current activation. Fabricates an exception
// oop and initiates normal exception dispatching in this
@@ -629,7 +621,7 @@ class StubGenerator: public StubCodeGenerator {
address generate_upcall_stub_load_target();

// Specialized stub implementations for UseSecondarySupersTable.
address generate_lookup_secondary_supers_table_stub(u1 super_klass_index);
void generate_lookup_secondary_supers_table_stub();

// Slow path implementation for UseSecondarySupersTable.
address generate_lookup_secondary_supers_table_slow_path_stub();
@@ -642,8 +634,8 @@ class StubGenerator: public StubCodeGenerator {
void generate_compiler_stubs();
void generate_final_stubs();

public:
StubGenerator(CodeBuffer* code, StubsKind kind);
public:
StubGenerator(CodeBuffer* code, StubGenBlobId blob_id);
};

#endif // CPU_X86_STUBGENERATOR_X86_64_HPP

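// A small, self-contained sketch of the pattern the header hunks above introduce: the
// per-stub (name, flags) parameter lists collapse into a single stub id, and everything a
// stub needs -- its flags and its printable name -- is derived from that id in one place.
// StubId, StubConfig and stub_config() below are illustrative stand-ins, not the real
// HotSpot StubGenStubId/StubGenerator declarations.
#include <cstdio>

enum class StubId { jbyte_fill, jshort_fill, jint_fill, arrayof_jint_fill };

struct StubConfig {
  const char* name;   // what the old const char* parameter used to carry
  int element_size;   // bytes per element
  bool aligned;       // HeapWord-aligned ("arrayof") variant?
};

// One switch replaces the flags that were previously threaded through every call site.
static StubConfig stub_config(StubId id) {
  switch (id) {
    case StubId::jbyte_fill:        return {"jbyte_fill", 1, false};
    case StubId::jshort_fill:       return {"jshort_fill", 2, false};
    case StubId::jint_fill:         return {"jint_fill", 4, false};
    case StubId::arrayof_jint_fill: return {"arrayof_jint_fill", 4, true};
  }
  return {"unknown", 0, false};     // unreachable if the switch stays exhaustive
}

int main() {
  StubConfig c = stub_config(StubId::arrayof_jint_fill);
  std::printf("%s: %d-byte elements, aligned=%d\n", c.name, c.element_size, (int)c.aligned);
}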
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2023, Intel Corporation. All rights reserved.
* Copyright (c) 2021, 2024, Intel Corporation. All rights reserved.
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -66,7 +66,8 @@ address StubGenerator::generate_updateBytesAdler32() {
assert(UseAdler32Intrinsics, "");

__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "updateBytesAdler32");
StubGenStubId stub_id = StubGenStubId::updateBytesAdler32_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();

// Choose an appropriate LIMIT for inner loop based on the granularity

@@ -249,7 +249,8 @@ void StubGenerator::generate_aes_stubs() {
// rax - number of processed bytes
address StubGenerator::generate_galoisCounterMode_AESCrypt() {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "galoisCounterMode_AESCrypt");
StubGenStubId stub_id = StubGenStubId::galoisCounterMode_AESCrypt_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();

const Register in = c_rarg0;
@@ -335,7 +336,8 @@ address StubGenerator::generate_galoisCounterMode_AESCrypt() {
// rax - number of processed bytes
address StubGenerator::generate_avx2_galoisCounterMode_AESCrypt() {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "galoisCounterMode_AESCrypt");
StubGenStubId stub_id = StubGenStubId::galoisCounterMode_AESCrypt_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();

const Register in = c_rarg0;
@@ -406,7 +408,8 @@ address StubGenerator::generate_avx2_galoisCounterMode_AESCrypt() {
// Vector AES Counter implementation
address StubGenerator::generate_counterMode_VectorAESCrypt() {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt");
StubGenStubId stub_id = StubGenStubId::counterMode_AESCrypt_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();

const Register from = c_rarg0; // source array address
@@ -494,7 +497,8 @@ address StubGenerator::generate_counterMode_VectorAESCrypt() {
address StubGenerator::generate_counterMode_AESCrypt_Parallel() {
assert(UseAES, "need AES instructions and misaligned SSE support");
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "counterMode_AESCrypt");
StubGenStubId stub_id = StubGenStubId::counterMode_AESCrypt_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();

const Register from = c_rarg0; // source array address
@@ -781,7 +785,8 @@ address StubGenerator::generate_counterMode_AESCrypt_Parallel() {
address StubGenerator::generate_cipherBlockChaining_decryptVectorAESCrypt() {
assert(VM_Version::supports_avx512_vaes(), "need AES instructions and misaligned SSE support");
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_decryptAESCrypt_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();

const Register from = c_rarg0; // source array address
@@ -1063,7 +1068,8 @@ address StubGenerator::generate_cipherBlockChaining_decryptVectorAESCrypt() {
address StubGenerator::generate_aescrypt_encryptBlock() {
assert(UseAES, "need AES instructions and misaligned SSE support");
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
StubGenStubId stub_id = StubGenStubId::aescrypt_encryptBlock_id;
StubCodeMark mark(this, stub_id);
Label L_doLast;
address start = __ pc();

@@ -1157,7 +1163,8 @@ address StubGenerator::generate_aescrypt_encryptBlock() {
address StubGenerator::generate_aescrypt_decryptBlock() {
assert(UseAES, "need AES instructions and misaligned SSE support");
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
StubGenStubId stub_id = StubGenStubId::aescrypt_decryptBlock_id;
StubCodeMark mark(this, stub_id);
Label L_doLast;
address start = __ pc();

@@ -1258,7 +1265,8 @@ address StubGenerator::generate_aescrypt_decryptBlock() {
address StubGenerator::generate_cipherBlockChaining_encryptAESCrypt() {
assert(UseAES, "need AES instructions and misaligned SSE support");
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_encryptAESCrypt_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();

Label L_exit, L_key_192_256, L_key_256, L_loopTop_128, L_loopTop_192, L_loopTop_256;
@@ -1409,7 +1417,8 @@ address StubGenerator::generate_cipherBlockChaining_encryptAESCrypt() {
address StubGenerator::generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
assert(UseAES, "need AES instructions and misaligned SSE support");
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
StubGenStubId stub_id = StubGenStubId::cipherBlockChaining_decryptAESCrypt_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();

const Register from = c_rarg0; // source array address
@@ -1651,7 +1660,8 @@ __ opc(xmm_result3, src_reg); \

address StubGenerator::generate_electronicCodeBook_encryptAESCrypt() {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "electronicCodeBook_encryptAESCrypt");
StubGenStubId stub_id = StubGenStubId::electronicCodeBook_encryptAESCrypt_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();

const Register from = c_rarg0; // source array address
@@ -1671,7 +1681,8 @@ address StubGenerator::generate_electronicCodeBook_encryptAESCrypt() {

address StubGenerator::generate_electronicCodeBook_decryptAESCrypt() {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "electronicCodeBook_decryptAESCrypt");
StubGenStubId stub_id = StubGenStubId::electronicCodeBook_decryptAESCrypt_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();

const Register from = c_rarg0; // source array address

@@ -84,74 +84,51 @@ void StubGenerator::generate_arraycopy_stubs() {
address entry_jlong_arraycopy;
address entry_checkcast_arraycopy;

StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry,
"jbyte_disjoint_arraycopy");
StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy,
"jbyte_arraycopy");
StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(&entry);
StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(entry, &entry_jbyte_arraycopy);

StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry,
"jshort_disjoint_arraycopy");
StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy,
"jshort_arraycopy");
StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(&entry);
StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(entry, &entry_jshort_arraycopy);

StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry,
"jint_disjoint_arraycopy");
StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, entry,
&entry_jint_arraycopy, "jint_arraycopy");
StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(StubGenStubId::jint_disjoint_arraycopy_id, &entry);
StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(StubGenStubId::jint_arraycopy_id, entry, &entry_jint_arraycopy);

StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry,
"jlong_disjoint_arraycopy");
StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry,
&entry_jlong_arraycopy, "jlong_arraycopy");
StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(StubGenStubId::jlong_disjoint_arraycopy_id, &entry);
StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(StubGenStubId::jlong_arraycopy_id, entry, &entry_jlong_arraycopy);
if (UseCompressedOops) {
StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry,
"oop_disjoint_arraycopy");
StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry,
&entry_oop_arraycopy, "oop_arraycopy");
StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry,
"oop_disjoint_arraycopy_uninit",
/*dest_uninitialized*/true);
StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry,
nullptr, "oop_arraycopy_uninit",
/*dest_uninitialized*/true);
StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(StubGenStubId::oop_disjoint_arraycopy_id, &entry);
StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(StubGenStubId::oop_arraycopy_id, entry, &entry_oop_arraycopy);
StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(StubGenStubId::oop_disjoint_arraycopy_uninit_id, &entry);
StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(StubGenStubId::oop_arraycopy_uninit_id, entry, nullptr);
} else {
StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry,
"oop_disjoint_arraycopy");
StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry,
&entry_oop_arraycopy, "oop_arraycopy");
StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry,
"oop_disjoint_arraycopy_uninit",
/*dest_uninitialized*/true);
StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry,
nullptr, "oop_arraycopy_uninit",
/*dest_uninitialized*/true);
StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(StubGenStubId::oop_disjoint_arraycopy_id, &entry);
StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(StubGenStubId::oop_arraycopy_id, entry, &entry_oop_arraycopy);
StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(StubGenStubId::oop_disjoint_arraycopy_uninit_id, &entry);
StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(StubGenStubId::oop_arraycopy_uninit_id, entry, nullptr);
}

StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy);
StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", nullptr,
/*dest_uninitialized*/true);
StubRoutines::_checkcast_arraycopy = generate_checkcast_copy(StubGenStubId::checkcast_arraycopy_id, &entry_checkcast_arraycopy);
StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy(StubGenStubId::checkcast_arraycopy_uninit_id, nullptr);

StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy",
entry_jbyte_arraycopy,
StubRoutines::_unsafe_arraycopy = generate_unsafe_copy(entry_jbyte_arraycopy,
entry_jshort_arraycopy,
entry_jint_arraycopy,
entry_jlong_arraycopy);
StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy",
entry_jbyte_arraycopy,
StubRoutines::_generic_arraycopy = generate_generic_copy(entry_jbyte_arraycopy,
entry_jshort_arraycopy,
entry_jint_arraycopy,
entry_oop_arraycopy,
entry_jlong_arraycopy,
entry_checkcast_arraycopy);

StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill");
StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill");
StubRoutines::_jint_fill = generate_fill(T_INT, false, "jint_fill");
StubRoutines::_arrayof_jbyte_fill = generate_fill(T_BYTE, true, "arrayof_jbyte_fill");
StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill");
StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill");
StubRoutines::_jbyte_fill = generate_fill(StubGenStubId::jbyte_fill_id);
StubRoutines::_jshort_fill = generate_fill(StubGenStubId::jshort_fill_id);
StubRoutines::_jint_fill = generate_fill(StubGenStubId::jint_fill_id);
StubRoutines::_arrayof_jbyte_fill = generate_fill(StubGenStubId::arrayof_jbyte_fill_id);
StubRoutines::_arrayof_jshort_fill = generate_fill(StubGenStubId::arrayof_jshort_fill_id);
StubRoutines::_arrayof_jint_fill = generate_fill(StubGenStubId::arrayof_jint_fill_id);

StubRoutines::_unsafe_setmemory = generate_unsafe_setmemory("unsafe_setmemory", StubRoutines::_jbyte_fill);
StubRoutines::_unsafe_setmemory = generate_unsafe_setmemory(StubRoutines::_jbyte_fill);

// We don't generate specialized code for HeapWord-aligned source
// arrays, so just use the code we've already generated
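// The registration code above routes oop arraycopies through the int-sized or long-sized
// copy stubs depending on UseCompressedOops; the element-size "shift" used by the copy
// generators later in this diff follows the same rule. A tiny stand-alone illustration
// (use_compressed_oops is a plain bool here, standing in for the real VM flag):
#include <cstdio>

static int oop_copy_shift(bool use_compressed_oops) {
  return use_compressed_oops ? 2 : 3;   // log2 of element size: 4-byte narrow oop vs 8-byte oop
}

int main() {
  std::printf("compressed oops:   %d bytes per element\n", 1 << oop_copy_shift(true));   // 4
  std::printf("uncompressed oops: %d bytes per element\n", 1 << oop_copy_shift(false));  // 8
}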
@@ -507,11 +484,50 @@ void StubGenerator::copy_bytes_backward(Register from, Register dest,
// disjoint_copy_avx3_masked is set to the no-overlap entry point
// used by generate_conjoint_[byte/int/short/long]_copy().
//
address StubGenerator::generate_disjoint_copy_avx3_masked(address* entry, const char *name,
int shift, bool aligned, bool is_oop,
bool dest_uninitialized) {
address StubGenerator::generate_disjoint_copy_avx3_masked(StubGenStubId stub_id, address* entry) {
// aligned is always false -- x86_64 always uses the unaligned code
const bool aligned = false;
int shift;
bool is_oop;
bool dest_uninitialized;

switch (stub_id) {
case jbyte_disjoint_arraycopy_id:
shift = 0;
is_oop = false;
dest_uninitialized = false;
break;
case jshort_disjoint_arraycopy_id:
shift = 1;
is_oop = false;
dest_uninitialized = false;
break;
case jint_disjoint_arraycopy_id:
shift = 2;
is_oop = false;
dest_uninitialized = false;
break;
case jlong_disjoint_arraycopy_id:
shift = 3;
is_oop = false;
dest_uninitialized = false;
break;
case oop_disjoint_arraycopy_id:
shift = (UseCompressedOops ? 2 : 3);
is_oop = true;
dest_uninitialized = false;
break;
case oop_disjoint_arraycopy_uninit_id:
shift = (UseCompressedOops ? 2 : 3);
is_oop = true;
dest_uninitialized = true;
break;
default:
ShouldNotReachHere();
}

__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
StubCodeMark mark(this, stub_id);
address start = __ pc();

int avx3threshold = VM_Version::avx3_threshold();
@@ -806,11 +822,50 @@ void StubGenerator::arraycopy_avx3_large(Register to, Register from, Register te
// c_rarg2 - element count, treated as ssize_t, can be zero
//
//
address StubGenerator::generate_conjoint_copy_avx3_masked(address* entry, const char *name, int shift,
address nooverlap_target, bool aligned,
bool is_oop, bool dest_uninitialized) {
address StubGenerator::generate_conjoint_copy_avx3_masked(StubGenStubId stub_id, address* entry, address nooverlap_target) {
// aligned is always false -- x86_64 always uses the unaligned code
const bool aligned = false;
int shift;
bool is_oop;
bool dest_uninitialized;

switch (stub_id) {
case jbyte_arraycopy_id:
shift = 0;
is_oop = false;
dest_uninitialized = false;
break;
case jshort_arraycopy_id:
shift = 1;
is_oop = false;
dest_uninitialized = false;
break;
case jint_arraycopy_id:
shift = 2;
is_oop = false;
dest_uninitialized = false;
break;
case jlong_arraycopy_id:
shift = 3;
is_oop = false;
dest_uninitialized = false;
break;
case oop_arraycopy_id:
shift = (UseCompressedOops ? 2 : 3);
is_oop = true;
dest_uninitialized = false;
break;
case oop_arraycopy_uninit_id:
shift = (UseCompressedOops ? 2 : 3);
is_oop = true;
dest_uninitialized = true;
break;
default:
ShouldNotReachHere();
}

__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
StubCodeMark mark(this, stub_id);
address start = __ pc();

int avx3threshold = VM_Version::avx3_threshold();
@@ -1262,9 +1317,7 @@ void StubGenerator::copy64_avx(Register dst, Register src, Register index, XMMRe


// Arguments:
// aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
// ignored
// name - stub name string
// entry - location for return of (post-push) entry
//
// Inputs:
// c_rarg0 - source array address
@@ -1277,18 +1330,20 @@ void StubGenerator::copy64_avx(Register dst, Register src, Register index, XMMRe
// and stored atomically.
//
// Side Effects:
// disjoint_byte_copy_entry is set to the no-overlap entry point
// entry is set to the no-overlap entry point
// used by generate_conjoint_byte_copy().
//
address StubGenerator::generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) {
address StubGenerator::generate_disjoint_byte_copy(address* entry) {
StubGenStubId stub_id = StubGenStubId::jbyte_disjoint_arraycopy_id;
// aligned is always false -- x86_64 always uses the unaligned code
const bool aligned = false;
#if COMPILER2_OR_JVMCI
if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) {
return generate_disjoint_copy_avx3_masked(entry, "jbyte_disjoint_arraycopy_avx3", 0,
aligned, false, false);
return generate_disjoint_copy_avx3_masked(stub_id, entry);
}
#endif
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
StubCodeMark mark(this, stub_id);
address start = __ pc();
DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT;

@@ -1383,9 +1438,8 @@ __ BIND(L_exit);


// Arguments:
// aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
// ignored
// name - stub name string
// entry - location for return of (post-push) entry
// nooverlap_target - entry to branch to if no overlap detected
//
// Inputs:
// c_rarg0 - source array address
@@ -1397,16 +1451,17 @@ __ BIND(L_exit);
// dwords or qwords that span cache line boundaries will still be loaded
// and stored atomically.
//
address StubGenerator::generate_conjoint_byte_copy(bool aligned, address nooverlap_target,
address* entry, const char *name) {
address StubGenerator::generate_conjoint_byte_copy(address nooverlap_target, address* entry) {
StubGenStubId stub_id = StubGenStubId::jbyte_arraycopy_id;
// aligned is always false -- x86_64 always uses the unaligned code
const bool aligned = false;
#if COMPILER2_OR_JVMCI
if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) {
return generate_conjoint_copy_avx3_masked(entry, "jbyte_conjoint_arraycopy_avx3", 0,
nooverlap_target, aligned, false, false);
return generate_conjoint_copy_avx3_masked(stub_id, entry, nooverlap_target);
}
#endif
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
StubCodeMark mark(this, stub_id);
address start = __ pc();
DecoratorSet decorators = IN_HEAP | IS_ARRAY;

@@ -1493,9 +1548,7 @@ address StubGenerator::generate_conjoint_byte_copy(bool aligned, address nooverl


// Arguments:
// aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
// ignored
// name - stub name string
// entry - location for return of (post-push) entry
//
// Inputs:
// c_rarg0 - source array address
@@ -1508,19 +1561,21 @@ address StubGenerator::generate_conjoint_byte_copy(bool aligned, address nooverl
// and stored atomically.
//
// Side Effects:
// disjoint_short_copy_entry is set to the no-overlap entry point
// entry is set to the no-overlap entry point
// used by generate_conjoint_short_copy().
//
address StubGenerator::generate_disjoint_short_copy(bool aligned, address *entry, const char *name) {
address StubGenerator::generate_disjoint_short_copy(address *entry) {
StubGenStubId stub_id = StubGenStubId::jshort_disjoint_arraycopy_id;
// aligned is always false -- x86_64 always uses the unaligned code
const bool aligned = false;
#if COMPILER2_OR_JVMCI
if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) {
return generate_disjoint_copy_avx3_masked(entry, "jshort_disjoint_arraycopy_avx3", 1,
aligned, false, false);
return generate_disjoint_copy_avx3_masked(stub_id, entry);
}
#endif

__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
StubCodeMark mark(this, stub_id);
address start = __ pc();
DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT;

@@ -1607,9 +1662,41 @@ __ BIND(L_exit);
}


address StubGenerator::generate_fill(BasicType t, bool aligned, const char *name) {
address StubGenerator::generate_fill(StubGenStubId stub_id) {
BasicType t;
bool aligned;

switch (stub_id) {
case jbyte_fill_id:
t = T_BYTE;
aligned = false;
break;
case jshort_fill_id:
t = T_SHORT;
aligned = false;
break;
case jint_fill_id:
t = T_INT;
aligned = false;
break;
case arrayof_jbyte_fill_id:
t = T_BYTE;
aligned = true;
break;
case arrayof_jshort_fill_id:
t = T_SHORT;
aligned = true;
break;
case arrayof_jint_fill_id:
t = T_INT;
aligned = true;
break;
default:
ShouldNotReachHere();
}

__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
StubCodeMark mark(this, stub_id);
address start = __ pc();

BLOCK_COMMENT("Entry:");
@@ -1636,9 +1723,8 @@ address StubGenerator::generate_fill(BasicType t, bool aligned, const char *name


// Arguments:
// aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
// ignored
// name - stub name string
// entry - location for return of (post-push) entry
// nooverlap_target - entry to branch to if no overlap detected
//
// Inputs:
// c_rarg0 - source array address
@@ -1650,16 +1736,18 @@ address StubGenerator::generate_fill(BasicType t, bool aligned, const char *name
// or qwords that span cache line boundaries will still be loaded
// and stored atomically.
//
address StubGenerator::generate_conjoint_short_copy(bool aligned, address nooverlap_target,
address *entry, const char *name) {
address StubGenerator::generate_conjoint_short_copy(address nooverlap_target, address *entry) {
StubGenStubId stub_id = StubGenStubId::jshort_arraycopy_id;
// aligned is always false -- x86_64 always uses the unaligned code
const bool aligned = false;
#if COMPILER2_OR_JVMCI
if (VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) {
return generate_conjoint_copy_avx3_masked(entry, "jshort_conjoint_arraycopy_avx3", 1,
nooverlap_target, aligned, false, false);
return generate_conjoint_copy_avx3_masked(stub_id, entry, nooverlap_target);
}
#endif

__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
StubCodeMark mark(this, stub_id);
address start = __ pc();
DecoratorSet decorators = IN_HEAP | IS_ARRAY;

@@ -1738,10 +1826,9 @@ address StubGenerator::generate_conjoint_short_copy(bool aligned, address noover


// Arguments:
// aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
// ignored
// is_oop - true => oop array, so generate store check code
// name - stub name string
// stub_id - unqiue id for stub to generate
// entry - location for return of (post-push) entry
// is_oop - true => oop array, so generate store check code
//
// Inputs:
// c_rarg0 - source array address
@@ -1756,18 +1843,39 @@ address StubGenerator::generate_conjoint_short_copy(bool aligned, address noover
// disjoint_int_copy_entry is set to the no-overlap entry point
// used by generate_conjoint_int_oop_copy().
//
address StubGenerator::generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry,
const char *name, bool dest_uninitialized) {
address StubGenerator::generate_disjoint_int_oop_copy(StubGenStubId stub_id, address* entry) {
// aligned is always false -- x86_64 always uses the unaligned code
const bool aligned = false;
bool is_oop;
bool dest_uninitialized;
switch (stub_id) {
case StubGenStubId::jint_disjoint_arraycopy_id:
is_oop = false;
dest_uninitialized = false;
break;
case StubGenStubId::oop_disjoint_arraycopy_id:
assert(UseCompressedOops, "inconsistent oop copy size!");
is_oop = true;
dest_uninitialized = false;
break;
case StubGenStubId::oop_disjoint_arraycopy_uninit_id:
assert(UseCompressedOops, "inconsistent oop copy size!");
is_oop = true;
dest_uninitialized = true;
break;
default:
ShouldNotReachHere();
}

BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
#if COMPILER2_OR_JVMCI
if ((!is_oop || bs->supports_avx3_masked_arraycopy()) && VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) {
return generate_disjoint_copy_avx3_masked(entry, "jint_disjoint_arraycopy_avx3", 2,
aligned, is_oop, dest_uninitialized);
return generate_disjoint_copy_avx3_masked(stub_id, entry);
}
#endif

__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
StubCodeMark mark(this, stub_id);
address start = __ pc();

Label L_copy_bytes, L_copy_8_bytes, L_copy_4_bytes, L_exit;
@@ -1853,10 +1961,9 @@ __ BIND(L_exit);


// Arguments:
// aligned - true => Input and output aligned on a HeapWord == 8-byte boundary
// ignored
// entry - location for return of (post-push) entry
// nooverlap_target - entry to branch to if no overlap detected
// is_oop - true => oop array, so generate store check code
// name - stub name string
//
// Inputs:
// c_rarg0 - source array address
@@ -1867,18 +1974,39 @@ __ BIND(L_exit);
// the hardware handle it. The two dwords within qwords that span
// cache line boundaries will still be loaded and stored atomically.
//
address StubGenerator::generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target,
address *entry, const char *name,
bool dest_uninitialized) {
address StubGenerator::generate_conjoint_int_oop_copy(StubGenStubId stub_id, address nooverlap_target, address *entry) {
// aligned is always false -- x86_64 always uses the unaligned code
const bool aligned = false;
bool is_oop;
bool dest_uninitialized;
switch (stub_id) {
case StubGenStubId::jint_arraycopy_id:
is_oop = false;
dest_uninitialized = false;
break;
case StubGenStubId::oop_arraycopy_id:
assert(UseCompressedOops, "inconsistent oop copy size!");
is_oop = true;
dest_uninitialized = false;
break;
case StubGenStubId::oop_arraycopy_uninit_id:
assert(UseCompressedOops, "inconsistent oop copy size!");
is_oop = true;
dest_uninitialized = true;
break;
default:
ShouldNotReachHere();
}

BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
#if COMPILER2_OR_JVMCI
if ((!is_oop || bs->supports_avx3_masked_arraycopy()) && VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) {
return generate_conjoint_copy_avx3_masked(entry, "jint_conjoint_arraycopy_avx3", 2,
nooverlap_target, aligned, is_oop, dest_uninitialized);
return generate_conjoint_copy_avx3_masked(stub_id, entry, nooverlap_target);
}
#endif

__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
StubCodeMark mark(this, stub_id);
address start = __ pc();

Label L_copy_bytes, L_copy_8_bytes, L_exit;
@@ -1968,10 +2096,7 @@ __ BIND(L_exit);


// Arguments:
// aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
// ignored
// is_oop - true => oop array, so generate store check code
// name - stub name string
// entry - location for return of (post-push) entry
//
// Inputs:
// c_rarg0 - source array address
@@ -1982,17 +2107,39 @@ __ BIND(L_exit);
// disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the
// no-overlap entry point used by generate_conjoint_long_oop_copy().
//
address StubGenerator::generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry,
const char *name, bool dest_uninitialized) {
address StubGenerator::generate_disjoint_long_oop_copy(StubGenStubId stub_id, address *entry) {
// aligned is always false -- x86_64 always uses the unaligned code
const bool aligned = false;
bool is_oop;
bool dest_uninitialized;
switch (stub_id) {
case StubGenStubId::jlong_disjoint_arraycopy_id:
is_oop = false;
dest_uninitialized = false;
break;
case StubGenStubId::oop_disjoint_arraycopy_id:
assert(!UseCompressedOops, "inconsistent oop copy size!");
is_oop = true;
dest_uninitialized = false;
break;
case StubGenStubId::oop_disjoint_arraycopy_uninit_id:
assert(!UseCompressedOops, "inconsistent oop copy size!");
is_oop = true;
dest_uninitialized = true;
break;
default:
ShouldNotReachHere();
}

BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
#if COMPILER2_OR_JVMCI
if ((!is_oop || bs->supports_avx3_masked_arraycopy()) && VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) {
return generate_disjoint_copy_avx3_masked(entry, "jlong_disjoint_arraycopy_avx3", 3,
aligned, is_oop, dest_uninitialized);
return generate_disjoint_copy_avx3_masked(stub_id, entry);
}
#endif

__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
StubCodeMark mark(this, stub_id);
address start = __ pc();

Label L_copy_bytes, L_copy_8_bytes, L_exit;
@@ -2084,28 +2231,48 @@ address StubGenerator::generate_disjoint_long_oop_copy(bool aligned, bool is_oop


// Arguments:
// aligned - true => Input and output aligned on a HeapWord boundary == 8 bytes
// ignored
// entry - location for return of (post-push) entry
// nooverlap_target - entry to branch to if no overlap detected
// is_oop - true => oop array, so generate store check code
// name - stub name string
//
// Inputs:
// c_rarg0 - source array address
// c_rarg1 - destination array address
// c_rarg2 - element count, treated as ssize_t, can be zero
//
address StubGenerator::generate_conjoint_long_oop_copy(bool aligned, bool is_oop, address nooverlap_target,
address *entry, const char *name,
bool dest_uninitialized) {
address StubGenerator::generate_conjoint_long_oop_copy(StubGenStubId stub_id, address nooverlap_target, address *entry) {
// aligned is always false -- x86_64 always uses the unaligned code
const bool aligned = false;
bool is_oop;
bool dest_uninitialized;
switch (stub_id) {
case StubGenStubId::jlong_arraycopy_id:
is_oop = false;
dest_uninitialized = false;
break;
case StubGenStubId::oop_arraycopy_id:
assert(!UseCompressedOops, "inconsistent oop copy size!");
is_oop = true;
dest_uninitialized = false;
break;
case StubGenStubId::oop_arraycopy_uninit_id:
assert(!UseCompressedOops, "inconsistent oop copy size!");
is_oop = true;
dest_uninitialized = true;
break;
default:
ShouldNotReachHere();
}

BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
#if COMPILER2_OR_JVMCI
if ((!is_oop || bs->supports_avx3_masked_arraycopy()) && VM_Version::supports_avx512vlbw() && VM_Version::supports_bmi2() && MaxVectorSize >= 32) {
return generate_conjoint_copy_avx3_masked(entry, "jlong_conjoint_arraycopy_avx3", 3,
nooverlap_target, aligned, is_oop, dest_uninitialized);
return generate_conjoint_copy_avx3_masked(stub_id, entry, nooverlap_target);
}
#endif

__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
StubCodeMark mark(this, stub_id);
address start = __ pc();

Label L_copy_bytes, L_copy_8_bytes, L_exit;
@@ -2224,7 +2391,19 @@ void StubGenerator::generate_type_check(Register sub_klass,
// rax == 0 - success
// rax == -1^K - failure, where K is partial transfer count
//
address StubGenerator::generate_checkcast_copy(const char *name, address *entry, bool dest_uninitialized) {
address StubGenerator::generate_checkcast_copy(StubGenStubId stub_id, address *entry) {

bool dest_uninitialized;
switch (stub_id) {
case StubGenStubId::checkcast_arraycopy_id:
dest_uninitialized = false;
break;
case StubGenStubId::checkcast_arraycopy_uninit_id:
dest_uninitialized = true;
break;
default:
ShouldNotReachHere();
}

Label L_load_element, L_store_element, L_do_card_marks, L_done;

@@ -2254,7 +2433,7 @@ address StubGenerator::generate_checkcast_copy(const char *name, address *entry,
// checked.

__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
StubCodeMark mark(this, stub_id);
address start = __ pc();

__ enter(); // required for proper stackwalking of RuntimeStub frame
@@ -2430,8 +2609,7 @@ address StubGenerator::generate_checkcast_copy(const char *name, address *entry,
// Examines the alignment of the operands and dispatches
// to a long, int, short, or byte copy loop.
//
address StubGenerator::generate_unsafe_copy(const char *name,
address byte_copy_entry, address short_copy_entry,
address StubGenerator::generate_unsafe_copy(address byte_copy_entry, address short_copy_entry,
address int_copy_entry, address long_copy_entry) {

Label L_long_aligned, L_int_aligned, L_short_aligned;
@@ -2445,7 +2623,8 @@ address StubGenerator::generate_unsafe_copy(const char *name,
const Register bits = rax; // test copy of low bits

__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
StubGenStubId stub_id = StubGenStubId::unsafe_arraycopy_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();

__ enter(); // required for proper stackwalking of RuntimeStub frame
@@ -2578,10 +2757,10 @@ static void do_setmemory_atomic_loop(USM_TYPE type, Register dest,
// Examines the alignment of the operands and dispatches
// to an int, short, or byte fill loop.
//
address StubGenerator::generate_unsafe_setmemory(const char *name,
address unsafe_byte_fill) {
address StubGenerator::generate_unsafe_setmemory(address unsafe_byte_fill) {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
StubGenStubId stub_id = StubGenStubId::unsafe_setmemory_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();
__ enter(); // required for proper stackwalking of RuntimeStub frame

@@ -2724,8 +2903,7 @@ void StubGenerator::arraycopy_range_checks(Register src, // source array oop
// rax == 0 - success
// rax == -1^K - failure, where K is partial transfer count
//
address StubGenerator::generate_generic_copy(const char *name,
address byte_copy_entry, address short_copy_entry,
address StubGenerator::generate_generic_copy(address byte_copy_entry, address short_copy_entry,
address int_copy_entry, address oop_copy_entry,
address long_copy_entry, address checkcast_copy_entry) {

@@ -2751,7 +2929,8 @@ address StubGenerator::generate_generic_copy(const char *name,
if (advance < 0) advance += modulus;
if (advance > 0) __ nop(advance);
}
StubCodeMark mark(this, "StubRoutines", name);
StubGenStubId stub_id = StubGenStubId::generic_arraycopy_id;
StubCodeMark mark(this, stub_id);

// Short-hop target to L_failed. Makes for denser prologue code.
__ BIND(L_failed_0);

@@ -112,7 +112,8 @@ void StubGenerator::generate_chacha_stubs() {
/* The 2-block AVX/AVX2-enabled ChaCha20 block function implementation */
address StubGenerator::generate_chacha20Block_avx() {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "chacha20Block");
StubGenStubId stub_id = StubGenStubId::chacha20Block_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();

Label L_twoRounds;
@@ -300,7 +301,8 @@ address StubGenerator::generate_chacha20Block_avx() {
/* The 4-block AVX512-enabled ChaCha20 block function implementation */
address StubGenerator::generate_chacha20Block_avx512() {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "chacha20Block");
StubGenStubId stub_id = StubGenStubId::chacha20Block_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();

Label L_twoRounds;

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2021, Intel Corporation. All rights reserved.
* Copyright (c) 2016, 2024, Intel Corporation. All rights reserved.
* Intel Math Library (LIBM) Source Code
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -173,7 +173,8 @@
#define __ _masm->

address StubGenerator::generate_libmCos() {
StubCodeMark mark(this, "StubRoutines", "libmCos");
StubGenStubId stub_id = StubGenStubId::dcos_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();

Label L_2TAG_PACKET_0_0_1, L_2TAG_PACKET_1_0_1, L_2TAG_PACKET_2_0_1, L_2TAG_PACKET_3_0_1;

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2021, Intel Corporation. All rights reserved.
* Copyright (c) 2016, 2024, Intel Corporation. All rights reserved.
* Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
* Intel Math Library (LIBM) Source Code
*
@@ -165,7 +165,8 @@ ATTRIBUTE_ALIGNED(4) static const juint _INF[] =
#define __ _masm->

address StubGenerator::generate_libmExp() {
StubCodeMark mark(this, "StubRoutines", "libmExp");
StubGenStubId stub_id = StubGenStubId::dexp_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();

Label L_2TAG_PACKET_0_0_2, L_2TAG_PACKET_1_0_2, L_2TAG_PACKET_2_0_2, L_2TAG_PACKET_3_0_2;

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2023, Intel Corporation. All rights reserved.
* Copyright (c) 2023, 2024, Intel Corporation. All rights reserved.
* Intel Math Library (LIBM) Source Code
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -73,7 +73,8 @@ ATTRIBUTE_ALIGNED(32) static const uint64_t CONST_e307[] = {

address StubGenerator::generate_libmFmod() {
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "libmFmod");
StubGenStubId stub_id = StubGenStubId::fmod_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();
__ enter(); // required for proper stackwalking of RuntimeStub frame


@@ -82,7 +82,8 @@ void StubGenerator::generate_ghash_stubs() {
address StubGenerator::generate_ghash_processBlocks() {
__ align(CodeEntryAlignment);
Label L_ghash_loop, L_exit;
StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
StubGenStubId stub_id = StubGenStubId::ghash_processBlocks_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();

const Register state = c_rarg0;
@@ -218,7 +219,8 @@ address StubGenerator::generate_ghash_processBlocks() {
address StubGenerator::generate_avx_ghash_processBlocks() {
__ align(CodeEntryAlignment);

StubCodeMark mark(this, "StubRoutines", "ghash_processBlocks");
StubGenStubId stub_id = StubGenStubId::ghash_processBlocks_id;
StubCodeMark mark(this, stub_id);
address start = __ pc();

// arguments

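// Several conjoint copy stubs in the arraycopy hunks above take a nooverlap_target: when
// the source and destination ranges turn out not to overlap, they branch to the faster
// disjoint entry. A plain C++ sketch of that dispatch, with memcpy/memmove standing in
// for the generated forward/backward copy loops (not the actual stub code):
#include <cstring>
#include <cstddef>
#include <cstdio>

static void disjoint_copy(char* dst, const char* src, size_t n) {
  std::memcpy(dst, src, n);             // forward copy; caller guarantees no overlap
}

static void conjoint_copy(char* dst, const char* src, size_t n) {
  if (dst >= src + n || dst + n <= src) {
    disjoint_copy(dst, src, n);         // ranges are disjoint: take the no-overlap entry
  } else {
    std::memmove(dst, src, n);          // overlapping: copy in a direction that is safe
  }
}

int main() {
  char buf[8] = {'a', 'b', 'c', 'd', 'e', 0, 0, 0};
  conjoint_copy(buf + 2, buf, 5);       // overlapping shift within one buffer
  std::printf("%s\n", buf + 2);         // prints "abcde"
}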
Some files were not shown because too many files have changed in this diff.