Mirror of https://github.com/JetBrains/JetBrainsRuntime.git (synced 2025-12-06 09:29:38 +01:00)

Compare commits: 107 commits
The compare view lists 107 commits by abbreviated SHA1; the author, date, and message columns were not captured in this text mirror:

fe9f05023e, 6d9a50ea14, 9cc260d3a1, d8dc7118fd, 9a4d4ab101, ac7a3c00bb, bff5e90359, d2927cfa24, ee4d54c500, 306e364ffd,
4338cb3d70, b7f38fc54f, 57bc96e5cd, 23301d5776, 5a77c29032, 79c3d47cfe, 73c77d9624, b178894423, 9c7cab5404, b5ed8cca77,
f9f7a27ffa, 60c68a1363, 78150ca9df, 887a93b7c9, eb2c4b0b83, b2cc1890ff, f1802d5300, b40b18823b, c1ea6daa5b, b9a535b8ac,
c7f1c97312, 7fc13bb537, 175184cb18, dfea48b7f5, 83f0f841ee, 7586d8e43f, e5cb83cd74, 247a4360f6, 4799c8d40c, bb43aae61f,
4034787ccb, a91569dd20, 9257505059, 628e31b8c1, 3017281956, 8db5d865c1, ba96094128, 01f780fab2, b0920c24cd, 3909d74af5,
d115295df8, 71a05bf03f, d3f18d0469, 07a8911ce8, 749f749f04, 6022e73d73, 46f1df38a0, 3984a00ea9, 4ea14b2720, 3daa936f2d,
db3427582d, 5308410724, 6951987b65, 34222839e8, dfe52be4ef, 865cf888ef, 71cc879bd4, 46b1b1ae8d, da24559051, 33f07b56ef,
28db238d52, 6b79e792ac, a8df559763, acc4829ec3, e5f3366efa, 29ed3878cc, 2110300246, f27efd6a29, 0442d772b0, 6aace1819c,
28279ee615, 01cb043b29, bb0e203d13, b121931959, a6e35650f9, a72afb3845, b8c88a3e91, ea6d79ff94, c249229b3c, fb3cc98da3,
a05f3d10cc, 3ae97bc502, 049be572ef, 57d97b52c9, 6b46c776e4, d7b592ab21, d62249a3ab, 41b7296f49, a55e18baf0, ae77bd009f,
9f0469b94a, 379a8bbb5d, e88a022072, cbec97c945, e5684da6e5, 508d04f71f, 7de0fb3613
```diff
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -389,6 +389,10 @@ AC_DEFUN_ONCE([TOOLCHAIN_POST_DETECTION],
   # This is necessary since AC_PROG_CC defaults CFLAGS to "-g -O2"
   CFLAGS="$ORG_CFLAGS"
   CXXFLAGS="$ORG_CXXFLAGS"
+
+  # filter out some unwanted additions autoconf may add to CXX; we saw this on macOS with autoconf 2.72
+  UTIL_GET_NON_MATCHING_VALUES(cxx_filtered, $CXX, -std=c++11 -std=gnu++11)
+  CXX="$cxx_filtered"
 ])

 # Check if a compiler is of the toolchain type we expect, and save the version
```
```diff
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -199,7 +199,7 @@ AC_DEFUN([UTIL_GET_NON_MATCHING_VALUES],
   if test -z "$legal_values"; then
     $1="$2"
   else
-    result=`$GREP -Fvx "$legal_values" <<< "$values_to_check" | $GREP -v '^$'`
+    result=`$GREP -Fvx -- "$legal_values" <<< "$values_to_check" | $GREP -v '^$'`
     $1=${result//$'\n'/ }
   fi
 ])
@@ -226,7 +226,7 @@ AC_DEFUN([UTIL_GET_MATCHING_VALUES],
   if test -z "$illegal_values"; then
     $1=""
   else
-    result=`$GREP -Fx "$illegal_values" <<< "$values_to_check" | $GREP -v '^$'`
+    result=`$GREP -Fx -- "$illegal_values" <<< "$values_to_check" | $GREP -v '^$'`
     $1=${result//$'\n'/ }
   fi
 ])
```
```diff
@@ -39,4 +39,4 @@ DEFAULT_VERSION_CLASSFILE_MINOR=0
 DEFAULT_VERSION_DOCS_API_SINCE=11
 DEFAULT_ACCEPTABLE_BOOT_VERSIONS="21 22"
 DEFAULT_JDK_SOURCE_TARGET_VERSION=22
-DEFAULT_PROMOTED_VERSION_PRE=ea
+DEFAULT_PROMOTED_VERSION_PRE=
```
```diff
@@ -223,6 +223,7 @@ JVM_VirtualThreadEnd
 JVM_VirtualThreadMount
 JVM_VirtualThreadUnmount
 JVM_VirtualThreadHideFrames
+JVM_VirtualThreadDisableSuspend

 # Scoped values
 JVM_EnsureMaterializedForStackWalk_func
```
```diff
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1289,25 +1289,58 @@ public class CLDRConverter {
      */
     private static void generateTZDBShortNamesMap() throws IOException {
         Files.walk(Path.of(tzDataDir), 1, FileVisitOption.FOLLOW_LINKS)
-            .filter(p -> p.toFile().isFile())
+            .filter(p -> p.toFile().isFile() && !p.endsWith("jdk11_backward"))
             .forEach(p -> {
                 try {
                     String zone = null;
                     String rule = null;
                     String format = null;
+                    boolean inVanguard = false;
+                    boolean inRearguard = false;
                     for (var line : Files.readAllLines(p)) {
                         if (line.contains("#STDOFF")) continue;
+                        // Interpret the line in rearguard mode so that STD/DST
+                        // correctly handles negative DST cases, such as "GMT/IST"
+                        // vs. "IST/GMT" case for Europe/Dublin
+                        if (inVanguard) {
+                            if (line.startsWith("# Rearguard")) {
+                                inVanguard = false;
+                                inRearguard = true;
+                            }
+                            continue;
+                        } else if (line.startsWith("# Vanguard")) {
+                            inVanguard = true;
+                            continue;
+                        }
+                        if (inRearguard) {
+                            if (line.startsWith("# End of rearguard")) {
+                                inRearguard = false;
+                                continue;
+                            } else {
+                                if (line.startsWith("#\t")) {
+                                    line = line.substring(1); // omit #
+                                }
+                            }
+                        }
                         if (line.isBlank() || line.matches("^[ \t]*#.*")) {
                             // ignore blank/comment lines
                             continue;
                         }
                         // remove comments in-line
                         line = line.replaceAll("[ \t]*#.*", "");

                         // Zone line
                         if (line.startsWith("Zone")) {
+                            if (zone != null) {
+                                tzdbShortNamesMap.put(zone, format + NBSP + rule);
+                            }
                             var zl = line.split("[ \t]+", -1);
                             zone = zl[1];
                             rule = zl[3];
                             format = zl[4];
                         } else {
                             if (zone != null) {
-                                if (line.isBlank()) {
+                                if (line.startsWith("Rule") ||
+                                    line.startsWith("Link")) {
                                     tzdbShortNamesMap.put(zone, format + NBSP + rule);
                                     zone = null;
                                     rule = null;
```
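The hunk above is Java, but the new filtering logic is compact enough to sketch on its own. A hedged C++ rendition of the vanguard/rearguard state machine (a hypothetical helper; the comment markers are taken from the diff, everything else is illustrative):

```cpp
#include <string>
#include <vector>

// tzdata ships alternate "vanguard" and "rearguard" readings of the same
// zones inside comment blocks. The parser must drop the vanguard block and
// un-comment the rearguard one so STD/DST is interpreted in rearguard mode.
std::vector<std::string> select_rearguard(const std::vector<std::string>& lines) {
  std::vector<std::string> out;
  bool in_vanguard = false;
  bool in_rearguard = false;
  for (std::string line : lines) {
    if (in_vanguard) {
      if (line.rfind("# Rearguard", 0) == 0) {   // startsWith
        in_vanguard = false;
        in_rearguard = true;
      }
      continue;                                  // drop the vanguard reading
    }
    if (line.rfind("# Vanguard", 0) == 0) { in_vanguard = true; continue; }
    if (in_rearguard) {
      if (line.rfind("# End of rearguard", 0) == 0) { in_rearguard = false; continue; }
      if (line.rfind("#\t", 0) == 0) line = line.substr(1); // omit the '#'
    }
    out.push_back(line);
  }
  return out;
}
```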
```diff
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@ import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.InputStream;
 import java.text.SimpleDateFormat;
+import java.util.Arrays;
 import java.util.Date;
 import java.util.HashMap;
 import java.util.Locale;
@@ -339,9 +340,15 @@ public class GenerateCurrencyData {
                 validCurrencyCodes.substring(i * 7 + 3, i * 7 + 6));
             checkCurrencyCode(currencyCode);
             int tableEntry = mainTable[(currencyCode.charAt(0) - 'A') * A_TO_Z + (currencyCode.charAt(1) - 'A')];
-            if (tableEntry == INVALID_COUNTRY_ENTRY ||
-                (tableEntry & SPECIAL_CASE_COUNTRY_MASK) != 0 ||
-                (tableEntry & SIMPLE_CASE_COUNTRY_FINAL_CHAR_MASK) != (currencyCode.charAt(2) - 'A')) {
+
+            // Do not allow a future currency to be classified as an otherCurrency,
+            // otherwise it will leak out into Currency:getAvailableCurrencies
+            boolean futureCurrency = Arrays.asList(specialCaseNewCurrencies).contains(currencyCode);
+            boolean simpleCurrency = (tableEntry & SIMPLE_CASE_COUNTRY_FINAL_CHAR_MASK) == (currencyCode.charAt(2) - 'A');
+
+            // If neither a simple currency, or one defined in the future
+            // then the current currency is applicable to be added to the otherTable
+            if (!futureCurrency && !simpleCurrency) {
                 if (otherCurrenciesCount == maxOtherCurrencies) {
                     throw new RuntimeException("too many other currencies");
                 }
```
```diff
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -282,7 +282,8 @@ void LIR_Assembler::osr_entry() {
         __ bind(L);
       }
 #endif
-      __ ldp(r19, r20, Address(OSR_buf, slot_offset));
+      __ ldr(r19, Address(OSR_buf, slot_offset));
+      __ ldr(r20, Address(OSR_buf, slot_offset + BytesPerWord));
       __ str(r19, frame_map()->address_for_monitor_lock(i));
       __ str(r20, frame_map()->address_for_monitor_object(i));
     }
```
```diff
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2023, Red Hat, Inc. All rights reserved.
- * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -58,7 +58,7 @@ static char* reserve_at_eor_compatible_address(size_t size, bool aslr) {
     0x7ffc, 0x7ffe, 0x7fff
   };
   static constexpr int num_immediates = sizeof(immediates) / sizeof(immediates[0]);
-  const int start_index = aslr ? os::random() : 0;
+  const int start_index = aslr ? os::next_random((int)os::javaTimeNanos()) : 0;
   constexpr int max_tries = 64;
   for (int ntry = 0; result == nullptr && ntry < max_tries; ntry ++) {
     // As in os::attempt_reserve_memory_between, we alternate between higher and lower
```
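A hedged reading of this one-liner: os::random() draws from a global PRNG whose state may still be at its fixed default this early in startup, making the "randomized" start index predictable, while os::next_random(seed) is a pure function of its argument, so seeding it from the nanosecond clock restores per-run variation (that characterization of the two HotSpot helpers is my assumption). A standalone sketch with standard-library stand-ins:

```cpp
#include <chrono>
#include <cstdint>
#include <random>

// Stand-in for the fixed line above: pick a starting slot in the immediates
// table, varying per run when ASLR is requested.
int start_index_for_aslr(bool aslr, int num_immediates) {
  if (!aslr) return 0;
  auto seed = (uint32_t)std::chrono::steady_clock::now().time_since_epoch().count();
  std::minstd_rand rng(seed);                    // stand-in for os::next_random
  return (int)(rng() % (uint32_t)num_immediates);
}
```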
```diff
@@ -310,7 +310,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,

   uint int_args = 0;
   uint fp_args = 0;
-  uint stk_args = 0; // inc by 2 each time
+  uint stk_args = 0;

   for (int i = 0; i < total_args_passed; i++) {
     switch (sig_bt[i]) {
@@ -322,8 +322,9 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       if (int_args < Argument::n_int_register_parameters_j) {
         regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
       } else {
+        stk_args = align_up(stk_args, 2);
         regs[i].set1(VMRegImpl::stack2reg(stk_args));
-        stk_args += 2;
+        stk_args += 1;
       }
       break;
     case T_VOID:
@@ -340,6 +341,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       if (int_args < Argument::n_int_register_parameters_j) {
         regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
       } else {
+        stk_args = align_up(stk_args, 2);
         regs[i].set2(VMRegImpl::stack2reg(stk_args));
         stk_args += 2;
       }
@@ -348,8 +350,9 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       if (fp_args < Argument::n_float_register_parameters_j) {
         regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
       } else {
+        stk_args = align_up(stk_args, 2);
         regs[i].set1(VMRegImpl::stack2reg(stk_args));
-        stk_args += 2;
+        stk_args += 1;
       }
       break;
     case T_DOUBLE:
@@ -357,6 +360,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       if (fp_args < Argument::n_float_register_parameters_j) {
         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
       } else {
+        stk_args = align_up(stk_args, 2);
         regs[i].set2(VMRegImpl::stack2reg(stk_args));
         stk_args += 2;
       }
@@ -367,7 +371,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
     }
   }

-  return align_up(stk_args, 2);
+  return stk_args;
 }

 // Patch the callers callsite with entry to compiled code if it exists.
```
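This AArch64 hunk, the matching RISC-V and x86-64 hunks below, and the per-port "return stk" changes all make the same move: java_calling_convention stops charging every stack argument two VMRegImpl slots, packs 32-bit values into single slots, aligns only before 64-bit values, and returns the raw slot count (callers that still need an even count, like the C1 FrameMap hunk further down, now apply align_up themselves). A minimal standalone sketch of that packing rule, with hypothetical names (align_up_u, assign_stack_slots) standing in for the HotSpot helpers:

```cpp
#include <cstdint>
#include <vector>

static uint32_t align_up_u(uint32_t v, uint32_t a) { return (v + a - 1) & ~(a - 1); }

enum class Kind { Int32, Int64 };  // stand-ins for T_INT/T_FLOAT vs T_LONG/T_DOUBLE

// Returns the raw number of 32-bit stack slots used; 64-bit values are kept
// 8-byte aligned, 32-bit values are packed into single slots.
uint32_t assign_stack_slots(const std::vector<Kind>& args, std::vector<uint32_t>& slots) {
  uint32_t stk_args = 0;
  for (Kind k : args) {
    if (k == Kind::Int32) {
      slots.push_back(stk_args);
      stk_args += 1;                       // old code: stk_args += 2
    } else {
      stk_args = align_up_u(stk_args, 2);  // align before a 64-bit value
      slots.push_back(stk_args);
      stk_args += 2;
    }
  }
  return stk_args;                         // old code: align_up_u(stk_args, 2)
}
```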
```diff
@@ -303,15 +303,19 @@ void InterpreterMacroAssembler::load_field_entry(Register cache, Register index,
 }

 void InterpreterMacroAssembler::load_method_entry(Register cache, Register index, int bcp_offset) {
   assert_different_registers(cache, index);

   // Get index out of bytecode pointer
   get_index_at_bcp(index, bcp_offset, cache /* as tmp */, sizeof(u2));

+  // sizeof(ResolvedMethodEntry) is not a power of 2 on Arm, so can't use shift
+  mov(cache, sizeof(ResolvedMethodEntry));
+  mul(index, index, cache); // Scale the index to be the entry index * sizeof(ResolvedMethodEntry)
+
   // load constant pool cache pointer
   ldr(cache, Address(FP, frame::interpreter_frame_cache_offset * wordSize));
   // Get address of method entries array
-  ldr(cache, Address(cache, ConstantPoolCache::method_entries_offset()));
+  ldr(cache, Address(cache, in_bytes(ConstantPoolCache::method_entries_offset())));
   add(cache, cache, Array<ResolvedMethodEntry>::base_offset_in_bytes());
   add(cache, cache, index);
 }
```
```diff
@@ -441,7 +441,6 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
     }
   }

-  if (slot & 1) slot++;
   return slot;
 }
```
```diff
@@ -370,17 +370,16 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
   if (index_size == sizeof(u4)) {
     __ load_resolved_indy_entry(Rcache, Rindex);
     __ ldrh(Rcache, Address(Rcache, in_bytes(ResolvedIndyEntry::num_parameters_offset())));
-    __ check_stack_top();
-    __ add(Rstack_top, Rstack_top, AsmOperand(Rcache, lsl, Interpreter::logStackElementSize));
   } else {
-    // Pop N words from the stack
     assert(index_size == sizeof(u2), "Can only be u2");
     __ load_method_entry(Rcache, Rindex);
-    __ ldrh(Rcache, Address(Rcache, in_bytes(ResolvedIndyEntry::num_parameters_offset())));
-    __ check_stack_top();
-    __ add(Rstack_top, Rstack_top, AsmOperand(Rcache, lsl, Interpreter::logStackElementSize));
+    __ ldrh(Rcache, Address(Rcache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));
   }
+
+  // Pop N words from the stack
+  __ check_stack_top();
+  __ add(Rstack_top, Rstack_top, AsmOperand(Rcache, lsl, Interpreter::logStackElementSize));

   __ convert_retval_to_tos(state);

   __ check_and_handle_popframe();
```
```diff
@@ -3666,15 +3666,15 @@ void TemplateTable::prepare_invoke(Register Rcache, Register recv) {
   // load receiver if needed (after extra argument is pushed so parameter size is correct)
   if (load_receiver) {
     __ ldrh(recv, Address(Rcache, in_bytes(ResolvedMethodEntry::num_parameters_offset())));
-    Address recv_addr = __ receiver_argument_address(Rstack_top, Rtemp, recv);
-    __ ldr(recv, recv_addr);
+    __ add(recv, Rstack_top, AsmOperand(recv, lsl, Interpreter::logStackElementSize));
+    __ ldr(recv, Address(recv, -Interpreter::stackElementSize));
     __ verify_oop(recv);
   }

   // load return address
   { const address table = (address) Interpreter::invoke_return_entry_table_for(code);
-    __ mov_slow(Rtemp, table);
-    __ ldr(LR, Address::indexed_ptr(Rtemp, ret_type));
+    __ mov_slow(LR, table);
+    __ ldr(LR, Address::indexed_ptr(LR, ret_type));
   }
 }
@@ -3744,10 +3744,13 @@ void TemplateTable::invokevirtual(int byte_no) {
 void TemplateTable::invokespecial(int byte_no) {
   transition(vtos, vtos);
   assert(byte_no == f1_byte, "use this argument");

   const Register Rrecv = R2_tmp;
-  load_resolved_method_entry_special_or_static(R2_tmp,  // ResolvedMethodEntry*
+  const Register Rflags = R3_tmp;
+
+  load_resolved_method_entry_special_or_static(Rrecv,   // ResolvedMethodEntry*
                                                Rmethod, // Method*
-                                               R3_tmp); // Flags
+                                               Rflags); // Flags
   prepare_invoke(Rrecv, Rrecv);
   __ verify_oop(Rrecv);
   __ null_check(Rrecv, Rtemp);
@@ -3760,12 +3763,16 @@ void TemplateTable::invokespecial(int byte_no) {
 void TemplateTable::invokestatic(int byte_no) {
   transition(vtos, vtos);
   assert(byte_no == f1_byte, "use this argument");
-  load_resolved_method_entry_special_or_static(R2_tmp,  // ResolvedMethodEntry*
+
+  const Register Rrecv = R2_tmp;
+  const Register Rflags = R3_tmp;
+
+  load_resolved_method_entry_special_or_static(Rrecv,   // ResolvedMethodEntry*
                                                Rmethod, // Method*
-                                               R3_tmp); // Flags
-  prepare_invoke(R2_tmp, R2_tmp);
+                                               Rflags); // Flags
+  prepare_invoke(Rrecv, Rrecv);
   // do the call
-  __ profile_call(R2_tmp);
+  __ profile_call(Rrecv);
   __ jump_from_interpreted(Rmethod);
 }
@@ -3788,10 +3795,10 @@ void TemplateTable::invokeinterface(int byte_no) {
   const Register Rflags = R3_tmp;
   const Register Rklass = R2_tmp; // Note! Same register with Rrecv

-  load_resolved_method_entry_interface(R2_tmp,  // ResolvedMethodEntry*
-                                       R1_tmp,  // Klass*
+  load_resolved_method_entry_interface(Rrecv,   // ResolvedMethodEntry*
+                                       Rinterf, // Klass*
                                        Rmethod, // Method* or itable/vtable index
-                                       R3_tmp); // Flags
+                                       Rflags); // Flags
   prepare_invoke(Rrecv, Rrecv);

   // First check for Object case, then private interface method,
```
```diff
@@ -734,7 +734,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       ShouldNotReachHere();
     }
   }
-  return align_up(stk, 2);
+  return stk;
 }

 #if defined(COMPILER1) || defined(COMPILER2)
```
```diff
@@ -2771,7 +2771,7 @@ void MacroAssembler::load_reserved(Register dst,
       break;
     case uint32:
       lr_w(dst, addr, acquire);
-      zero_extend(t0, t0, 32);
+      zero_extend(dst, dst, 32);
       break;
     default:
       ShouldNotReachHere();
```
```diff
@@ -266,7 +266,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,

   uint int_args = 0;
   uint fp_args = 0;
-  uint stk_args = 0; // inc by 2 each time
+  uint stk_args = 0;

   for (int i = 0; i < total_args_passed; i++) {
     switch (sig_bt[i]) {
@@ -278,8 +278,9 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       if (int_args < Argument::n_int_register_parameters_j) {
         regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
       } else {
+        stk_args = align_up(stk_args, 2);
         regs[i].set1(VMRegImpl::stack2reg(stk_args));
-        stk_args += 2;
+        stk_args += 1;
       }
       break;
     case T_VOID:
@@ -295,6 +296,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       if (int_args < Argument::n_int_register_parameters_j) {
         regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
       } else {
+        stk_args = align_up(stk_args, 2);
         regs[i].set2(VMRegImpl::stack2reg(stk_args));
         stk_args += 2;
       }
@@ -303,8 +305,9 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       if (fp_args < Argument::n_float_register_parameters_j) {
         regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
       } else {
+        stk_args = align_up(stk_args, 2);
         regs[i].set1(VMRegImpl::stack2reg(stk_args));
-        stk_args += 2;
+        stk_args += 1;
       }
       break;
     case T_DOUBLE:
@@ -312,6 +315,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       if (fp_args < Argument::n_float_register_parameters_j) {
         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
       } else {
+        stk_args = align_up(stk_args, 2);
         regs[i].set2(VMRegImpl::stack2reg(stk_args));
         stk_args += 2;
       }
@@ -321,7 +325,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
     }
   }

-  return align_up(stk_args, 2);
+  return stk_args;
 }

 // Patch the callers callsite with entry to compiled code if it exists.
```
```diff
@@ -755,7 +755,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       ShouldNotReachHere();
     }
   }
-  return align_up(stk, 2);
+  return stk;
 }

 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
```
```diff
@@ -920,6 +920,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
     case 0x11: // movups
     case 0x12: // movlps
     case 0x28: // movaps
+    case 0x29: // movaps
     case 0x2E: // ucomiss
     case 0x2F: // comiss
     case 0x54: // andps
@@ -969,7 +970,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
       assert(which == call32_operand, "jcc has no disp32 or imm");
       return ip;
     default:
-      ShouldNotReachHere();
+      fatal("not handled: 0x0F%2X", 0xFF & *(ip-1));
     }
     break;
```
```diff
@@ -1089,7 +1089,8 @@ void C2_MacroAssembler::vminmax_fp(int opcode, BasicType elem_bt,
   assert(opcode == Op_MinV || opcode == Op_MinReductionV ||
          opcode == Op_MaxV || opcode == Op_MaxReductionV, "sanity");
   assert(elem_bt == T_FLOAT || elem_bt == T_DOUBLE, "sanity");
-  assert_different_registers(a, b, tmp, atmp, btmp);
+  assert_different_registers(a, tmp, atmp, btmp);
+  assert_different_registers(b, tmp, atmp, btmp);

   bool is_min = (opcode == Op_MinV || opcode == Op_MinReductionV);
   bool is_double_word = is_double_word_type(elem_bt);
@@ -1176,7 +1177,8 @@ void C2_MacroAssembler::evminmax_fp(int opcode, BasicType elem_bt,
   assert(opcode == Op_MinV || opcode == Op_MinReductionV ||
          opcode == Op_MaxV || opcode == Op_MaxReductionV, "sanity");
   assert(elem_bt == T_FLOAT || elem_bt == T_DOUBLE, "sanity");
-  assert_different_registers(dst, a, b, atmp, btmp);
+  assert_different_registers(dst, a, atmp, btmp);
+  assert_different_registers(dst, b, atmp, btmp);

   bool is_min = (opcode == Op_MinV || opcode == Op_MinReductionV);
   bool is_double_word = is_double_word_type(elem_bt);
```
```diff
@@ -1871,92 +1871,6 @@ void MacroAssembler::cmpoop(Register src1, jobject src2, Register rscratch) {
 }
 #endif

-void MacroAssembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
-  if ((UseAVX > 0) && (dst != src)) {
-    xorpd(dst, dst);
-  }
-  Assembler::cvtss2sd(dst, src);
-}
-
-void MacroAssembler::cvtss2sd(XMMRegister dst, Address src) {
-  if (UseAVX > 0) {
-    xorpd(dst, dst);
-  }
-  Assembler::cvtss2sd(dst, src);
-}
-
-void MacroAssembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
-  if ((UseAVX > 0) && (dst != src)) {
-    xorps(dst, dst);
-  }
-  Assembler::cvtsd2ss(dst, src);
-}
-
-void MacroAssembler::cvtsd2ss(XMMRegister dst, Address src) {
-  if (UseAVX > 0) {
-    xorps(dst, dst);
-  }
-  Assembler::cvtsd2ss(dst, src);
-}
-
-void MacroAssembler::cvtsi2sdl(XMMRegister dst, Register src) {
-  if (UseAVX > 0) {
-    xorpd(dst, dst);
-  }
-  Assembler::cvtsi2sdl(dst, src);
-}
-
-void MacroAssembler::cvtsi2sdl(XMMRegister dst, Address src) {
-  if (UseAVX > 0) {
-    xorpd(dst, dst);
-  }
-  Assembler::cvtsi2sdl(dst, src);
-}
-
-void MacroAssembler::cvtsi2ssl(XMMRegister dst, Register src) {
-  if (UseAVX > 0) {
-    xorps(dst, dst);
-  }
-  Assembler::cvtsi2ssl(dst, src);
-}
-
-void MacroAssembler::cvtsi2ssl(XMMRegister dst, Address src) {
-  if (UseAVX > 0) {
-    xorps(dst, dst);
-  }
-  Assembler::cvtsi2ssl(dst, src);
-}
-
-#ifdef _LP64
-void MacroAssembler::cvtsi2sdq(XMMRegister dst, Register src) {
-  if (UseAVX > 0) {
-    xorpd(dst, dst);
-  }
-  Assembler::cvtsi2sdq(dst, src);
-}
-
-void MacroAssembler::cvtsi2sdq(XMMRegister dst, Address src) {
-  if (UseAVX > 0) {
-    xorpd(dst, dst);
-  }
-  Assembler::cvtsi2sdq(dst, src);
-}
-
-void MacroAssembler::cvtsi2ssq(XMMRegister dst, Register src) {
-  if (UseAVX > 0) {
-    xorps(dst, dst);
-  }
-  Assembler::cvtsi2ssq(dst, src);
-}
-
-void MacroAssembler::cvtsi2ssq(XMMRegister dst, Address src) {
-  if (UseAVX > 0) {
-    xorps(dst, dst);
-  }
-  Assembler::cvtsi2ssq(dst, src);
-}
-#endif // _LP64
-
 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch) {
   assert(rscratch != noreg || always_reachable(adr), "missing");
```
```diff
@@ -800,23 +800,6 @@ public:

   void cmpxchgptr(Register reg, Address adr);

-
-  // cvt instructions
-  void cvtss2sd(XMMRegister dst, XMMRegister src);
-  void cvtss2sd(XMMRegister dst, Address src);
-  void cvtsd2ss(XMMRegister dst, XMMRegister src);
-  void cvtsd2ss(XMMRegister dst, Address src);
-  void cvtsi2sdl(XMMRegister dst, Register src);
-  void cvtsi2sdl(XMMRegister dst, Address src);
-  void cvtsi2ssl(XMMRegister dst, Register src);
-  void cvtsi2ssl(XMMRegister dst, Address src);
-#ifdef _LP64
-  void cvtsi2sdq(XMMRegister dst, Register src);
-  void cvtsi2sdq(XMMRegister dst, Address src);
-  void cvtsi2ssq(XMMRegister dst, Register src);
-  void cvtsi2ssq(XMMRegister dst, Address src);
-#endif
-
   void locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch = noreg);

   void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
```
```diff
@@ -528,8 +528,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
     }
   }

-  // return value can be odd number of VMRegImpl stack slots make multiple of 2
-  return align_up(stack, 2);
+  return stack;
 }

 // Patch the callers callsite with entry to compiled code if it exists.
```
```diff
@@ -498,7 +498,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,

   uint int_args = 0;
   uint fp_args = 0;
-  uint stk_args = 0; // inc by 2 each time
+  uint stk_args = 0;

   for (int i = 0; i < total_args_passed; i++) {
     switch (sig_bt[i]) {
@@ -510,8 +510,9 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       if (int_args < Argument::n_int_register_parameters_j) {
         regs[i].set1(INT_ArgReg[int_args++]->as_VMReg());
       } else {
+        stk_args = align_up(stk_args, 2);
         regs[i].set1(VMRegImpl::stack2reg(stk_args));
-        stk_args += 2;
+        stk_args += 1;
       }
       break;
     case T_VOID:
@@ -528,6 +529,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       if (int_args < Argument::n_int_register_parameters_j) {
         regs[i].set2(INT_ArgReg[int_args++]->as_VMReg());
       } else {
+        stk_args = align_up(stk_args, 2);
         regs[i].set2(VMRegImpl::stack2reg(stk_args));
         stk_args += 2;
       }
@@ -536,8 +538,9 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       if (fp_args < Argument::n_float_register_parameters_j) {
         regs[i].set1(FP_ArgReg[fp_args++]->as_VMReg());
       } else {
+        stk_args = align_up(stk_args, 2);
         regs[i].set1(VMRegImpl::stack2reg(stk_args));
-        stk_args += 2;
+        stk_args += 1;
       }
       break;
     case T_DOUBLE:
@@ -545,6 +548,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
       if (fp_args < Argument::n_float_register_parameters_j) {
         regs[i].set2(FP_ArgReg[fp_args++]->as_VMReg());
       } else {
+        stk_args = align_up(stk_args, 2);
         regs[i].set2(VMRegImpl::stack2reg(stk_args));
         stk_args += 2;
       }
@@ -555,7 +559,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
     }
   }

-  return align_up(stk_args, 2);
+  return stk_args;
 }

 // Patch the callers callsite with entry to compiled code if it exists.
```
```diff
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2318,7 +2318,7 @@ address StubGenerator::generate_base64_decodeBlock() {
   const Register isURL = c_rarg5;// Base64 or URL character set
   __ movl(isMIME, Address(rbp, 2 * wordSize));
 #else
-  const Address dp_mem(rbp, 6 * wordSize);  // length is on stack on Win64
+  const Address dp_mem(rbp, 6 * wordSize); // length is on stack on Win64
   const Address isURL_mem(rbp, 7 * wordSize);
   const Register isURL = r10;      // pick the volatile windows register
   const Register dp = r12;
@@ -2540,10 +2540,12 @@ address StubGenerator::generate_base64_decodeBlock() {
   // output_size in r13

   // Strip pad characters, if any, and adjust length and mask
+  __ addq(length, start_offset);
   __ cmpb(Address(source, length, Address::times_1, -1), '=');
   __ jcc(Assembler::equal, L_padding);

   __ BIND(L_donePadding);
+  __ subq(length, start_offset);

   // Output size is (64 - output_size), output mask is (all 1s >> output_size).
   __ kmovql(input_mask, rax);
```
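A hedged reading of the two added instructions: the '=' padding test indexes from source by length alone, so length must temporarily include start_offset for the comparison to hit the last byte of the current chunk, and is restored afterwards. Illustrative C++ (my sketch of the addressing, not the stub itself):

```cpp
#include <cstddef>

// Check whether the chunk [source + start_offset, source + start_offset + length)
// ends in a base64 pad character.
bool ends_with_pad(const unsigned char* source, size_t start_offset, size_t length) {
  size_t biased = length + start_offset;  // mirrors: __ addq(length, start_offset)
  bool pad = source[biased - 1] == '=';   // mirrors: cmpb(Address(source, length, -1), '=')
  // afterwards the stub undoes the bias:  __ subq(length, start_offset)
  return pad;
}
```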
```diff
@@ -1130,6 +1130,7 @@ void VM_Version::get_processor_features() {
     FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
   }

+#ifdef _LP64
   // ChaCha20 Intrinsics
   // As long as the system supports AVX as a baseline we can do a
   // SIMD-enabled block function. StubGenerator makes the determination
@@ -1145,6 +1146,13 @@ void VM_Version::get_processor_features() {
     }
     FLAG_SET_DEFAULT(UseChaCha20Intrinsics, false);
   }
+#else
+  // No support currently for ChaCha20 intrinsics on 32-bit platforms
+  if (UseChaCha20Intrinsics) {
+    warning("ChaCha20 intrinsics are not available on this CPU.");
+    FLAG_SET_DEFAULT(UseChaCha20Intrinsics, false);
+  }
+#endif // _LP64

   // Base64 Intrinsics (Check the condition for which the intrinsic will be active)
   if (UseAVX >= 2) {
```
```diff
@@ -10095,7 +10095,7 @@ instruct cmpD_imm(rRegI dst, regD src, immD con, rFlagsReg cr) %{
 instruct convF2D_reg_reg(regD dst, regF src)
 %{
   match(Set dst (ConvF2D src));
+  effect(TEMP dst);

   format %{ "cvtss2sd $dst, $src" %}
   ins_encode %{
     __ cvtss2sd ($dst$$XMMRegister, $src$$XMMRegister);
@@ -10117,7 +10117,7 @@ instruct convF2D_reg_mem(regD dst, memory src)
 instruct convD2F_reg_reg(regF dst, regD src)
 %{
   match(Set dst (ConvD2F src));
+  effect(TEMP dst);

   format %{ "cvtsd2ss $dst, $src" %}
   ins_encode %{
     __ cvtsd2ss ($dst$$XMMRegister, $src$$XMMRegister);
```
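The removed MacroAssembler cvt wrappers earlier in this series zeroed dst before SSE scalar converts; the effect(TEMP dst) lines here instead let the register allocator guarantee dst is not a live input. The underlying issue, sketched below with SSE intrinsics (my illustration, not HotSpot code): cvtss2sd writes only the low 64 bits of its destination, so the old register value is a hidden input and creates a false dependency unless it is cleared or known dead.

```cpp
#include <immintrin.h>

// In asm, "cvtss2sd dst, src" behaves like dst = _mm_cvtss_sd(dst, src):
// the upper 64 bits of dst pass through, so the instruction must wait for
// whatever last wrote dst, a false dependency for a pure float-to-double.
double convert(__m128d dst_old, __m128 src) {
  return _mm_cvtsd_f64(_mm_cvtss_sd(dst_old, src)); // depends on dst_old
}

// Breaking the chain the way the removed wrappers did: xorpd zeroes dst and
// is recognized by the CPU as dependency-breaking, leaving src as the only
// real input.
double convert_no_stall(__m128 src) {
  const __m128d zero = _mm_setzero_pd();
  return _mm_cvtsd_f64(_mm_cvtss_sd(zero, src));
}
```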
```diff
@@ -72,7 +72,7 @@ CallingConvention* FrameMap::java_calling_convention(const BasicTypeArray* signa
     }
   }

-  intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs);
+  intptr_t out_preserve = align_up(SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs), 2);
   LIR_OprList* args = new LIR_OprList(signature->length());
   for (i = 0; i < sizeargs;) {
     BasicType t = sig_bt[i];
```
```diff
@@ -421,8 +421,11 @@ void RangeCheckEliminator::add_access_indexed_info(InstructionList &indices, int
     aii->_max = idx;
     aii->_list = new AccessIndexedList();
   } else if (idx >= aii->_min && idx <= aii->_max) {
-    remove_range_check(ai);
-    return;
+    // Guard against underflow/overflow (see 'range_cond' check in RangeCheckEliminator::in_block_motion)
+    if (aii->_max < 0 || (aii->_max + min_jint) <= aii->_min) {
+      remove_range_check(ai);
+      return;
+    }
   }
   aii->_min = MIN2(aii->_min, idx);
   aii->_max = MAX2(aii->_max, idx);
@@ -465,9 +468,9 @@ void RangeCheckEliminator::in_block_motion(BlockBegin *block, AccessIndexedList
       }
     }
   } else {
-    int last_integer = 0;
+    jint last_integer = 0;
     Instruction *last_instruction = index;
-    int base = 0;
+    jint base = 0;
     ArithmeticOp *ao = index->as_ArithmeticOp();

     while (ao != nullptr && (ao->x()->as_Constant() || ao->y()->as_Constant()) && (ao->op() == Bytecodes::_iadd || ao->op() == Bytecodes::_isub)) {
@@ -479,12 +482,12 @@ void RangeCheckEliminator::in_block_motion(BlockBegin *block, AccessIndexedList
       }

       if (c) {
-        int value = c->type()->as_IntConstant()->value();
+        jint value = c->type()->as_IntConstant()->value();
         if (value != min_jint) {
           if (ao->op() == Bytecodes::_isub) {
             value = -value;
           }
-          base += value;
+          base = java_add(base, value);
           last_integer = base;
           last_instruction = other;
         }
@@ -506,12 +509,12 @@ void RangeCheckEliminator::in_block_motion(BlockBegin *block, AccessIndexedList
       assert(info != nullptr, "Info must not be null");

       // if idx < 0, max > 0, max + idx may fall between 0 and
-      // length-1 and if min < 0, min + idx may overflow and be >=
+      // length-1 and if min < 0, min + idx may underflow/overflow and be >=
       // 0. The predicate wouldn't trigger but some accesses could
       // be with a negative index. This test guarantees that for the
       // min and max value that are kept the predicate can't let
       // some incorrect accesses happen.
-      bool range_cond = (info->_max < 0 || info->_max + min_jint <= info->_min);
+      bool range_cond = (info->_max < 0 || (info->_max + min_jint) <= info->_min);

       // Generate code only if more than 2 range checks can be eliminated because of that.
       // 2 because at least 2 comparisons are done
@@ -859,7 +862,7 @@ void RangeCheckEliminator::process_access_indexed(BlockBegin *loop_header, Block
     );

     remove_range_check(ai);
-  } else if (_optimistic && loop_header) {
+  } else if (false && _optimistic && loop_header) {
     assert(ai->array(), "Array must not be null!");
     assert(ai->index(), "Index must not be null!");
```
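The base += value to base = java_add(base, value) change swaps undefined signed overflow for Java-style wrap-around, which is what the eliminator must model when folding constant index arithmetic. A minimal sketch of the assumed java_add semantics (my rendition, not the HotSpot header):

```cpp
#include <cstdint>

// Java int addition wraps around on overflow, while overflow of signed +
// in C++ is undefined behavior; forming the sum in unsigned arithmetic and
// converting back gives the well-defined wrap-around.
static inline int32_t java_add_sketch(int32_t a, int32_t b) {
  return (int32_t)((uint32_t)a + (uint32_t)b);
}
```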
```diff
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -275,9 +275,11 @@
   develop(bool, InstallMethods, true,                                       \
           "Install methods at the end of successful compilations")          \
                                                                             \
+  /* The compiler assumes, in many places, that methods are at most 1MB. */ \
+  /* Therefore, we restrict this flag to at most 1MB. */                    \
   develop(intx, NMethodSizeLimit, (64*K)*wordSize,                          \
           "Maximum size of a compiled method.")                             \
-          range(0, max_jint)                                                \
+          range(0, 1*M)                                                     \
                                                                             \
   develop(bool, TraceFPUStack, false,                                       \
           "Trace emulation of the FPU stack (intel only)")                  \
```
```diff
@@ -52,7 +52,7 @@ public:
   static void initialize() NOT_CDS_RETURN;
   static void check_system_property(const char* key, const char* value) NOT_CDS_RETURN;
   static void check_unsupported_dumping_properties() NOT_CDS_RETURN;
-  static bool check_vm_args_consistency(bool patch_mod_javabase, bool mode_flag_cmd_line) NOT_CDS_RETURN_(false);
+  static bool check_vm_args_consistency(bool patch_mod_javabase, bool mode_flag_cmd_line) NOT_CDS_RETURN_(true);

   // Basic CDS features
   static bool is_dumping_archive() { return is_dumping_static_archive() || is_dumping_dynamic_archive(); }
```
```diff
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -576,12 +576,14 @@ int FileMapInfo::get_module_shared_path_index(Symbol* location) {
   const char* file = ClassLoader::skip_uri_protocol(location->as_C_string());
   for (int i = ClassLoaderExt::app_module_paths_start_index(); i < get_number_of_shared_paths(); i++) {
     SharedClassPathEntry* ent = shared_path(i);
-    assert(ent->in_named_module(), "must be");
-    bool cond = strcmp(file, ent->name()) == 0;
-    log_debug(class, path)("get_module_shared_path_index (%d) %s : %s = %s", i,
-                           location->as_C_string(), ent->name(), cond ? "same" : "different");
-    if (cond) {
-      return i;
+    if (!ent->is_non_existent()) {
+      assert(ent->in_named_module(), "must be");
+      bool cond = strcmp(file, ent->name()) == 0;
+      log_debug(class, path)("get_module_shared_path_index (%d) %s : %s = %s", i,
+                             location->as_C_string(), ent->name(), cond ? "same" : "different");
+      if (cond) {
+        return i;
+      }
     }
   }
```
```diff
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -89,6 +89,7 @@ public:
   bool is_dir() const { return _type == dir_entry; }
   bool is_modules_image() const { return _type == modules_image_entry; }
   bool is_jar() const { return _type == jar_entry; }
+  bool is_non_existent() const { return _type == non_existent_entry; }
   bool from_class_path_attr() { return _from_class_path_attr; }
   time_t timestamp() const { return _timestamp; }
   const char* name() const;
```
```diff
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -387,7 +387,7 @@ bool LoaderConstraintTable::add_entry(Symbol* class_name,
   } else if (pp1 == nullptr) {
     pp2->extend_loader_constraint(class_name, loader1, klass);
   } else if (pp2 == nullptr) {
-    pp1->extend_loader_constraint(class_name, loader1, klass);
+    pp1->extend_loader_constraint(class_name, loader2, klass);
   } else {
     merge_loader_constraints(class_name, pp1, pp2, klass);
   }
```
```diff
@@ -1111,14 +1111,13 @@ InstanceKlass* SystemDictionary::load_shared_lambda_proxy_class(InstanceKlass* i
   if (loaded_ik != nullptr) {
     assert(shared_nest_host->is_same_class_package(ik),
            "lambda proxy class and its nest host must be in the same package");
-    // The lambda proxy class and its nest host have the same class loader and class loader data,
-    // as verified in SystemDictionaryShared::add_lambda_proxy_class()
-    assert(shared_nest_host->class_loader() == class_loader(), "mismatched class loader");
-    assert(shared_nest_host->class_loader_data() == class_loader_data(class_loader), "mismatched class loader data");
-    ik->set_nest_host(shared_nest_host);
   }

+  // The lambda proxy class and its nest host have the same class loader and class loader data,
+  // as verified in SystemDictionaryShared::add_lambda_proxy_class()
+  assert(shared_nest_host->class_loader() == class_loader(), "mismatched class loader");
+  assert(shared_nest_host->class_loader_data() == class_loader_data(class_loader), "mismatched class loader data");
+  ik->set_nest_host(shared_nest_host);
+
   return loaded_ik;
 }
```
```diff
@@ -2257,11 +2257,12 @@ void ClassVerifier::verify_switch(
         "low must be less than or equal to high in tableswitch");
       return;
     }
-    keys = high - low + 1;
-    if (keys < 0) {
+    int64_t keys64 = ((int64_t)high - low) + 1;
+    if (keys64 > 65535) { // Max code length
       verify_error(ErrorContext::bad_code(bci), "too many keys in tableswitch");
       return;
     }
+    keys = (int)keys64;
     delta = 1;
   } else {
     keys = (int)Bytes::get_Java_u4(aligned_bcp + jintSize);
```
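Why the verifier change matters: with high near INT_MAX and low near INT_MIN, high - low + 1 overflows a 32-bit int, so the old "keys < 0" test could be fooled by a crafted class file. Doing the arithmetic in 64 bits and capping at the maximum method code length is always safe. An illustrative standalone check (my sketch, not the verifier source):

```cpp
#include <cassert>
#include <cstdint>

// Returns true if the tableswitch key count fits within the JVM's 65535-byte
// method code length limit; the 64-bit subtraction cannot overflow.
bool valid_key_count(int32_t low, int32_t high) {
  assert(low <= high);                        // the verifier checks this first
  int64_t keys64 = ((int64_t)high - low) + 1;
  return keys64 <= 65535;
}
```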
```diff
@@ -597,6 +597,7 @@ class methodHandle;
   do_intrinsic(_notifyJvmtiVThreadMount, java_lang_VirtualThread, notifyJvmtiMount_name, bool_void_signature, F_RN) \
   do_intrinsic(_notifyJvmtiVThreadUnmount, java_lang_VirtualThread, notifyJvmtiUnmount_name, bool_void_signature, F_RN) \
   do_intrinsic(_notifyJvmtiVThreadHideFrames, java_lang_VirtualThread, notifyJvmtiHideFrames_name, bool_void_signature, F_RN) \
+  do_intrinsic(_notifyJvmtiVThreadDisableSuspend, java_lang_VirtualThread, notifyJvmtiDisableSuspend_name, bool_void_signature, F_RN) \
                                                                           \
   /* support for UnsafeConstants */                                       \
   do_class(jdk_internal_misc_UnsafeConstants, "jdk/internal/misc/UnsafeConstants") \
```
```diff
@@ -421,6 +421,7 @@ class SerializeClosure;
   template(notifyJvmtiMount_name, "notifyJvmtiMount") \
   template(notifyJvmtiUnmount_name, "notifyJvmtiUnmount") \
   template(notifyJvmtiHideFrames_name, "notifyJvmtiHideFrames") \
+  template(notifyJvmtiDisableSuspend_name, "notifyJvmtiDisableSuspend") \
   template(doYield_name, "doYield") \
   template(enter_name, "enter") \
   template(enterSpecial_name, "enterSpecial") \
```
```diff
@@ -3017,7 +3017,7 @@ void nmethod::print_nmethod_labels(outputStream* stream, address block_begin, bo
     assert(sig_index == sizeargs, "");
   }
   const char* spname = "sp"; // make arch-specific?
-  intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs);
+  SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs);
   int stack_slot_offset = this->frame_size() * wordSize;
   int tab1 = 14, tab2 = 24;
   int sig_index = 0;
```
```diff
@@ -841,8 +841,15 @@ void DeoptimizeObjectsALotThread::deoptimize_objects_alot_loop_all() {


 JavaThread* CompileBroker::make_thread(ThreadType type, jobject thread_handle, CompileQueue* queue, AbstractCompiler* comp, JavaThread* THREAD) {
-  JavaThread* new_thread = nullptr;
+  Handle thread_oop(THREAD, JNIHandles::resolve_non_null(thread_handle));
+
+  if (java_lang_Thread::thread(thread_oop()) != nullptr) {
+    assert(type == compiler_t, "should only happen with reused compiler threads");
+    // The compiler thread hasn't actually exited yet so don't try to reuse it
+    return nullptr;
+  }

+  JavaThread* new_thread = nullptr;
   switch (type) {
     case compiler_t:
       assert(comp != nullptr, "Compiler instance missing.");
@@ -871,7 +878,6 @@ JavaThread* CompileBroker::make_thread(ThreadType type, jobject thread_handle, C
   // JavaThread due to lack of resources. We will handle that failure below.
   // Also check new_thread so that static analysis is happy.
   if (new_thread != nullptr && new_thread->osthread() != nullptr) {
-    Handle thread_oop(THREAD, JNIHandles::resolve_non_null(thread_handle));

     if (type == compiler_t) {
       CompilerThread::cast(new_thread)->set_compiler(comp);
```
```diff
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -403,7 +403,7 @@ class G1PrepareEvacuationTask : public WorkerTask {
         _g1h->register_region_with_region_attr(hr);
       }
       log_debug(gc, humongous)("Humongous region %u (object size %zu @ " PTR_FORMAT ") remset %zu code roots %zu "
-                               "marked %d pinned count %u reclaim candidate %d type array %d",
+                               "marked %d pinned count %zu reclaim candidate %d type array %d",
                                index,
                                cast_to_oop(hr->bottom())->size() * HeapWordSize,
                                p2i(hr->bottom()),
```
```diff
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -424,7 +424,7 @@ void HeapRegion::print_on(outputStream* st) const {
       st->print("|-");
     }
   }
-  st->print("|%3u", Atomic::load(&_pinned_object_count));
+  st->print("|%3zu", Atomic::load(&_pinned_object_count));
   st->print_cr("");
 }
```
```diff
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -257,7 +257,7 @@ private:
   uint _node_index;

   // Number of objects in this region that are currently pinned.
-  volatile uint _pinned_object_count;
+  volatile size_t _pinned_object_count;

   void report_region_type_change(G1HeapRegionTraceType::Type to);
@@ -408,7 +408,7 @@ public:

   bool is_old_or_humongous() const { return _type.is_old_or_humongous(); }

-  uint pinned_count() const { return Atomic::load(&_pinned_object_count); }
+  size_t pinned_count() const { return Atomic::load(&_pinned_object_count); }
   bool has_pinned_objects() const { return pinned_count() > 0; }

   void set_free();
```
```diff
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -554,11 +554,11 @@ inline void HeapRegion::record_surv_words_in_group(size_t words_survived) {
 }

 inline void HeapRegion::increment_pinned_object_count() {
-  Atomic::add(&_pinned_object_count, 1u, memory_order_relaxed);
+  Atomic::add(&_pinned_object_count, (size_t)1, memory_order_relaxed);
 }

 inline void HeapRegion::decrement_pinned_object_count() {
-  Atomic::sub(&_pinned_object_count, 1u, memory_order_relaxed);
+  Atomic::sub(&_pinned_object_count, (size_t)1, memory_order_relaxed);
 }

 #endif // SHARE_GC_G1_HEAPREGION_INLINE_HPP
```
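The (size_t)1 casts follow from widening _pinned_object_count to size_t: HotSpot's Atomic add/sub templates deduce the operand type, so the literal must match the counter's type (that reading of the API is an assumption on my part). The same shape in standard C++, as a self-contained sketch:

```cpp
#include <atomic>
#include <cstddef>

std::atomic<size_t> pinned_object_count{0};

void increment_pinned_object_count() {
  // relaxed ordering mirrors memory_order_relaxed in the hunk: only
  // atomicity of the counter update is required, not ordering with
  // surrounding accesses.
  pinned_object_count.fetch_add(size_t{1}, std::memory_order_relaxed);
}

void decrement_pinned_object_count() {
  pinned_object_count.fetch_sub(size_t{1}, std::memory_order_relaxed);
}
```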
```diff
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,7 @@
   nonstatic_field(HeapRegion, _bottom, HeapWord* const)                \
   nonstatic_field(HeapRegion, _top, HeapWord* volatile)                \
   nonstatic_field(HeapRegion, _end, HeapWord* const)                   \
-  nonstatic_field(HeapRegion, _pinned_object_count, volatile uint)     \
+  volatile_nonstatic_field(HeapRegion, _pinned_object_count, size_t)   \
                                                                        \
   nonstatic_field(HeapRegionType, _tag, HeapRegionType::Tag volatile)  \
                                                                        \
```
```diff
@@ -31,6 +31,7 @@
 #include "runtime/init.hpp"
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
+#include "runtime/safepoint.hpp"

 WorkerTaskDispatcher::WorkerTaskDispatcher() :
   _task(nullptr),
```
```diff
@@ -141,40 +142,44 @@ void WorkerThreads::threads_do(ThreadClosure* tc) const {
   }
 }

-void WorkerThreads::set_indirectly_suspendible_threads() {
+template <typename Function>
+void WorkerThreads::threads_do_f(Function function) const {
+  for (uint i = 0; i < _created_workers; i++) {
+    function(_workers[i]);
+  }
+}
+
+void WorkerThreads::set_indirect_states() {
 #ifdef ASSERT
-  class SetIndirectlySuspendibleThreadClosure : public ThreadClosure {
-    virtual void do_thread(Thread* thread) {
-      thread->set_indirectly_suspendible_thread();
-    }
-  };
-
-  if (Thread::current()->is_suspendible_thread()) {
-    SetIndirectlySuspendibleThreadClosure cl;
-    threads_do(&cl);
-  }
+  const bool is_suspendible = Thread::current()->is_suspendible_thread();
+  const bool is_safepointed = Thread::current()->is_VM_thread() && SafepointSynchronize::is_at_safepoint();
+
+  threads_do_f([&](Thread* thread) {
+    assert(!thread->is_indirectly_suspendible_thread(), "Unexpected");
+    assert(!thread->is_indirectly_safepoint_thread(), "Unexpected");
+    if (is_suspendible) {
+      thread->set_indirectly_suspendible_thread();
+    }
+    if (is_safepointed) {
+      thread->set_indirectly_safepoint_thread();
+    }
+  });
 #endif
 }

-void WorkerThreads::clear_indirectly_suspendible_threads() {
+void WorkerThreads::clear_indirect_states() {
 #ifdef ASSERT
-  class ClearIndirectlySuspendibleThreadClosure : public ThreadClosure {
-    virtual void do_thread(Thread* thread) {
-      thread->clear_indirectly_suspendible_thread();
-    }
-  };
-
-  if (Thread::current()->is_suspendible_thread()) {
-    ClearIndirectlySuspendibleThreadClosure cl;
-    threads_do(&cl);
-  }
+  threads_do_f([&](Thread* thread) {
+    thread->clear_indirectly_suspendible_thread();
+    thread->clear_indirectly_safepoint_thread();
+  });
 #endif
 }

 void WorkerThreads::run_task(WorkerTask* task) {
-  set_indirectly_suspendible_threads();
+  set_indirect_states();
   _dispatcher.coordinator_distribute_task(task, _active_workers);
-  clear_indirectly_suspendible_threads();
+  clear_indirect_states();
 }

 void WorkerThreads::run_task(WorkerTask* task, uint num_workers) {
```
```diff
@@ -93,8 +93,8 @@ private:

   WorkerThread* create_worker(uint name_suffix);

-  void set_indirectly_suspendible_threads();
-  void clear_indirectly_suspendible_threads();
+  void set_indirect_states();
+  void clear_indirect_states();

 protected:
   virtual void on_create_worker(WorkerThread* worker) {}
@@ -111,6 +111,8 @@ public:
   uint set_active_workers(uint num_workers);

   void threads_do(ThreadClosure* tc) const;
+  template <typename Function>
+  void threads_do_f(Function function) const;

   const char* name() const { return _name; }
```
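The refactor above replaces one-off ThreadClosure subclasses with a template member that accepts any callable; defining the template in the .cpp file works here because all of its instantiations live in that translation unit. A simplified sketch of the pattern (my types, not the HotSpot classes):

```cpp
#include <utility>
#include <vector>

struct Worker { bool indirectly_suspendible = false; };

class Workers {
  std::vector<Worker*> _workers;
public:
  explicit Workers(std::vector<Worker*> w) : _workers(std::move(w)) {}

  // One template member replaces a hand-written closure class per use site.
  template <typename Function>
  void threads_do_f(Function function) const {
    for (Worker* w : _workers) {
      function(w);
    }
  }

  void set_states(bool suspendible) {
    // The lambda captures local state by reference, which the old
    // closure-object style had to thread through member fields.
    threads_do_f([&](Worker* w) { w->indirectly_suspendible = suspendible; });
  }
};
```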
```diff
@@ -26,14 +26,13 @@

 #include "gc/z/zBarrier.hpp"

-#include "code/codeCache.hpp"
 #include "gc/z/zAddress.inline.hpp"
 #include "gc/z/zGeneration.inline.hpp"
 #include "gc/z/zHeap.inline.hpp"
 #include "gc/z/zResurrection.inline.hpp"
+#include "gc/z/zVerify.hpp"
 #include "oops/oop.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/continuation.hpp"

 // A self heal must always "upgrade" the address metadata bits in
 // accordance with the metadata bits state machine. The following
@@ -320,17 +319,9 @@ inline zaddress ZBarrier::make_load_good_no_relocate(zpointer o) {
   return remap(ZPointer::uncolor_unsafe(o), remap_generation(o));
 }

-inline void z_assert_is_barrier_safe() {
-  assert(!Thread::current()->is_ConcurrentGC_thread() ||          /* Need extra checks for ConcurrentGCThreads */
-         Thread::current()->is_suspendible_thread() ||            /* Thread prevents safepoints */
-         Thread::current()->is_indirectly_suspendible_thread() || /* Coordinator thread prevents safepoints */
-         SafepointSynchronize::is_at_safepoint(),                 /* Is at safepoint */
-         "Shouldn't perform load barrier");
-}
-
 template <typename ZBarrierSlowPath>
 inline zaddress ZBarrier::barrier(ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path, ZBarrierColor color, volatile zpointer* p, zpointer o, bool allow_null) {
-  z_assert_is_barrier_safe();
+  z_verify_safepoints_are_blocked();

   // Fast path
   if (fast_path(o)) {
```
```diff
@@ -152,6 +152,19 @@ void ZBarrierSet::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {
   deoptimize_allocation(thread);
 }

+void ZBarrierSet::clone_obj_array(objArrayOop src_obj, objArrayOop dst_obj, size_t size) {
+  volatile zpointer* src = (volatile zpointer*)src_obj->base();
+  volatile zpointer* dst = (volatile zpointer*)dst_obj->base();
+
+  for (const zpointer* const end = cast_from_oop<const zpointer*>(src_obj) + size; src < end; src++, dst++) {
+    zaddress elem = ZBarrier::load_barrier_on_oop_field(src);
+    // We avoid healing here because the store below colors the pointer store good,
+    // hence avoiding the cost of a CAS.
+    ZBarrier::store_barrier_on_heap_oop_field(dst, false /* heal */);
+    Atomic::store(dst, ZAddress::store_good(elem));
+  }
+}
+
 void ZBarrierSet::print_on(outputStream* st) const {
   st->print_cr("ZBarrierSet");
 }
```
```diff
@@ -39,6 +39,8 @@ public:
   static ZBarrierSetAssembler* assembler();
   static bool barrier_needed(DecoratorSet decorators, BasicType type);

+  static void clone_obj_array(objArrayOop src, objArrayOop dst, size_t size);
+
   virtual void on_thread_create(Thread* thread);
   virtual void on_thread_destroy(Thread* thread);
   virtual void on_thread_attach(Thread* thread);
```
@@ -403,14 +403,13 @@ inline bool ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_arraycopy_i
|
||||
return oop_arraycopy_in_heap_no_check_cast(dst, src, length);
|
||||
}
|
||||
|
||||
class ZStoreBarrierOopClosure : public BasicOopIterateClosure {
|
||||
class ZColorStoreGoodOopClosure : public BasicOopIterateClosure {
|
||||
public:
|
||||
virtual void do_oop(oop* p_) {
|
||||
volatile zpointer* const p = (volatile zpointer*)p_;
|
||||
const zpointer ptr = ZBarrier::load_atomic(p);
|
||||
const zaddress addr = ZPointer::uncolor(ptr);
|
||||
ZBarrier::store_barrier_on_heap_oop_field(p, false /* heal */);
|
||||
*p = ZAddress::store_good(addr);
|
||||
Atomic::store(p, ZAddress::store_good(addr));
|
||||
}
|
||||
|
||||
virtual void do_oop(narrowOop* p) {
|
||||
@@ -433,6 +432,17 @@ template <DecoratorSet decorators, typename BarrierSetT>
|
||||
inline void ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::clone_in_heap(oop src, oop dst, size_t size) {
|
||||
assert_is_valid(to_zaddress(src));
|
||||
|
||||
if (dst->is_objArray()) {
|
||||
// Cloning an object array is similar to performing array copy.
|
||||
// If an array is large enough to have its allocation segmented,
|
||||
// this operation might require GC barriers. However, the intrinsics
|
||||
// for cloning arrays transform the clone to an optimized allocation
|
||||
// and arraycopy sequence, so the performance of this runtime call
|
||||
// does not matter for object arrays.
|
||||
clone_obj_array(objArrayOop(src), objArrayOop(dst), size);
|
||||
return;
|
||||
}
|
||||
|
||||
// Fix the oops
|
||||
ZLoadBarrierOopClosure cl;
|
||||
ZIterator::oop_iterate(src, &cl);
|
||||
@@ -440,10 +450,10 @@ inline void ZBarrierSet::AccessBarrier<decorators, BarrierSetT>::clone_in_heap(o
|
||||
// Clone the object
|
||||
Raw::clone_in_heap(src, dst, size);
|
||||
|
||||
assert(ZHeap::heap()->is_young(to_zaddress(dst)), "ZColorStoreGoodOopClosure is only valid for young objects");
|
||||
assert(dst->is_typeArray() || ZHeap::heap()->is_young(to_zaddress(dst)), "ZColorStoreGoodOopClosure is only valid for young objects");
|
||||
|
||||
// Color store good before handing out
|
||||
ZStoreBarrierOopClosure cl_sg;
|
||||
ZColorStoreGoodOopClosure cl_sg;
|
||||
ZIterator::oop_iterate(dst, &cl_sg);
|
||||
}

@@ -286,6 +286,10 @@ void ZGeneration::desynchronize_relocation() {
  _relocate.desynchronize();
}

bool ZGeneration::is_relocate_queue_active() const {
  return _relocate.is_queue_active();
}

void ZGeneration::reset_statistics() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
  _freed = 0;
@@ -1496,7 +1500,7 @@ void ZGenerationOld::remap_young_roots() {
  uint remap_nworkers = clamp(ZGeneration::young()->workers()->active_workers() + prev_nworkers, 1u, ZOldGCThreads);
  _workers.set_active_workers(remap_nworkers);

  // TODO: The STS joiner is only needed to satisfy z_assert_is_barrier_safe that doesn't
  // TODO: The STS joiner is only needed to satisfy ZBarrier::assert_is_state_barrier_safe that doesn't
  // understand the driver locker. Consider making the assert aware of the driver locker.
  SuspendibleThreadSetJoiner sts_joiner;

@@ -166,6 +166,7 @@ public:
  // Relocation
  void synchronize_relocation();
  void desynchronize_relocation();
  bool is_relocate_queue_active() const;
  zaddress relocate_or_remap_object(zaddress_unsafe addr);
  zaddress remap_object(zaddress_unsafe addr);

@@ -26,11 +26,21 @@

#include "gc/z/zIterator.hpp"

#include "gc/z/zVerify.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"

inline bool ZIterator::is_invisible_object(oop obj) {
  // This is a good place to make sure that we can't concurrently iterate over
  // objects while VMThread operations think they have exclusive access to the
  // object graph.
  //
  // One example that has caused problems is the JFR Leak Profiler, which
  // sets the mark word to a value that makes the object arrays look like
  // invisible objects.
  z_verify_safepoints_are_blocked();

  return obj->mark_acquire().is_marked();
}

@@ -87,6 +87,7 @@ ZRelocateQueue::ZRelocateQueue()
    _nworkers(0),
    _nsynchronized(0),
    _synchronize(false),
    _is_active(false),
    _needs_attention(0) {}

bool ZRelocateQueue::needs_attention() const {
@@ -103,6 +104,20 @@ void ZRelocateQueue::dec_needs_attention() {
  assert(needs_attention == 0 || needs_attention == 1, "Invalid state");
}

void ZRelocateQueue::activate(uint nworkers) {
  _is_active = true;
  join(nworkers);
}

void ZRelocateQueue::deactivate() {
  Atomic::store(&_is_active, false);
  clear();
}

bool ZRelocateQueue::is_active() const {
  return Atomic::load(&_is_active);
}

void ZRelocateQueue::join(uint nworkers) {
  assert(nworkers != 0, "Must request at least one worker");
  assert(_nworkers == 0, "Invalid state");
@@ -327,7 +342,7 @@ ZWorkers* ZRelocate::workers() const {
}

void ZRelocate::start() {
  _queue.join(workers()->active_workers());
  _queue.activate(workers()->active_workers());
}

void ZRelocate::add_remset(volatile zpointer* p) {
@@ -1088,6 +1103,9 @@ public:

  ~ZRelocateTask() {
    _generation->stat_relocation()->at_relocate_end(_small_allocator.in_place_count(), _medium_allocator.in_place_count());

    // Signal that we're not using the queue anymore. Used mostly for asserts.
    _queue->deactivate();
  }

  virtual void work() {
@@ -1232,8 +1250,6 @@ void ZRelocate::relocate(ZRelocationSet* relocation_set) {
    ZRelocateAddRemsetForFlipPromoted task(relocation_set->flip_promoted_pages());
    workers()->run(&task);
  }

  _queue.clear();
}

ZPageAge ZRelocate::compute_to_age(ZPageAge from_age) {
@@ -1316,3 +1332,7 @@ void ZRelocate::desynchronize() {
ZRelocateQueue* ZRelocate::queue() {
  return &_queue;
}

bool ZRelocate::is_queue_active() const {
  return _queue.is_active();
}

@@ -41,6 +41,7 @@ private:
  uint _nworkers;
  uint _nsynchronized;
  bool _synchronize;
  volatile bool _is_active;
  volatile int _needs_attention;

  bool needs_attention() const;
@@ -53,6 +54,10 @@ private:
public:
  ZRelocateQueue();

  void activate(uint nworkers);
  void deactivate();
  bool is_active() const;

  void join(uint nworkers);
  void resize_workers(uint nworkers);
  void leave();
@@ -99,6 +104,8 @@ public:
  void desynchronize();

  ZRelocateQueue* queue();

  bool is_queue_active() const;
};

#endif // SHARE_GC_Z_ZRELOCATE_HPP
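
The activate()/deactivate() pair added here mainly exists so asserts elsewhere (see z_verify_safepoints_are_blocked further down) can ask whether the queue may currently be blocking safepoints. A minimal sketch of the lifecycle, assuming std::atomic in place of HotSpot's Atomic class (all names illustrative):

// relocate_queue_model.cpp -- illustrative lifecycle model, not HotSpot code.
#include <atomic>
#include <cassert>

class QueueModel {
  std::atomic<bool> _is_active{false};
  unsigned _nworkers = 0;

  void join(unsigned nworkers) {
    assert(nworkers != 0 && "Must request at least one worker");
    _nworkers = nworkers;
  }

public:
  // Activation publishes "the queue may block safepoints" before workers join.
  void activate(unsigned nworkers) {
    _is_active.store(true, std::memory_order_release);
    join(nworkers);
  }

  // Deactivation clears the flag once no worker uses the queue anymore.
  void deactivate() {
    _is_active.store(false, std::memory_order_release);
    _nworkers = 0;
  }

  // Asserts consult this global state instead of per-thread state.
  bool is_active() const { return _is_active.load(std::memory_order_acquire); }
};

int main() {
  QueueModel queue;
  queue.activate(4);
  assert(queue.is_active());
  queue.deactivate();
  assert(!queue.is_active());
}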

@@ -106,11 +106,16 @@ public:
  }

  virtual void work() {
    // Join the STS to block out VMThreads while running promote_barrier_on_young_oop_field
    SuspendibleThreadSetJoiner sts_joiner;

    // Allocate and install forwardings for small pages
    for (size_t page_index; _small_iter.next_index(&page_index);) {
      ZPage* page = _small->at(int(page_index));
      ZForwarding* const forwarding = ZForwarding::alloc(_allocator, page, to_age(page));
      install_small(forwarding, _medium->length() + page_index);

      SuspendibleThreadSet::yield();
    }

    // Allocate and install forwardings for medium pages
@@ -118,6 +123,8 @@ public:
      ZPage* page = _medium->at(int(page_index));
      ZForwarding* const forwarding = ZForwarding::alloc(_allocator, page, to_age(page));
      install_medium(forwarding, page_index);

      SuspendibleThreadSet::yield();
    }
  }

@@ -29,11 +29,12 @@
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zBarrier.hpp"
#include "oops/oop.hpp"

template <typename ObjectFunctionT>
inline void ZUncoloredRoot::barrier(ObjectFunctionT function, zaddress_unsafe* p, uintptr_t color) {
  z_assert_is_barrier_safe();
  z_verify_safepoints_are_blocked();

  const zaddress_unsafe addr = Atomic::load(p);
  assert_is_valid(addr);

@@ -43,16 +43,67 @@
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/stackFrameStream.inline.hpp"
#include "runtime/stackWatermark.inline.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/preserveException.hpp"
#include "utilities/resourceHash.hpp"

#ifdef ASSERT

// Used to verify that safepoint operations can't be scheduled concurrently
// with callers to this function. Typically used to verify that object oops
// and headers are safe to access.
void z_verify_safepoints_are_blocked() {
  Thread* current = Thread::current();

  if (current->is_ConcurrentGC_thread()) {
    assert(current->is_suspendible_thread(), // Thread prevents safepoints
           "Safepoints are not blocked by current thread");

  } else if (current->is_Worker_thread()) {
    assert(// Check if ...
           // the thread prevents safepoints
           current->is_suspendible_thread() ||
           // the coordinator thread is the safepointing VMThread
           current->is_indirectly_safepoint_thread() ||
           // the coordinator thread prevents safepoints
           current->is_indirectly_suspendible_thread() ||
           // the RelocateQueue prevents safepoints
           //
           // RelocateQueue acts as a pseudo STS leaver/joiner and blocks
           // safepoints. There's currently no infrastructure to check if the
           // current thread is active or not, so check the global states instead.
           ZGeneration::young()->is_relocate_queue_active() ||
           ZGeneration::old()->is_relocate_queue_active(),
           "Safepoints are not blocked by current thread");

  } else if (current->is_Java_thread()) {
    JavaThreadState state = JavaThread::cast(current)->thread_state();
    assert(state == _thread_in_Java || state == _thread_in_vm || state == _thread_new,
           "Safepoints are not blocked by current thread from state: %d", state);

  } else if (current->is_JfrSampler_thread()) {
    // The JFR sampler thread blocks out safepoints with this lock.
    assert_lock_strong(Threads_lock);

  } else if (current->is_VM_thread()) {
    // The VM Thread doesn't schedule new safepoints while executing
    // other safepoint or handshake operations.

  } else {
    fatal("Unexpected thread type");
  }
}

#endif

#define BAD_OOP_ARG(o, p)   "Bad oop " PTR_FORMAT " found at " PTR_FORMAT, untype(o), p2i(p)

static bool z_is_null_relaxed(zpointer o) {

@@ -30,6 +30,8 @@ class frame;
class ZForwarding;
class ZPageAllocator;

NOT_DEBUG(inline) void z_verify_safepoints_are_blocked() NOT_DEBUG_RETURN;
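
The NOT_DEBUG(inline) ... NOT_DEBUG_RETURN declaration is a HotSpot idiom: in debug builds the function is only declared here and defined under #ifdef ASSERT in the .cpp file, while in product builds it collapses to an empty inline body that optimizes away. A standalone sketch of the same trick; the macro definitions below are re-derived for illustration, the authoritative ones live in utilities/macros.hpp:

// not_debug_sketch.cpp -- illustrative re-creation of the idiom.
#include <cstdio>

#ifdef ASSERT
  // Debug build: expand to nothing; the real body lives in a .cpp file.
  #define NOT_DEBUG(code)
  #define NOT_DEBUG_RETURN /* next token must be ; */
#else
  // Product build: keep 'inline' and supply an empty body.
  #define NOT_DEBUG(code) code
  #define NOT_DEBUG_RETURN {}
#endif

NOT_DEBUG(inline) void verify_invariants() NOT_DEBUG_RETURN;

#ifdef ASSERT
void verify_invariants() {
  std::printf("expensive verification runs only in debug builds\n");
}
#endif

int main() {
  verify_invariants(); // no-op in product builds, a real check in debug ones
}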

class ZVerify : public AllStatic {
private:
  static void roots_strong(bool verify_after_old_mark);

@@ -1154,6 +1154,9 @@ JVM_VirtualThreadUnmount(JNIEnv* env, jobject vthread, jboolean hide);
JNIEXPORT void JNICALL
JVM_VirtualThreadHideFrames(JNIEnv* env, jobject vthread, jboolean hide);

JNIEXPORT void JNICALL
JVM_VirtualThreadDisableSuspend(JNIEnv* env, jobject vthread, jboolean enter);

/*
 * Core reflection support.
 */

@@ -385,13 +385,18 @@ int Bytecodes::special_length_at(Bytecodes::Code code, address bcp, address end)
      if (end != nullptr && aligned_bcp + 3*jintSize >= end) {
        return -1; // don't read past end of code buffer
      }
      // Promote calculation to 64 bits to do range checks, used by the verifier.
      // Promote calculation to signed 64 bits to do range checks, used by the verifier.
      int64_t lo = (int)Bytes::get_Java_u4(aligned_bcp + 1*jintSize);
      int64_t hi = (int)Bytes::get_Java_u4(aligned_bcp + 2*jintSize);
      int64_t len = (aligned_bcp - bcp) + (3 + hi - lo + 1)*jintSize;
      // only return len if it can be represented as a positive int;
      // return -1 otherwise
      return (len > 0 && len == (int)len) ? (int)len : -1;
      // Only return len if it can be represented as a positive int and lo <= hi.
      // The caller checks for bytecode stream overflow.
      if (lo <= hi && len == (int)len) {
        assert(len > 0, "must be");
        return (int)len;
      } else {
        return -1;
      }
    }

  case _lookupswitch:      // fall through
@@ -404,9 +409,13 @@ int Bytecodes::special_length_at(Bytecodes::Code code, address bcp, address end)
      // Promote calculation to 64 bits to do range checks, used by the verifier.
      int64_t npairs = (int)Bytes::get_Java_u4(aligned_bcp + jintSize);
      int64_t len = (aligned_bcp - bcp) + (2 + 2*npairs)*jintSize;
      // only return len if it can be represented as a positive int;
      // return -1 otherwise
      return (len > 0 && len == (int)len) ? (int)len : -1;
      // Only return len if it can be represented as a positive int and npairs >= 0.
      if (npairs >= 0 && len == (int)len) {
        assert(len > 0, "must be");
        return (int)len;
      } else {
        return -1;
      }
    }
  default:
    // Note: Length functions must return <=0 for invalid bytecodes.
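
The special_length_at changes are easiest to see with concrete numbers: lo, hi and npairs come from untrusted bytecode, so (3 + hi - lo + 1) can be made huge or negative; doing the arithmetic in int64_t, requiring lo <= hi, and checking that the result round-trips through int makes the rejection reliable. A standalone sketch of the tableswitch case (pad handling simplified; this is not the VM's parser):

// switch_length_sketch.cpp -- standalone model of the tableswitch length
// computation; padding and byte reading are simplified relative to HotSpot.
#include <cstdint>
#include <cstdio>

constexpr int64_t jintSize = 4;

// Returns the bytecode length in bytes, or -1 if lo/hi are inconsistent or
// the length does not fit in a positive int.
int tableswitch_length(int64_t pad, int32_t lo, int32_t hi) {
  // Promote to signed 64 bits so hostile lo/hi cannot overflow here:
  // default + (hi - lo + 1) jump offsets, each one jint wide.
  const int64_t len = pad + (3 + (int64_t)hi - (int64_t)lo + 1) * jintSize;
  if (lo <= hi && len == (int32_t)len) {
    return (int32_t)len; // necessarily > 0 here, mirroring the assert above
  }
  return -1;
}

int main() {
  std::printf("%d\n", tableswitch_length(1, 0, 2));                  // 25: valid
  std::printf("%d\n", tableswitch_length(1, 10, 5));                 // -1: lo > hi
  std::printf("%d\n", tableswitch_length(1, INT32_MIN, INT32_MAX));  // -1: too large
}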

@@ -67,7 +67,9 @@ void Jfr::on_create_vm_3() {
}

void Jfr::on_unloading_classes() {
  JfrCheckpointManager::on_unloading_classes();
  if (JfrRecorder::is_created() || JfrRecorder::is_started_on_commandline()) {
    JfrCheckpointManager::on_unloading_classes();
  }
}

bool Jfr::is_excluded(Thread* t) {

@@ -117,16 +117,16 @@ bool JfrCheckpointManager::initialize_early() {
  assert(_thread_local_mspace == nullptr, "invariant");
  _thread_local_mspace = new JfrThreadLocalCheckpointMspace();
  if (_thread_local_mspace == nullptr || !_thread_local_mspace->initialize(thread_local_buffer_size,
                                                                           thread_local_buffer_prealloc_count,
                                                                           thread_local_buffer_prealloc_count)) {
                                                                           thread_local_buffer_prealloc_count,
                                                                           thread_local_buffer_prealloc_count)) {
    return false;
  }

  assert(_virtual_thread_local_mspace == nullptr, "invariant");
  _virtual_thread_local_mspace = new JfrThreadLocalCheckpointMspace();
  if (_virtual_thread_local_mspace == nullptr || !_virtual_thread_local_mspace->initialize(virtual_thread_local_buffer_size,
                                                                                           JFR_MSPACE_UNLIMITED_CACHE_SIZE,
                                                                                           virtual_thread_local_buffer_prealloc_count)) {
                                                                                           JFR_MSPACE_UNLIMITED_CACHE_SIZE,
                                                                                           virtual_thread_local_buffer_prealloc_count)) {
    return false;
  }
  return true;

@@ -55,6 +55,7 @@ struct JfrCheckpointContext {

class JfrCheckpointWriter : public JfrCheckpointWriterBase {
  friend class JfrCheckpointManager;
  friend class JfrDeprecationManager;
  friend class JfrSerializerRegistration;
  friend class JfrTypeManager;
private:

File diff suppressed because it is too large
@@ -32,7 +32,8 @@

JfrArtifactSet::JfrArtifactSet(bool class_unload) : _symbol_table(nullptr),
                                                    _klass_list(nullptr),
                                                    _total_count(0) {
                                                    _total_count(0),
                                                    _class_unload(class_unload) {
  initialize(class_unload);
  assert(_klass_list != nullptr, "invariant");
}
@@ -41,6 +42,7 @@ static const size_t initial_klass_list_size = 256;
const int initial_klass_loader_set_size = 64;

void JfrArtifactSet::initialize(bool class_unload) {
  _class_unload = class_unload;
  if (_symbol_table == nullptr) {
    _symbol_table = JfrSymbolTable::create();
    assert(_symbol_table != nullptr, "invariant");
@@ -51,6 +53,11 @@ void JfrArtifactSet::initialize(bool class_unload) {
  // resource allocation
  _klass_list = new GrowableArray<const Klass*>(initial_klass_list_size);
  _klass_loader_set = new GrowableArray<const Klass*>(initial_klass_loader_set_size);
  _klass_loader_leakp_set = new GrowableArray<const Klass*>(initial_klass_loader_set_size);

  if (class_unload) {
    _unloading_set = new GrowableArray<const Klass*>(initial_klass_list_size);
  }
}

void JfrArtifactSet::clear() {
@@ -97,10 +104,27 @@ int JfrArtifactSet::entries() const {
  return _klass_list->length();
}

bool JfrArtifactSet::should_do_loader_klass(const Klass* k) {
static inline bool not_in_set(GrowableArray<const Klass*>* set, const Klass* k) {
  assert(set != nullptr, "invariant");
  assert(k != nullptr, "invariant");
  return !JfrMutablePredicate<const Klass*, compare_klasses>::test(set, k);
}

bool JfrArtifactSet::should_do_cld_klass(const Klass* k, bool leakp) {
  assert(k != nullptr, "invariant");
  assert(_klass_loader_set != nullptr, "invariant");
  return !JfrMutablePredicate<const Klass*, compare_klasses>::test(_klass_loader_set, k);
  assert(_klass_loader_leakp_set != nullptr, "invariant");
  return not_in_set(leakp ? _klass_loader_leakp_set : _klass_loader_set, k);
}

bool JfrArtifactSet::should_do_unloading_artifact(const void* ptr) {
  assert(ptr != nullptr, "invariant");
  assert(_class_unload, "invariant");
  assert(_unloading_set != nullptr, "invariant");
  // The incoming pointers are of all kinds of different types.
  // However, we are only interested in set membership.
  // Treat them uniformly as const Klass* for simplicity and code reuse.
  return not_in_set(_unloading_set, static_cast<const Klass*>(ptr));
}
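
not_in_set above leans on JfrMutablePredicate, whose test both queries and inserts: the first query for a key reports it as unseen and records it, so each artifact is written at most once. A small model of that test-and-insert semantics, with std::set standing in for the sorted GrowableArray (an assumption, not the JFR implementation):

// mutable_predicate_sketch.cpp -- illustrative test-and-insert predicate.
#include <cassert>
#include <set>

template <typename T>
class MutablePredicate {
  std::set<T> _seen;
public:
  // Returns true if t was already in the set; inserts it otherwise.
  bool test(const T& t) { return !_seen.insert(t).second; }
};

template <typename T>
static bool not_in_set(MutablePredicate<T>& set, const T& t) {
  return !set.test(t); // true only the first time t is presented
}

int main() {
  MutablePredicate<int> unloading_set;
  assert(not_in_set(unloading_set, 42));   // first sighting: do the work
  assert(!not_in_set(unloading_set, 42));  // second sighting: skip it
}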

void JfrArtifactSet::register_klass(const Klass* k) {

@@ -114,28 +114,6 @@ class ClearArtifact<const Method*> {
  }
};

template <typename T>
class SerializePredicate {
  bool _class_unload;
public:
  SerializePredicate(bool class_unload) : _class_unload(class_unload) {}
  bool operator()(T const& value) {
    assert(value != nullptr, "invariant");
    return _class_unload ? true : IS_NOT_SERIALIZED(value);
  }
};

template <>
class SerializePredicate<const Method*> {
  bool _class_unload;
public:
  SerializePredicate(bool class_unload) : _class_unload(class_unload) {}
  bool operator()(const Method* method) {
    assert(method != nullptr, "invariant");
    return _class_unload ? true : METHOD_IS_NOT_SERIALIZED(method);
  }
};

template <typename T, bool leakp>
class SymbolPredicate {
  bool _class_unload;
@@ -150,11 +128,23 @@ class SymbolPredicate {
  }
};

class KlassUsedPredicate {
  bool _current_epoch;
public:
  KlassUsedPredicate(bool current_epoch) : _current_epoch(current_epoch) {}
  bool operator()(const Klass* klass) {
    return _current_epoch ? USED_THIS_EPOCH(klass) : USED_PREVIOUS_EPOCH(klass);
  }
};

class MethodUsedPredicate {
  bool _current_epoch;
public:
  MethodUsedPredicate(bool current_epoch) : _current_epoch(current_epoch) {}
  bool operator()(const Klass* klass) {
    if (!klass->is_instance_klass()) {
      return false;
    }
    return _current_epoch ? METHOD_USED_THIS_EPOCH(klass) : METHOD_USED_PREVIOUS_EPOCH(klass);
  }
};
@@ -210,7 +200,10 @@ class JfrArtifactSet : public JfrCHeapObj {
  JfrSymbolTable* _symbol_table;
  GrowableArray<const Klass*>* _klass_list;
  GrowableArray<const Klass*>* _klass_loader_set;
  GrowableArray<const Klass*>* _klass_loader_leakp_set;
  GrowableArray<const Klass*>* _unloading_set;
  size_t _total_count;
  bool _class_unload;

public:
  JfrArtifactSet(bool class_unload);
@@ -235,14 +228,20 @@ class JfrArtifactSet : public JfrCHeapObj {
  int entries() const;
  size_t total_count() const;
  void register_klass(const Klass* k);
  bool should_do_loader_klass(const Klass* k);
  bool should_do_cld_klass(const Klass* k, bool leakp);
  bool should_do_unloading_artifact(const void* ptr);
  void increment_checkpoint_id();

  template <typename Functor>
  void iterate_klasses(Functor& functor) const {
    for (int i = 0; i < _klass_list->length(); ++i) {
      if (!functor(_klass_list->at(i))) {
        break;
        return;
      }
    }
    for (int i = 0; i < _klass_loader_set->length(); ++i) {
      if (!functor(_klass_loader_set->at(i))) {
        return;
      }
    }
  }

@@ -33,6 +33,7 @@
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdBits.inline.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdEpoch.hpp"
#include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdMacros.hpp"
#include "jfr/support/jfrKlassExtension.hpp"
#include "oops/klass.hpp"
#include "oops/method.hpp"
#include "runtime/javaThread.hpp"
@@ -66,10 +67,14 @@ inline traceid set_used_and_get(const T* type) {
  return TRACE_ID(type);
}

// We set the 'method_and_class' bits to have a consistent
// bit pattern set always. This is because the tag is non-atomic,
// hence, we always need the same bit pattern in an epoch to avoid losing information.
inline void JfrTraceIdLoadBarrier::load_barrier(const Klass* klass) {
  SET_USED_THIS_EPOCH(klass);
  enqueue(klass);
  JfrTraceIdEpoch::set_changed_tag_state();
  SET_METHOD_AND_CLASS_USED_THIS_EPOCH(klass);
  assert(METHOD_AND_CLASS_USED_THIS_EPOCH(klass), "invariant");
  enqueue(klass);
  JfrTraceIdEpoch::set_changed_tag_state();
}

inline traceid JfrTraceIdLoadBarrier::load(const Klass* klass) {
@@ -113,26 +118,36 @@ inline traceid JfrTraceIdLoadBarrier::load_no_enqueue(const Klass* klass, const
  return (METHOD_ID(klass, method));
}

inline traceid JfrTraceIdLoadBarrier::load(const ModuleEntry* module) {
  return set_used_and_get(module);
}

inline traceid JfrTraceIdLoadBarrier::load(const PackageEntry* package) {
  return set_used_and_get(package);
}

inline traceid JfrTraceIdLoadBarrier::load(const ClassLoaderData* cld) {
  assert(cld != nullptr, "invariant");
  if (cld->has_class_mirror_holder()) {
    return 0;
  }
  const Klass* const class_loader_klass = cld->class_loader_klass();
  if (class_loader_klass != nullptr && should_tag(class_loader_klass)) {
    load_barrier(class_loader_klass);
  if (class_loader_klass != nullptr) {
    load(class_loader_klass);
  }
  return set_used_and_get(cld);
}

inline traceid JfrTraceIdLoadBarrier::load(const ModuleEntry* module) {
  assert(module != nullptr, "invariant");
  const ClassLoaderData* cld = module->loader_data();
  if (cld != nullptr) {
    load(cld);
  }
  return set_used_and_get(module);
}

inline traceid JfrTraceIdLoadBarrier::load(const PackageEntry* package) {
  assert(package != nullptr, "invariant");
  const ModuleEntry* const module_entry = package->module();
  if (module_entry != nullptr) {
    load(module_entry);
  }
  return set_used_and_get(package);
}
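
The reordered load() overloads now form a cascade: tagging a package first tags its module, tagging a module first tags its class loader data, and tagging a CLD tags the loader's Klass, so one package-level use pulls the whole containment chain into the recording. A standalone model of the cascade, with plain structs and bools instead of traceids and epoch bits:

// tag_cascade_sketch.cpp -- illustrative model of the load() cascade.
#include <cassert>

struct LoaderKlass { bool tagged = false; };
struct ClassLoaderData { LoaderKlass* loader_klass = nullptr; bool tagged = false; };
struct ModuleEntry { ClassLoaderData* cld = nullptr; bool tagged = false; };
struct PackageEntry { ModuleEntry* module = nullptr; bool tagged = false; };

static void load(ClassLoaderData* cld) {
  if (cld->loader_klass != nullptr) {
    cld->loader_klass->tagged = true; // tag the loader class first
  }
  cld->tagged = true;
}

static void load(ModuleEntry* module) {
  if (module->cld != nullptr) {
    load(module->cld); // a tagged module implies a tagged CLD
  }
  module->tagged = true;
}

static void load(PackageEntry* package) {
  if (package->module != nullptr) {
    load(package->module); // a tagged package implies a tagged module
  }
  package->tagged = true;
}

int main() {
  LoaderKlass k;
  ClassLoaderData cld{&k};
  ModuleEntry m{&cld};
  PackageEntry p{&m};
  load(&p); // one package tag pulls in module, CLD and loader class
  assert(k.tagged && cld.tagged && m.tagged && p.tagged);
}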

inline traceid JfrTraceIdLoadBarrier::load_leakp(const Klass* klass, const Method* method) {
  assert(klass != nullptr, "invariant");
  assert(METHOD_AND_CLASS_USED_THIS_EPOCH(klass), "invariant");

@@ -86,9 +86,6 @@ bool JfrRecorder::create_oop_storages() {
  return ObjectSampler::create_oop_storage();
}

// Subsystem
static JfrCheckpointManager* _checkpoint_manager = nullptr;

bool JfrRecorder::on_create_vm_1() {
  if (!is_disabled()) {
    if (FlightRecorder || is_started_on_commandline()) {
@@ -99,9 +96,10 @@ bool JfrRecorder::on_create_vm_1() {
    return false;
  }

  _checkpoint_manager = JfrCheckpointManager::create();
  if (_checkpoint_manager == nullptr || !_checkpoint_manager->initialize_early()) {
    return false;
  if (is_started_on_commandline()) {
    if (!create_checkpoint_manager()) {
      return false;
    }
  }

  // fast time initialization
@@ -292,7 +290,7 @@ bool JfrRecorder::create_components() {
  if (!create_storage()) {
    return false;
  }
  if (!create_checkpoint_manager()) {
  if (!initialize_checkpoint_manager()) {
    return false;
  }
  if (!create_stacktrace_repository()) {
@@ -321,6 +319,7 @@ static JfrStackTraceRepository* _stack_trace_repository;
static JfrStringPool* _stringpool = nullptr;
static JfrOSInterface* _os_interface = nullptr;
static JfrThreadSampling* _thread_sampling = nullptr;
static JfrCheckpointManager* _checkpoint_manager = nullptr;

bool JfrRecorder::create_java_event_writer() {
  return JfrJavaEventWriter::initialize();
@@ -357,6 +356,17 @@ bool JfrRecorder::create_storage() {
}

bool JfrRecorder::create_checkpoint_manager() {
  assert(_checkpoint_manager == nullptr, "invariant");
  _checkpoint_manager = JfrCheckpointManager::create();
  return _checkpoint_manager != nullptr && _checkpoint_manager->initialize_early();
}

bool JfrRecorder::initialize_checkpoint_manager() {
  if (_checkpoint_manager == nullptr) {
    if (!create_checkpoint_manager()) {
      return false;
    }
  }
  assert(_checkpoint_manager != nullptr, "invariant");
  assert(_repository != nullptr, "invariant");
  return _checkpoint_manager->initialize(&_repository->chunkwriter());

@@ -42,6 +42,7 @@ class JfrRecorder : public JfrCHeapObj {
  static bool on_create_vm_2();
  static bool on_create_vm_3();
  static bool create_checkpoint_manager();
  static bool initialize_checkpoint_manager();
  static bool create_chunk_repository();
  static bool create_java_event_writer();
  static bool create_jvmti_agent();

@@ -626,8 +626,8 @@ inline bool ReinitializeAllReleaseRetiredOp<Mspace, FromList>::process(typename
  const bool retired = node->retired();
  node->reinitialize();
  assert(node->empty(), "invariant");
  assert(!node->retired(), "invariant");
  if (retired) {
    assert(!node->retired(), "invariant");
    _prev = _list.excise(_prev, node);
    node->release();
    mspace_release(node, _mspace);

@@ -371,8 +371,10 @@ void JfrDeprecationManager::write_edges(JfrChunkWriter& cw, Thread* thread, bool

void JfrDeprecationManager::on_type_set(JfrCheckpointWriter& writer, JfrChunkWriter* cw, Thread* thread) {
  assert(_pending_list.is_empty(), "invariant");
  if (writer.has_data() && _pending_head != nullptr) {
  if (_pending_head != nullptr) {
    save_type_set_blob(writer);
  } else {
    writer.cancel();
  }
  if (cw != nullptr) {
    write_edges(*cw, thread);

@@ -220,6 +220,7 @@
  nonstatic_field(JavaThread, _lock_stack, LockStack) \
  JVMTI_ONLY(nonstatic_field(JavaThread, _is_in_VTMS_transition, bool)) \
  JVMTI_ONLY(nonstatic_field(JavaThread, _is_in_tmp_VTMS_transition, bool)) \
  JVMTI_ONLY(nonstatic_field(JavaThread, _is_disable_suspend, bool)) \
  \
  nonstatic_field(LockStack, _top, uint32_t) \
  \

@@ -395,7 +395,8 @@ public:
  void remove_unshareable_flags() NOT_CDS_RETURN;

  // the number of argument reg slots that the compiled method uses on the stack.
  int num_stack_arg_slots() const { return constMethod()->num_stack_arg_slots(); }
  int num_stack_arg_slots(bool rounded = true) const {
    return rounded ? align_up(constMethod()->num_stack_arg_slots(), 2) : constMethod()->num_stack_arg_slots(); }
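
The new rounded parameter separates the aligned slot count, rounded up to an even number of slots to keep stack alignment, from the exact count that some callers need. The rounding itself is the usual power-of-two align_up; a standalone equivalent (not HotSpot's generic align_up):

// align_up_sketch.cpp -- standalone equivalent of rounding a slot count up
// to a multiple of two; HotSpot's align_up is the generic version of this.
#include <cassert>

constexpr int align_up(int value, int alignment) {
  // alignment must be a power of two; round value up to the next multiple.
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  assert(align_up(0, 2) == 0);
  assert(align_up(3, 2) == 4); // 3 raw slots occupy 4 aligned slots
  assert(align_up(4, 2) == 4);
}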

  virtual void metaspace_pointers_do(MetaspaceClosure* iter);
  virtual MetaspaceObj::Type type() const { return MethodType; }

@@ -155,7 +155,7 @@ public:

  inline void* gc_data() const;
  inline BitMapView bitmap() const;
  inline BitMap::idx_t bit_index_for(intptr_t* p) const;
  inline BitMap::idx_t bit_index_for(address p) const;
  inline intptr_t* address_for_bit(BitMap::idx_t index) const;
  template <typename OopT> inline BitMap::idx_t bit_index_for(OopT* p) const;
  template <typename OopT> inline OopT* address_for_bit(BitMap::idx_t index) const;

@@ -256,12 +256,13 @@ inline BitMapView stackChunkOopDesc::bitmap() const {
  return bitmap;
}

inline BitMap::idx_t stackChunkOopDesc::bit_index_for(intptr_t* p) const {
inline BitMap::idx_t stackChunkOopDesc::bit_index_for(address p) const {
  return UseCompressedOops ? bit_index_for((narrowOop*)p) : bit_index_for((oop*)p);
}

template <typename OopT>
inline BitMap::idx_t stackChunkOopDesc::bit_index_for(OopT* p) const {
  assert(is_aligned(p, alignof(OopT)), "should be aligned: " PTR_FORMAT, p2i(p));
  assert(p >= (OopT*)start_address(), "Address not in chunk");
  return p - (OopT*)start_address();
}
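
Behind the signature change sits the interesting invariant: the chunk bitmap has one bit per oop slot, so a slot's bit index is simply the pointer difference from the chunk start measured in oop-sized units, which differ between compressed and uncompressed oops. A standalone model (OopT stands in for narrowOop or oop; the casts are for illustration only):

// bit_index_sketch.cpp -- models one-bit-per-oop-slot indexing.
#include <cassert>
#include <cstddef>
#include <cstdint>

template <typename OopT>
std::size_t bit_index_for(const OopT* start, const OopT* p) {
  assert(p >= start && "Address not in chunk");
  return static_cast<std::size_t>(p - start); // difference in OopT-sized slots
}

int main() {
  alignas(8) unsigned char chunk[64] = {};
  // The same byte offset maps to different bit indices per slot size:
  assert(bit_index_for((uint32_t*)chunk, (uint32_t*)(chunk + 16)) == 4); // narrow oops
  assert(bit_index_for((uint64_t*)chunk, (uint64_t*)(chunk + 16)) == 2); // full oops
}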

@@ -467,7 +467,7 @@
  develop(bool, TracePostallocExpand, false, "Trace expanding nodes after" \
          " register allocation.") \
  \
  product(bool, ReduceAllocationMerges, true, DIAGNOSTIC, \
  product(bool, ReduceAllocationMerges, false, DIAGNOSTIC, \
          "Try to simplify allocation merges before Scalar Replacement") \
  \
  notproduct(bool, TraceReduceAllocationMerges, false, \

@@ -822,6 +822,7 @@ bool C2Compiler::is_intrinsic_supported(vmIntrinsics::ID id) {
  case vmIntrinsics::_notifyJvmtiVThreadMount:
  case vmIntrinsics::_notifyJvmtiVThreadUnmount:
  case vmIntrinsics::_notifyJvmtiVThreadHideFrames:
  case vmIntrinsics::_notifyJvmtiVThreadDisableSuspend:
#endif
    break;

@@ -4006,6 +4006,13 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
        if (n == nullptr) {
          continue;
        }
      } else if (n->is_CallLeaf()) {
        // Runtime calls with narrow memory input (no MergeMem node)
        // get the memory projection
        n = n->as_Call()->proj_out_or_null(TypeFunc::Memory);
        if (n == nullptr) {
          continue;
        }
      } else if (n->Opcode() == Op_StrCompressedCopy ||
                 n->Opcode() == Op_EncodeISOArray) {
        // get the memory projection
@@ -4048,7 +4055,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
          continue;
        }
        memnode_worklist.append_if_missing(use);
      } else if (use->is_MemBar()) {
      } else if (use->is_MemBar() || use->is_CallLeaf()) {
        if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
          memnode_worklist.append_if_missing(use);
        }

@@ -1872,6 +1872,46 @@ Node* RangeCheckNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  // then we are guaranteed to fail, so just start interpreting there.
  // We 'expand' the top 3 range checks to include all post-dominating
  // checks.
  //
  // Example:
  // a[i+x] // (1) 1 < x < 6
  // a[i+3] // (2)
  // a[i+4] // (3)
  // a[i+6] // max = max of all constants
  // a[i+2]
  // a[i+1] // min = min of all constants
  //
  // If x < 3:
  //   (1) a[i+x]: Leave unchanged
  //   (2) a[i+3]: Replace with a[i+max] = a[i+6]: i+x < i+3 <= i+6 -> (2) is covered
  //   (3) a[i+4]: Replace with a[i+min] = a[i+1]: i+1 < i+4 <= i+6 -> (3) and all following checks are covered
  //   Remove all other a[i+c] checks
  //
  // If x >= 3:
  //   (1) a[i+x]: Leave unchanged
  //   (2) a[i+3]: Replace with a[i+min] = a[i+1]: i+1 < i+3 <= i+x -> (2) is covered
  //   (3) a[i+4]: Replace with a[i+max] = a[i+6]: i+1 < i+4 <= i+6 -> (3) and all following checks are covered
  //   Remove all other a[i+c] checks
  //
  // We only need the top 2 range checks if x is the min or max of all constants.
  //
  // This, however, only works if the interval [i+min,i+max] is not larger than max_int (i.e. abs(max - min) < max_int):
  // The theoretical max size of an array is max_int with:
  // - Valid index space: [0,max_int-1]
  // - Invalid index space: [max_int,-1] // max_int, min_int, min_int - 1 ..., -1
  //
  // The size of the consecutive valid index space is smaller than the size of the consecutive invalid index space.
  // If we choose min and max in such a way that:
  // - abs(max - min) < max_int
  // - i+max and i+min are inside the valid index space
  // then all indices [i+min,i+max] must be in the valid index space. Otherwise, the invalid index space must be
  // smaller than the valid index space which is never the case for any array size.
  //
  // Choosing a smaller array size only makes the valid index space smaller and the invalid index space larger and
  // the argument above still holds.
  //
  // Note that the same optimization with the same maximal accepted interval size can also be found in C1.
  const jlong maximum_number_of_min_max_interval_indices = (jlong)max_jint;

  // The top 3 range checks seen
  const int NRC = 3;
@@ -1906,13 +1946,18 @@ Node* RangeCheckNode::Ideal(PhaseGVN *phase, bool can_reshape) {
          found_immediate_dominator = true;
          break;
        }
        // Gather expanded bounds
        off_lo = MIN2(off_lo,offset2);
        off_hi = MAX2(off_hi,offset2);
        // Record top NRC range checks
        prev_checks[nb_checks%NRC].ctl = prev_dom;
        prev_checks[nb_checks%NRC].off = offset2;
        nb_checks++;

        // "x - y" -> must add one to the difference for number of elements in [x,y]
        const jlong diff = (jlong)MIN2(offset2, off_lo) - (jlong)MAX2(offset2, off_hi);
        if (ABS(diff) < maximum_number_of_min_max_interval_indices) {
          // Gather expanded bounds
          off_lo = MIN2(off_lo, offset2);
          off_hi = MAX2(off_hi, offset2);
          // Record top NRC range checks
          prev_checks[nb_checks % NRC].ctl = prev_dom;
          prev_checks[nb_checks % NRC].off = offset2;
          nb_checks++;
        }
      }
    }
    prev_dom = dom;
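
The new guard around the bound-gathering applies the interval argument from the comment above: before widening [off_lo, off_hi] with another constant offset, compute the widened interval's size in 64-bit arithmetic and skip the smearing when it would reach max_int elements. A standalone sketch of just that guard, with plain ints in place of C2 node offsets:

// smear_guard_sketch.cpp -- standalone version of the interval-size guard.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>

// Widen [lo, hi] by offset only when the resulting index interval stays
// strictly smaller than max_int elements; returns false when this range
// check must not be smeared into the others.
bool try_widen(int32_t& lo, int32_t& hi, int32_t offset) {
  const int64_t min_v = std::min<int64_t>(offset, lo);
  const int64_t max_v = std::max<int64_t>(offset, hi);
  if (max_v - min_v >= std::numeric_limits<int32_t>::max()) {
    return false;  // interval would cover max_int or more indices
  }
  lo = (int32_t)min_v;
  hi = (int32_t)max_v;
  return true;
}

int main() {
  int32_t lo = 1, hi = 6;
  assert(try_widen(lo, hi, 3));  // small interval: smearing is safe
  assert(!try_widen(lo, hi, std::numeric_limits<int32_t>::min()));  // too wide
  assert(lo == 1 && hi == 6);    // bounds unchanged after the refusal
}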

@@ -492,7 +492,8 @@ bool LibraryCallKit::try_to_inline(int predicate) {
                                                                                         "notifyJvmtiMount", false, false);
  case vmIntrinsics::_notifyJvmtiVThreadUnmount: return inline_native_notify_jvmti_funcs(CAST_FROM_FN_PTR(address, OptoRuntime::notify_jvmti_vthread_unmount()),
                                                                                         "notifyJvmtiUnmount", false, false);
  case vmIntrinsics::_notifyJvmtiVThreadHideFrames: return inline_native_notify_jvmti_hide();
  case vmIntrinsics::_notifyJvmtiVThreadHideFrames:     return inline_native_notify_jvmti_hide();
  case vmIntrinsics::_notifyJvmtiVThreadDisableSuspend: return inline_native_notify_jvmti_sync();
#endif

#ifdef JFR_HAVE_INTRINSICS

@@ -2950,6 +2951,29 @@ bool LibraryCallKit::inline_native_notify_jvmti_hide() {
  return true;
}

// Always update the is_disable_suspend bit.
bool LibraryCallKit::inline_native_notify_jvmti_sync() {
  if (!DoJVMTIVirtualThreadTransitions) {
    return true;
  }
  IdealKit ideal(this);

  {
    // unconditionally update the is_disable_suspend bit in current JavaThread
    Node* thread = ideal.thread();
    Node* arg = _gvn.transform(argument(1)); // argument for notification
    Node* addr = basic_plus_adr(thread, in_bytes(JavaThread::is_disable_suspend_offset()));
    const TypePtr *addr_type = _gvn.type(addr)->isa_ptr();

    sync_kit(ideal);
    access_store_at(nullptr, addr, addr_type, arg, _gvn.type(arg), T_BOOLEAN, IN_NATIVE | MO_UNORDERED);
    ideal.sync_kit(this);
  }
  final_sync(ideal);

  return true;
}

#endif // INCLUDE_JVMTI

#ifdef JFR_HAVE_INTRINSICS

@@ -245,6 +245,7 @@ class LibraryCallKit : public GraphKit {
#if INCLUDE_JVMTI
  bool inline_native_notify_jvmti_funcs(address funcAddr, const char* funcName, bool is_start, bool is_end);
  bool inline_native_notify_jvmti_hide();
  bool inline_native_notify_jvmti_sync();
#endif

#ifdef JFR_HAVE_INTRINSICS

@@ -849,9 +849,10 @@ BoolNode* PhaseIdealLoop::rc_predicate(IdealLoopTree* loop, Node* ctrl, int scal
    // Check if (scale * max_idx_expr) may overflow
    const TypeInt* scale_type = TypeInt::make(scale);
    MulINode* mul = new MulINode(max_idx_expr, con_scale);
    idx_type = (TypeInt*)mul->mul_ring(idx_type, scale_type);
    if (overflow || TypeInt::INT->higher_equal(idx_type)) {

    if (overflow || MulINode::does_overflow(idx_type, scale_type)) {
      // May overflow
      idx_type = TypeInt::INT;
      mul->destruct(&_igvn);
      if (!overflow) {
        max_idx_expr = new ConvI2LNode(max_idx_expr);
@@ -864,6 +865,7 @@ BoolNode* PhaseIdealLoop::rc_predicate(IdealLoopTree* loop, Node* ctrl, int scal
    } else {
      // No overflow possible
      max_idx_expr = mul;
      idx_type = (TypeInt*)mul->mul_ring(idx_type, scale_type);
    }
    register_new_node(max_idx_expr, ctrl);
  }

@@ -491,19 +491,19 @@ PhiNode* PhaseIdealLoop::loop_iv_phi(Node* xphi, Node* phi_incr, Node* x, IdealL
  return phi;
}

static int check_stride_overflow(jlong stride_con, const TypeInteger* limit_t, BasicType bt) {
  if (stride_con > 0) {
    if (limit_t->lo_as_long() > (max_signed_integer(bt) - stride_con)) {
static int check_stride_overflow(jlong final_correction, const TypeInteger* limit_t, BasicType bt) {
  if (final_correction > 0) {
    if (limit_t->lo_as_long() > (max_signed_integer(bt) - final_correction)) {
      return -1;
    }
    if (limit_t->hi_as_long() > (max_signed_integer(bt) - stride_con)) {
    if (limit_t->hi_as_long() > (max_signed_integer(bt) - final_correction)) {
      return 1;
    }
  } else {
    if (limit_t->hi_as_long() < (min_signed_integer(bt) - stride_con)) {
    if (limit_t->hi_as_long() < (min_signed_integer(bt) - final_correction)) {
      return -1;
    }
    if (limit_t->lo_as_long() < (min_signed_integer(bt) - stride_con)) {
    if (limit_t->lo_as_long() < (min_signed_integer(bt) - final_correction)) {
      return 1;
    }
  }
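
The rename from stride_con to final_correction matches what the parameter now carries: the combined constant correction derived in the comment below. The tri-state result reads best with concrete values, as in this standalone replica for the int case (the TypeInteger limit type is replaced by an explicit [lo, hi] range):

// stride_overflow_sketch.cpp -- standalone replica of check_stride_overflow
// for the int case; the limit type is an explicit [lo, hi] range here.
#include <cassert>
#include <cstdint>
#include <limits>

constexpr int64_t max_int = std::numeric_limits<int32_t>::max();
constexpr int64_t min_int = std::numeric_limits<int32_t>::min();

// -1: limit + final_correction certainly overflows -> bail out
//  0: it certainly cannot overflow -> no predicate needed
//  1: it might overflow -> emit a Loop Limit Check Predicate
int check_stride_overflow(int64_t final_correction, int64_t lo, int64_t hi) {
  if (final_correction > 0) {
    if (lo > max_int - final_correction) return -1;
    if (hi > max_int - final_correction) return 1;
  } else {
    if (hi < min_int - final_correction) return -1;
    if (lo < min_int - final_correction) return 1;
  }
  return 0;
}

int main() {
  // An array length: [0, max_int - 8] with no correction can never overflow.
  assert(check_stride_overflow(0, 0, max_int - 8) == 0);
  // A limit that may be max_int with a positive correction might overflow.
  assert(check_stride_overflow(1, 0, max_int) == 1);
  // A limit known to be max_int with a positive correction must overflow.
  assert(check_stride_overflow(1, max_int, max_int) == -1);
}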

@@ -1773,49 +1773,204 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_
  C->print_method(PHASE_BEFORE_CLOOPS, 3);

  // ===================================================
  // Generate loop limit check to avoid integer overflow
  // in cases like next (cyclic loops):
  // We can only convert this loop to a counted loop if we can guarantee that the iv phi will never overflow at runtime.
  // This is an implicit assumption taken by some loop optimizations. We therefore must ensure this property at all cost.
  // At this point, we've already excluded some trivial cases where an overflow could have been proven statically.
  // But even though we cannot prove that an overflow will *not* happen, we still want to speculatively convert this loop
  // to a counted loop. This can be achieved by adding additional iv phi overflow checks before the loop. If they fail,
  // we trap and resume execution before the loop without having executed any iteration of the loop, yet.
  //
  //     for (i=0; i <= max_jint; i++) {}
  //     for (i=0; i <  max_jint; i+=2) {}
  // These additional iv phi overflow checks can be inserted as Loop Limit Check Predicates above the Loop Limit Check
  // Parse Predicate which captures a JVM state just before the entry of the loop. If there is no such Parse Predicate,
  // we cannot generate a Loop Limit Check Predicate and thus cannot speculatively convert the loop to a counted loop.
  //
  // In the following, we only focus on int loops with stride > 0 to keep things simple. The argumentation and proof
  // for stride < 0 is analogous. For long loops, we would replace max_int with max_long.
  //
  //
  // Limit check predicate depends on the loop test:
  // The loop to be converted does not always need to have the often used shape:
  //
  //   for(;i != limit; i++)       --> limit <= (max_jint)
  //   for(;i <  limit; i+=stride) --> limit <= (max_jint - stride + 1)
  //   for(;i <= limit; i+=stride) --> limit <= (max_jint - stride    )
  //    i = init
  //    i = init               loop:
  //    do {                       ...
  //                               // ...  equivalent  i+=stride
  //      i+=stride      <==>      if (i < limit)
  //    } while (i < limit);         goto loop
  //                           exit:
  //                               ...
  //
  // where the loop exit check uses the post-incremented iv phi and a '<'-operator.
  //
  // We could also have '<='-operator (or '>='-operator for negative strides) or use the pre-incremented iv phi value
  // in the loop exit check:
  //
  //     i = init
  //     loop:
  //       ...
  //       if (i <= limit)
  //         i+=stride
  //         goto loop
  //     exit:
  //       ...
  //
  // Let's define the following terms:
  // - iv_pre_i: The pre-incremented iv phi before the i-th iteration.
  // - iv_post_i: The post-incremented iv phi after the i-th iteration.
  //
  // The iv_pre_i and iv_post_i have the following relation:
  //   iv_pre_i + stride = iv_post_i
  //
  // When converting a loop to a counted loop, we want to have a canonicalized loop exit check of the form:
  //   iv_post_i < adjusted_limit
  //
  // If that is not the case, we need to canonicalize the loop exit check by using different values for adjusted_limit:
  // (LE1) iv_post_i < limit: Already canonicalized. We can directly use limit as adjusted_limit.
  //       -> adjusted_limit = limit.
  // (LE2) iv_post_i <= limit:
  //       iv_post_i < limit + 1
  //       -> adjusted_limit = limit + 1
  // (LE3) iv_pre_i < limit:
  //       iv_pre_i + stride < limit + stride
  //       iv_post_i < limit + stride
  //       -> adjusted_limit = limit + stride
  // (LE4) iv_pre_i <= limit:
  //       iv_pre_i < limit + 1
  //       iv_pre_i + stride < limit + stride + 1
  //       iv_post_i < limit + stride + 1
  //       -> adjusted_limit = limit + stride + 1
  //
  // Note that:
  //   (AL) limit <= adjusted_limit.
  //
  // The following loop invariant has to hold for counted loops with n iterations (i.e. loop exit check true after n-th
  // loop iteration) and a canonicalized loop exit check to guarantee that no iv_post_i over- or underflows:
  //   (INV) For i = 1..n, min_int <= iv_post_i <= max_int
  //
  // To prove (INV), we require the following two conditions/assumptions:
  //   (i): adjusted_limit - 1 + stride <= max_int
  //   (ii): init < limit
  //
  // If we can prove (INV), we know that there can be no over- or underflow of any iv phi value. We prove (INV) by
  // induction by assuming (i) and (ii).
  //
  // Proof by Induction
  // ------------------
  // > Base case (i = 1): We show that (INV) holds after the first iteration:
  //     min_int <= iv_post_1 = init + stride <= max_int
  //   Proof:
  //     First, we note that (ii) implies
  //       (iii) init <= limit - 1
  //     max_int >= adjusted_limit - 1 + stride   [using (i)]
  //             >= limit - 1 + stride            [using (AL)]
  //             >= init + stride                 [using (iii)]
  //             >= min_int                       [using stride > 0, no underflow]
  //   Thus, no overflow happens after the first iteration and (INV) holds for i = 1.
  //
  //   Note that to prove the base case we need (i) and (ii).
  //
  // > Induction Hypothesis (i = j, j > 1): Assume that (INV) holds after the j-th iteration:
  //     min_int <= iv_post_j <= max_int
  // > Step case (i = j + 1): We show that (INV) also holds after the j+1-th iteration:
  //     min_int <= iv_post_{j+1} = iv_post_j + stride <= max_int
  //   Proof:
  //   If iv_post_j >= adjusted_limit:
  //     We exit the loop after the j-th iteration, and we don't execute the j+1-th iteration anymore. Thus, there is
  //     also no iv_{j+1}. Since (INV) holds for iv_j, there is nothing left to prove.
  //   If iv_post_j < adjusted_limit:
  //     First, we note that:
  //       (iv) iv_post_j <= adjusted_limit - 1
  //     max_int >= adjusted_limit - 1 + stride   [using (i)]
  //             >= iv_post_j + stride            [using (iv)]
  //             >= min_int                       [using stride > 0, no underflow]
  //
  //   Note that to prove the step case we only need (i).
  //
  // Thus, by assuming (i) and (ii), we proved (INV).
  //
  //
  // It is therefore enough to add the following two Loop Limit Check Predicates to check assumptions (i) and (ii):
  //
  // (1) Loop Limit Check Predicate for (i):
  //     Using (i): adjusted_limit - 1 + stride <= max_int
  //
  //     This condition is now restated to use limit instead of adjusted_limit:
  //
  //     To prevent an overflow of adjusted_limit - 1 + stride itself, we rewrite this check to
  //       max_int - stride + 1 >= adjusted_limit
  //     We can merge the two constants into
  //       canonicalized_correction = stride - 1
  //     which gives us
  //       max_int - canonicalized_correction >= adjusted_limit
  //
  //     To directly use limit instead of adjusted_limit in the predicate condition, we split adjusted_limit into:
  //       adjusted_limit = limit + limit_correction
  //     Since stride > 0 and limit_correction <= stride + 1, we can restate this with no over- or underflow into:
  //       max_int - canonicalized_correction - limit_correction >= limit
  //     Since canonicalized_correction and limit_correction are both constants, we can replace them with a new constant:
  //       final_correction = canonicalized_correction + limit_correction
  //     which gives us:
  //
  //     Final predicate condition:
  //       max_int - final_correction >= limit
  //
  // (2) Loop Limit Check Predicate for (ii):
  //     Using (ii): init < limit
  //
  //     This Loop Limit Check Predicate is not required if we can prove at compile time that either:
  //       (2.1) type(init) < type(limit)
  //             In this case, we know:
  //               all possible values of init < all possible values of limit
  //             and we can skip the predicate.
  //
  //       (2.2) init < limit is already checked before (i.e. found as a dominating check)
  //             In this case, we do not need to re-check the condition and can skip the predicate.
  //             This is often found for while- and for-loops which have the following shape:
  //
  //               if (init < limit) { // Dominating test. Do not need the Loop Limit Check Predicate below.
  //                 i = init;
  //                 if (init >= limit) { trap(); } // Here we would insert the Loop Limit Check Predicate
  //                 do {
  //                   i += stride;
  //                 } while (i < limit);
  //               }
  //
  //       (2.3) init + stride <= max_int
  //             In this case, there is no overflow of the iv phi after the first loop iteration.
  //             In the proof of the base case above we showed that init + stride <= max_int by using assumption (ii):
  //               init < limit
  //             In the proof of the step case above, we did not need (ii) anymore. Therefore, if we already know at
  //             compile time that init + stride <= max_int then we have trivially proven the base case and that
  //             there is no overflow of the iv phi after the first iteration. In this case, we don't need to check (ii)
  //             again and can skip the predicate.
|
||||
|
||||
// Check if limit is excluded to do more precise int overflow check.
|
||||
bool incl_limit = (bt == BoolTest::le || bt == BoolTest::ge);
|
||||
jlong stride_m = stride_con - (incl_limit ? 0 : (stride_con > 0 ? 1 : -1));
|
||||
|
||||
// If compare points directly to the phi we need to adjust
|
||||
// the compare so that it points to the incr. Limit have
|
||||
// to be adjusted to keep trip count the same and the
|
||||
// adjusted limit should be checked for int overflow.
|
||||
Node* adjusted_limit = limit;
|
||||
if (phi_incr != nullptr) {
|
||||
stride_m += stride_con;
|
||||
}
|
||||
// Accounting for (LE3) and (LE4) where we use pre-incremented phis in the loop exit check.
|
||||
const jlong limit_correction_for_pre_iv_exit_check = (phi_incr != nullptr) ? stride_con : 0;
|
||||
|
||||
Node *init_control = x->in(LoopNode::EntryControl);
|
||||
// Accounting for (LE2) and (LE4) where we use <= or >= in the loop exit check.
|
||||
const bool includes_limit = (bt == BoolTest::le || bt == BoolTest::ge);
|
||||
const jlong limit_correction_for_le_ge_exit_check = (includes_limit ? (stride_con > 0 ? 1 : -1) : 0);
|
||||
|
||||
const jlong limit_correction = limit_correction_for_pre_iv_exit_check + limit_correction_for_le_ge_exit_check;
|
||||
const jlong canonicalized_correction = stride_con + (stride_con > 0 ? -1 : 1);
|
||||
const jlong final_correction = canonicalized_correction + limit_correction;
|
||||
|
||||
int sov = check_stride_overflow(final_correction, limit_t, iv_bt);
|
||||
Node* init_control = x->in(LoopNode::EntryControl);
|
||||
|
||||
int sov = check_stride_overflow(stride_m, limit_t, iv_bt);
|
||||
// If sov==0, limit's type always satisfies the condition, for
|
||||
// example, when it is an array length.
|
||||
if (sov != 0) {
|
||||
if (sov < 0) {
|
||||
return false; // Bailout: integer overflow is certain.
|
||||
}
|
||||
// (1) Loop Limit Check Predicate is required because we could not statically prove that
|
||||
// limit + final_correction = adjusted_limit - 1 + stride <= max_int
|
||||
assert(!x->as_Loop()->is_loop_nest_inner_loop(), "loop was transformed");
|
||||
// Generate loop's limit check.
|
||||
// Loop limit check predicate should be near the loop.
|
||||
const Predicates predicates(init_control);
|
||||
const PredicateBlock* loop_limit_check_predicate_block = predicates.loop_limit_check_predicate_block();
|
||||
if (!loop_limit_check_predicate_block->has_parse_predicate()) {
|
||||
// The limit check predicate is not generated if this method trapped here before.
|
||||
// The Loop Limit Check Parse Predicate is not generated if this method trapped here before.
|
||||
#ifdef ASSERT
|
||||
if (TraceLoopLimitCheck) {
|
||||
tty->print("Missing Loop Limit Check Parse Predicate:");
|
||||
@@ -1835,67 +1990,81 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_
|
||||
Node* bol;
|
||||
|
||||
if (stride_con > 0) {
|
||||
cmp_limit = CmpNode::make(limit, _igvn.integercon(max_signed_integer(iv_bt) - stride_m, iv_bt), iv_bt);
|
||||
cmp_limit = CmpNode::make(limit, _igvn.integercon(max_signed_integer(iv_bt) - final_correction, iv_bt), iv_bt);
|
||||
bol = new BoolNode(cmp_limit, BoolTest::le);
|
||||
} else {
|
||||
cmp_limit = CmpNode::make(limit, _igvn.integercon(min_signed_integer(iv_bt) - stride_m, iv_bt), iv_bt);
|
||||
cmp_limit = CmpNode::make(limit, _igvn.integercon(min_signed_integer(iv_bt) - final_correction, iv_bt), iv_bt);
|
||||
bol = new BoolNode(cmp_limit, BoolTest::ge);
|
||||
}
|
||||
|
||||
insert_loop_limit_check_predicate(init_control->as_IfTrue(), cmp_limit, bol);
|
||||
}
|
||||
|
||||
// Now we need to canonicalize loop condition.
|
||||
if (bt == BoolTest::ne) {
|
||||
assert(stride_con == 1 || stride_con == -1, "simple increment only");
|
||||
if (stride_con > 0 && init_t->hi_as_long() < limit_t->lo_as_long()) {
|
||||
// 'ne' can be replaced with 'lt' only when init < limit.
|
||||
bt = BoolTest::lt;
|
||||
} else if (stride_con < 0 && init_t->lo_as_long() > limit_t->hi_as_long()) {
|
||||
// 'ne' can be replaced with 'gt' only when init > limit.
|
||||
bt = BoolTest::gt;
|
||||
} else {
|
||||
const Predicates predicates(init_control);
|
||||
const PredicateBlock* loop_limit_check_predicate_block = predicates.loop_limit_check_predicate_block();
|
||||
if (!loop_limit_check_predicate_block->has_parse_predicate()) {
|
||||
// The limit check predicate is not generated if this method trapped here before.
|
||||
// (2.3)
|
||||
const bool init_plus_stride_could_overflow =
|
||||
        (stride_con > 0 && init_t->hi_as_long() > max_signed_integer(iv_bt) - stride_con) ||
        (stride_con < 0 && init_t->lo_as_long() < min_signed_integer(iv_bt) - stride_con);
  // (2.1)
  const bool init_gte_limit = (stride_con > 0 && init_t->hi_as_long() >= limit_t->lo_as_long()) ||
                              (stride_con < 0 && init_t->lo_as_long() <= limit_t->hi_as_long());

  if (init_gte_limit && // (2.1)
      ((bt == BoolTest::ne || init_plus_stride_could_overflow) && // (2.3)
       !has_dominating_loop_limit_check(init_trip, limit, stride_con, iv_bt, init_control))) { // (2.2)
    // (2) Iteration Loop Limit Check Predicate is required because neither (2.1), (2.2), nor (2.3) holds.
    // We use the following condition:
    // - stride > 0: init < limit
    // - stride < 0: init > limit
    //
    // This predicate is always required if we have a non-equal-operator in the loop exit check (where stride = 1 is
    // a requirement). We transform the loop exit check by using a less-than-operator. By doing so, we must always
    // check that init < limit. Otherwise, we could have a different number of iterations at runtime.

    const Predicates predicates(init_control);
    const PredicateBlock* loop_limit_check_predicate_block = predicates.loop_limit_check_predicate_block();
    if (!loop_limit_check_predicate_block->has_parse_predicate()) {
      // The Loop Limit Check Parse Predicate is not generated if this method trapped here before.
#ifdef ASSERT
      if (TraceLoopLimitCheck) {
        tty->print("Missing Loop Limit Check Parse Predicate:");
        loop->dump_head();
        x->dump(1);
      }
#endif
      return false;
    }

    ParsePredicateNode* loop_limit_check_parse_predicate = loop_limit_check_predicate_block->parse_predicate();
    Node* parse_predicate_entry = loop_limit_check_parse_predicate->in(0);
    if (!is_dominator(get_ctrl(limit), parse_predicate_entry) ||
        !is_dominator(get_ctrl(init_trip), parse_predicate_entry)) {
      return false;
    }

    Node* cmp_limit;
    Node* bol;

    if (stride_con > 0) {
      cmp_limit = CmpNode::make(init_trip, limit, iv_bt);
      bol = new BoolNode(cmp_limit, BoolTest::lt);
    } else {
      cmp_limit = CmpNode::make(init_trip, limit, iv_bt);
      bol = new BoolNode(cmp_limit, BoolTest::gt);
    }

    insert_loop_limit_check_predicate(init_control->as_IfTrue(), cmp_limit, bol);
  }

  if (stride_con > 0) {
    // 'ne' can be replaced with 'lt' only when init < limit.
    bt = BoolTest::lt;
  } else if (stride_con < 0) {
    // 'ne' can be replaced with 'gt' only when init > limit.
    bt = BoolTest::gt;
  }
  if (bt == BoolTest::ne) {
    // Now we need to canonicalize the loop condition if it is 'ne'.
    assert(stride_con == 1 || stride_con == -1, "simple increment only - checked before");
    if (stride_con > 0) {
      // 'ne' can be replaced with 'lt' only when init < limit. This is ensured by the inserted predicate above.
      bt = BoolTest::lt;
    } else {
      assert(stride_con < 0, "must be");
      // 'ne' can be replaced with 'gt' only when init > limit. This is ensured by the inserted predicate above.
      bt = BoolTest::gt;
    }
  }
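The trip-count argument in the comment above can be sanity-checked outside the VM. A minimal standalone sketch (ordinary C++, not HotSpot code; the helper names are made up) of why rewriting an 'i != limit' exit check as 'i < limit' preserves the trip count only when init < limit holds on entry:

#include <cassert>
#include <cstdint>

// Trip count of 'for (i = init; i != limit; i++)'. Only called with
// init <= limit here, so the loop never wraps around.
static int64_t trips_ne(int32_t init, int32_t limit) {
  int64_t trips = 0;
  for (int32_t i = init; i != limit; i++) trips++;
  return trips;
}

// Trip count of the canonicalized loop 'for (i = init; i < limit; i++)'.
static int64_t trips_lt(int32_t init, int32_t limit) {
  int64_t trips = 0;
  for (int32_t i = init; i < limit; i++) trips++;
  return trips;
}

int main() {
  assert(trips_ne(0, 10) == trips_lt(0, 10));  // init < limit: both run 10 times
  // For init >= limit the two loops disagree: the '<' version runs zero times,
  // while the '!=' version would only stop after wrapping through the whole
  // int range - exactly the case the Loop Limit Check Predicate rules out.
  assert(trips_lt(10, 0) == 0);
}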
@@ -1940,6 +2109,7 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_
  }
#endif

  Node* adjusted_limit = limit;
  if (phi_incr != nullptr) {
    // If compare points directly to the phi we need to adjust
    // the compare so that it points to the incr. Limit have
@@ -1953,7 +2123,7 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_
    adjusted_limit = gvn->transform(AddNode::make(limit, stride, iv_bt));
  }

  if (incl_limit) {
  if (includes_limit) {
    // The limit check guarantees that 'limit <= (max_jint - stride)' so
    // we can convert 'i <= limit' to 'i < limit+1' since stride != 0.
    //
@@ -2134,6 +2304,37 @@ bool PhaseIdealLoop::is_counted_loop(Node* x, IdealLoopTree*&loop, BasicType iv_
  return true;
}

// Check if there is a dominating loop limit check of the form 'init < limit' starting at the loop entry.
// If there is one, then we do not need to create an additional Loop Limit Check Predicate.
bool PhaseIdealLoop::has_dominating_loop_limit_check(Node* init_trip, Node* limit, const jlong stride_con,
                                                     const BasicType iv_bt, Node* loop_entry) {
  // Eagerly call transform() on the Cmp and Bool node to common them up if possible. This is required in order to
  // successfully find a dominated test with the If node below.
  Node* cmp_limit;
  Node* bol;
  if (stride_con > 0) {
    cmp_limit = _igvn.transform(CmpNode::make(init_trip, limit, iv_bt));
    bol = _igvn.transform(new BoolNode(cmp_limit, BoolTest::lt));
  } else {
    cmp_limit = _igvn.transform(CmpNode::make(init_trip, limit, iv_bt));
    bol = _igvn.transform(new BoolNode(cmp_limit, BoolTest::gt));
  }

  // Check if there is already a dominating init < limit check. If so, we do not need a Loop Limit Check Predicate.
  IfNode* iff = new IfNode(loop_entry, bol, PROB_MIN, COUNT_UNKNOWN);
  // Also add fake IfProj nodes in order to call transform() on the newly created IfNode.
  IfFalseNode* if_false = new IfFalseNode(iff);
  IfTrueNode* if_true = new IfTrueNode(iff);
  Node* dominated_iff = _igvn.transform(iff);
  // ConI node? Found dominating test (IfNode::dominated_by() returns a ConI node).
  const bool found_dominating_test = dominated_iff != nullptr && dominated_iff->is_ConI();

  // Kill the If with its projections again in the next IGVN round by cutting it off from the graph.
  _igvn.replace_input_of(iff, 0, C->top());
  _igvn.replace_input_of(iff, 1, C->top());
  return found_dominating_test;
}

//----------------------exact_limit-------------------------------------------
Node* PhaseIdealLoop::exact_limit( IdealLoopTree *loop ) {
  assert(loop->_head->is_CountedLoop(), "");

@@ -1346,6 +1346,8 @@ public:
  void rewire_cloned_nodes_to_ctrl(const ProjNode* old_ctrl, Node* new_ctrl, const Node_List& nodes_with_same_ctrl,
                                   const Dict& old_new_mapping);
  void rewire_inputs_of_clones_to_clones(Node* new_ctrl, Node* clone, const Dict& old_new_mapping, const Node* next);
  bool has_dominating_loop_limit_check(Node* init_trip, Node* limit, jlong stride_con, BasicType iv_bt,
                                       Node* loop_entry);

public:
  void register_control(Node* n, IdealLoopTree *loop, Node* pred, bool update_body = true);

@@ -287,7 +287,11 @@ bool PhaseIdealLoop::cannot_split_division(const Node* n, const Node* region) co
    return false;
  }

  assert(n->in(0) == nullptr, "divisions with zero check should already have bailed out earlier in split-if");
  if (n->in(0) != nullptr) {
    // Cannot split through phi if Div or Mod node has a control dependency to a zero check.
    return true;
  }

  Node* divisor = n->in(2);
  return is_divisor_counted_loop_phi(divisor, region) &&
         loop_phi_backedge_type_contains_zero(divisor, zero);

@@ -281,45 +281,86 @@ Node *MulINode::Ideal(PhaseGVN *phase, bool can_reshape) {
  return res;                   // Return final result
}

// Classes to perform mul_ring() for MulI/MulLNode.
// This template class performs type multiplication for MulI/MulLNode. NativeType is either jint or jlong.
// In this class, the inputs of the MulNodes are named left and right with types [left_lo,left_hi] and [right_lo,right_hi].
//
// This class checks if all cross products of the left and right input of a multiplication have the same "overflow value".
// Without overflow/underflow:
// Product is positive? High signed multiplication result: 0
// Product is negative? High signed multiplication result: -1
// In general, the multiplication of two x-bit values could produce a result that consumes up to 2x bits if there is
// enough space to hold them all. We can therefore distinguish the following two cases for the product:
// - no overflow (i.e. product fits into x bits)
// - overflow (i.e. product does not fit into x bits)
//
// We normalize these values (see normalize_overflow_value()) such that we get the same "overflow value" by adding 1 if
// the product is negative. This allows us to compare all the cross product "overflow values". If one is different,
// compared to the others, then we know that this multiplication has a different number of over- or underflows compared
// to the others. In this case, we need to use bottom type and cannot guarantee a better type. Otherwise, we can take
// the min and max of all computed cross products as type of this Mul node.
template<typename IntegerType>
class IntegerMulRing {
  using NativeType = std::conditional_t<std::is_same<TypeInt, IntegerType>::value, jint, jlong>;
// When multiplying the two x-bit inputs 'left' and 'right' with their x-bit types [left_lo,left_hi] and [right_lo,right_hi]
// we need to find the minimum and maximum of all possible products to define a new type. To do that, we compute the
// cross product of [left_lo,left_hi] and [right_lo,right_hi] in 2x-bit space where no over- or underflow can happen.
// The cross product consists of the following four multiplications with 2x-bit results:
// (1) left_lo * right_lo
// (2) left_lo * right_hi
// (3) left_hi * right_lo
// (4) left_hi * right_hi
//
// Let's define the following two functions:
// - Lx(i): Returns the lower x bits of the 2x-bit number i.
// - Ux(i): Returns the upper x bits of the 2x-bit number i.
//
// Let's first assume all products are positive where only overflows are possible but no underflows. If there is no
// overflow for a product p, then the upper x bits of the 2x-bit result p are all zero:
// Ux(p) = 0
// Lx(p) = p
//
// If none of the multiplications (1)-(4) overflow, we can truncate the upper x bits and use the following result type
// with x bits:
// [result_lo,result_hi] = [MIN(Lx(1),Lx(2),Lx(3),Lx(4)),MAX(Lx(1),Lx(2),Lx(3),Lx(4))]
//
// If any of these multiplications overflows, we could pessimistically take the bottom type for the x bit result
// (i.e. all values in the x-bit space could be possible):
// [result_lo,result_hi] = [NativeType_min,NativeType_max]
//
// However, in case of any overflow, we can do better by analyzing the upper x bits of all multiplications (1)-(4) with
// 2x-bit results. The upper x bits tell us something about how many times a multiplication has overflown the lower
// x bits. If the upper x bits of (1)-(4) are all equal, then we know that all of these multiplications overflowed
// the lower x bits the same number of times:
// Ux((1)) = Ux((2)) = Ux((3)) = Ux((4))
//
// If all upper x bits are equal, we can conclude:
// Lx(MIN((1),(2),(3),(4))) = MIN(Lx(1),Lx(2),Lx(3),Lx(4))
// Lx(MAX((1),(2),(3),(4))) = MAX(Lx(1),Lx(2),Lx(3),Lx(4))
//
// Therefore, we can use the same precise x-bit result type as for the no-overflow case:
// [result_lo,result_hi] = [MIN(Lx(1),Lx(2),Lx(3),Lx(4)),MAX(Lx(1),Lx(2),Lx(3),Lx(4))]
//
//
// Now let's assume that (1)-(4) are signed multiplications where over- and underflow could occur:
// Negative numbers are all sign extended with ones. Therefore, if a negative product does not underflow, then the
// upper x bits of the 2x-bit result are all set to ones which is minus one in two's complement. If there is an underflow,
// the upper x bits are decremented by the number of times an underflow occurred. The smallest possible negative product
// is NativeType_min*NativeType_max, where the upper x bits are set to NativeType_min / 2 (b11...0). It is therefore
// impossible to underflow the upper x bits. Thus, when having all ones (i.e. minus one) in the upper x bits, we know
// that there is no underflow.
//
// To be able to compare the number of over-/underflows of positive and negative products, respectively, we normalize
// the upper x bits of negative 2x-bit products by adding one. This way a product has no over- or underflow if the
// normalized upper x bits are zero. Now we can use the same improved type as for strictly positive products because we
// can compare the upper x bits in a unified way with N() being the normalization function:
// N(Ux((1))) = N(Ux((2))) = N(Ux((3))) = N(Ux((4)))
template<typename NativeType>
class IntegerTypeMultiplication {

  NativeType _lo_left;
  NativeType _lo_right;
  NativeType _hi_left;
  NativeType _hi_right;
  NativeType _lo_lo_product;
  NativeType _lo_hi_product;
  NativeType _hi_lo_product;
  NativeType _hi_hi_product;
  short _widen_left;
  short _widen_right;

  static const Type* overflow_type();
  static NativeType multiply_high_signed_overflow_value(NativeType x, NativeType y);
  static NativeType multiply_high(NativeType x, NativeType y);
  const Type* create_type(NativeType lo, NativeType hi) const;

  // Pre-compute cross products which are used at several places
  void compute_cross_products() {
    _lo_lo_product = java_multiply(_lo_left, _lo_right);
    _lo_hi_product = java_multiply(_lo_left, _hi_right);
    _hi_lo_product = java_multiply(_hi_left, _lo_right);
    _hi_hi_product = java_multiply(_hi_left, _hi_right);
  static NativeType multiply_high_signed_overflow_value(NativeType x, NativeType y) {
    return normalize_overflow_value(x, y, multiply_high(x, y));
  }

  bool cross_products_not_same_overflow() const {
  bool cross_product_not_same_overflow_value() const {
    const NativeType lo_lo_high_product = multiply_high_signed_overflow_value(_lo_left, _lo_right);
    const NativeType lo_hi_high_product = multiply_high_signed_overflow_value(_lo_left, _hi_right);
    const NativeType hi_lo_high_product = multiply_high_signed_overflow_value(_hi_left, _lo_right);
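The "overflow value" described above is easy to verify numerically. A small self-contained sketch (plain C++, not part of the patch; overflow_value is a made-up stand-in for the jint multiply_high_signed_overflow_value) showing that the normalized value is 0 exactly when the product fits into 32 bits:

#include <cassert>
#include <cstdint>

// Upper 32 bits of the 64-bit product, normalized by adding 1 when the wrapped
// 32-bit product is negative, mirroring multiply_high() followed by
// normalize_overflow_value() (which uses the wrapping java_multiply()).
static int32_t overflow_value(int32_t x, int32_t y) {
  const int64_t product = (int64_t)x * (int64_t)y;
  const int32_t high = (int32_t)((uint64_t)product >> 32u);  // like multiply_high
  const int32_t low  = (int32_t)((uint32_t)x * (uint32_t)y); // wrapped jint product
  return low < 0 ? high + 1 : high;
}

int main() {
  assert(overflow_value(1000, 1000) == 0);      // 10^6 fits into 32 bits
  assert(overflow_value(-1000, 1000) == 0);     // negative, but no underflow
  assert(overflow_value(100000, 100000) == 2);  // 10^10 overflowed the low 32 bits twice
  assert(overflow_value(65536, 32768) == 1);    // 2^31 just barely overflowed once
}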
@@ -329,66 +370,95 @@ class IntegerMulRing {
           hi_lo_high_product != hi_hi_high_product;
  }

  bool does_product_overflow(NativeType x, NativeType y) const {
    return multiply_high_signed_overflow_value(x, y) != 0;
  }

  static NativeType normalize_overflow_value(const NativeType x, const NativeType y, NativeType result) {
    return java_multiply(x, y) < 0 ? result + 1 : result;
  }

 public:
  IntegerMulRing(const IntegerType* left, const IntegerType* right) : _lo_left(left->_lo), _lo_right(right->_lo),
      _hi_left(left->_hi), _hi_right(right->_hi), _widen_left(left->_widen), _widen_right(right->_widen) {
    compute_cross_products();
  }
  template<class IntegerType>
  IntegerTypeMultiplication(const IntegerType* left, const IntegerType* right)
      : _lo_left(left->_lo), _lo_right(right->_lo),
        _hi_left(left->_hi), _hi_right(right->_hi),
        _widen_left(left->_widen), _widen_right(right->_widen) {}

  // Compute the product type by multiplying the two input type ranges. We take the minimum and maximum of all possible
  // values (requires 4 multiplications of all possible combinations of the two range boundary values). If any of these
  // multiplications overflows/underflows, we need to make sure that they all have the same number of overflows/underflows.
  // If that is not the case, we return the bottom type to cover all values (due to the inconsistent overflows/underflows).
  const Type* compute() const {
    if (cross_products_not_same_overflow()) {
    if (cross_product_not_same_overflow_value()) {
      return overflow_type();
    }
    const NativeType min = MIN4(_lo_lo_product, _lo_hi_product, _hi_lo_product, _hi_hi_product);
    const NativeType max = MAX4(_lo_lo_product, _lo_hi_product, _hi_lo_product, _hi_hi_product);
    return IntegerType::make(min, max, MAX2(_widen_left, _widen_right));

    NativeType lo_lo_product = java_multiply(_lo_left, _lo_right);
    NativeType lo_hi_product = java_multiply(_lo_left, _hi_right);
    NativeType hi_lo_product = java_multiply(_hi_left, _lo_right);
    NativeType hi_hi_product = java_multiply(_hi_left, _hi_right);
    const NativeType min = MIN4(lo_lo_product, lo_hi_product, hi_lo_product, hi_hi_product);
    const NativeType max = MAX4(lo_lo_product, lo_hi_product, hi_lo_product, hi_hi_product);
    return create_type(min, max);
  }

  bool does_overflow() const {
    return does_product_overflow(_lo_left, _lo_right) ||
           does_product_overflow(_lo_left, _hi_right) ||
           does_product_overflow(_hi_left, _lo_right) ||
           does_product_overflow(_hi_left, _hi_right);
  }
};


template <>
const Type* IntegerMulRing<TypeInt>::overflow_type() {
const Type* IntegerTypeMultiplication<jint>::overflow_type() {
  return TypeInt::INT;
}

template <>
jint IntegerMulRing<TypeInt>::multiply_high_signed_overflow_value(const jint x, const jint y) {
jint IntegerTypeMultiplication<jint>::multiply_high(const jint x, const jint y) {
  const jlong x_64 = x;
  const jlong y_64 = y;
  const jlong product = x_64 * y_64;
  const jint result = (jint)((uint64_t)product >> 32u);
  return normalize_overflow_value(x, y, result);
  return (jint)((uint64_t)product >> 32u);
}

template <>
const Type* IntegerMulRing<TypeLong>::overflow_type() {
const Type* IntegerTypeMultiplication<jint>::create_type(jint lo, jint hi) const {
  return TypeInt::make(lo, hi, MAX2(_widen_left, _widen_right));
}

template <>
const Type* IntegerTypeMultiplication<jlong>::overflow_type() {
  return TypeLong::LONG;
}

template <>
jlong IntegerMulRing<TypeLong>::multiply_high_signed_overflow_value(const jlong x, const jlong y) {
  const jlong result = multiply_high_signed(x, y);
  return normalize_overflow_value(x, y, result);
jlong IntegerTypeMultiplication<jlong>::multiply_high(const jlong x, const jlong y) {
  return multiply_high_signed(x, y);
}

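The jlong specialization above delegates to multiply_high_signed, a HotSpot utility. A hedged illustration of the high word it computes, assuming a GCC- or Clang-style __int128 (a compiler extension, not standard C++):

#include <cassert>
#include <cstdint>

// Upper 64 bits of the signed 128-bit product, as a sketch of what a 64-bit
// multiply-high returns.
static int64_t multiply_high_signed_sketch(int64_t x, int64_t y) {
  return (int64_t)(((__int128)x * (__int128)y) >> 64);
}

int main() {
  assert(multiply_high_signed_sketch(1, 1) == 0);          // small product: high word 0
  assert(multiply_high_signed_sketch(-1, 1) == -1);        // negative, no underflow: all ones
  assert(multiply_high_signed_sketch(INT64_MAX, 4) == 1);  // overflowed the low 64 bits once
}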
template <>
const Type* IntegerTypeMultiplication<jlong>::create_type(jlong lo, jlong hi) const {
  return TypeLong::make(lo, hi, MAX2(_widen_left, _widen_right));
}

// Compute the product type of two integer ranges into this node.
const Type* MulINode::mul_ring(const Type* type_left, const Type* type_right) const {
  const IntegerMulRing<TypeInt> integer_mul_ring(type_left->is_int(), type_right->is_int());
  return integer_mul_ring.compute();
  const IntegerTypeMultiplication<jint> integer_multiplication(type_left->is_int(), type_right->is_int());
  return integer_multiplication.compute();
}

bool MulINode::does_overflow(const TypeInt* type_left, const TypeInt* type_right) {
  const IntegerTypeMultiplication<jint> integer_multiplication(type_left, type_right);
  return integer_multiplication.does_overflow();
}

// Compute the product type of two long ranges into this node.
const Type* MulLNode::mul_ring(const Type* type_left, const Type* type_right) const {
  const IntegerMulRing<TypeLong> integer_mul_ring(type_left->is_long(), type_right->is_long());
  return integer_mul_ring.compute();
  const IntegerTypeMultiplication<jlong> integer_multiplication(type_left->is_long(), type_right->is_long());
  return integer_multiplication.compute();
}

//=============================================================================

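Putting compute() and the overflow check together, a minimal sketch (plain C++, independent of the Type system; Range and mul_range are made-up stand-ins for TypeInt and compute()) of how the four boundary products either yield an exact range or collapse to bottom:

#include <algorithm>
#include <cassert>
#include <cstdint>

// Same normalized overflow value as in the earlier sketch.
static int32_t overflow_value(int32_t x, int32_t y) {
  const int64_t product = (int64_t)x * (int64_t)y;
  const int32_t high = (int32_t)((uint64_t)product >> 32u);
  const int32_t low  = (int32_t)((uint32_t)x * (uint32_t)y);
  return low < 0 ? high + 1 : high;
}

struct Range { int32_t lo, hi; };  // stand-in for TypeInt

// Mirrors compute(): all four normalized overflow values must agree, otherwise
// fall back to the full int range ("bottom").
static Range mul_range(Range l, Range r) {
  const int32_t ov = overflow_value(l.lo, r.lo);
  if (overflow_value(l.lo, r.hi) != ov ||
      overflow_value(l.hi, r.lo) != ov ||
      overflow_value(l.hi, r.hi) != ov) {
    return {INT32_MIN, INT32_MAX};
  }
  const int32_t p[4] = {(int32_t)((uint32_t)l.lo * (uint32_t)r.lo),
                        (int32_t)((uint32_t)l.lo * (uint32_t)r.hi),
                        (int32_t)((uint32_t)l.hi * (uint32_t)r.lo),
                        (int32_t)((uint32_t)l.hi * (uint32_t)r.hi)};
  return {*std::min_element(p, p + 4), *std::max_element(p, p + 4)};
}

int main() {
  Range a = mul_range({-3, 2}, {4, 5});           // exact: cross products -15..10
  assert(a.lo == -15 && a.hi == 10);
  Range b = mul_range({0, 100000}, {0, 100000});  // inconsistent overflows: bottom
  assert(b.lo == INT32_MIN && b.hi == INT32_MAX);
}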
@@ -95,6 +95,7 @@ public:
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *mul_ring( const Type *, const Type * ) const;
  static bool does_overflow(const TypeInt* type_left, const TypeInt* type_right);
  const Type *mul_id() const { return TypeInt::ONE; }
  const Type *add_id() const { return TypeInt::ZERO; }
  int add_opcode() const { return Op_AddI; }

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -775,7 +775,7 @@ void PhaseOutput::FillLocArray( int idx, MachSafePointNode* sfpt, Node *local,
      SafePointScalarMergeNode* smerge = local->as_SafePointScalarMerge();
      ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);

      if (mv == NULL) {
      if (mv == nullptr) {
        GrowableArray<ScopeValue*> deps;

        int merge_pointer_idx = smerge->merge_pointer_idx(sfpt->jvms());
@@ -783,7 +783,7 @@ void PhaseOutput::FillLocArray( int idx, MachSafePointNode* sfpt, Node *local,
        assert(deps.length() == 1, "missing value");

        int selector_idx = smerge->selector_idx(sfpt->jvms());
        (void)FillLocArray(1, NULL, sfpt->in(selector_idx), &deps, NULL);
        (void)FillLocArray(1, nullptr, sfpt->in(selector_idx), &deps, nullptr);
        assert(deps.length() == 2, "missing value");

        mv = new ObjectMergeValue(smerge->_idx, deps.at(0), deps.at(1));
@@ -1085,6 +1085,30 @@ void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
          }
          scval = sv;
        }
      } else if (obj_node->is_SafePointScalarMerge()) {
        SafePointScalarMergeNode* smerge = obj_node->as_SafePointScalarMerge();
        ObjectMergeValue* mv = (ObjectMergeValue*) sv_for_node_id(objs, smerge->_idx);

        if (mv == nullptr) {
          GrowableArray<ScopeValue*> deps;

          int merge_pointer_idx = smerge->merge_pointer_idx(youngest_jvms);
          FillLocArray(0, sfn, sfn->in(merge_pointer_idx), &deps, objs);
          assert(deps.length() == 1, "missing value");

          int selector_idx = smerge->selector_idx(youngest_jvms);
          FillLocArray(1, nullptr, sfn->in(selector_idx), &deps, nullptr);
          assert(deps.length() == 2, "missing value");

          mv = new ObjectMergeValue(smerge->_idx, deps.at(0), deps.at(1));
          set_sv_for_object_node(objs, mv);

          for (uint i = 1; i < smerge->req(); i++) {
            Node* obj_node = smerge->in(i);
            FillLocArray(mv->possible_objects()->length(), sfn, obj_node, mv->possible_objects(), objs);
          }
        }
        scval = mv;
      } else if (!obj_node->is_Con()) {
        OptoReg::Name obj_reg = C->regalloc()->get_reg_first(obj_node);
        if( obj_node->bottom_type()->base() == Type::NarrowOop ) {

@@ -43,18 +43,6 @@ const Type* SubTypeCheckNode::sub(const Type* sub_t, const Type* super_t) const
    if (!superklass->is_interface() && superklass->is_abstract() &&
        !superklass->as_instance_klass()->has_subklass()) {
      Compile::current()->dependencies()->assert_leaf_type(superklass);
      if (subk->is_same_java_type_as(superk) && !sub_t->maybe_null()) {
        // The super_t has no subclasses, and sub_t has the same type and is not null,
        // hence the check should always evaluate to EQ. However, this is an impossible
        // situation since super_t is also abstract, and hence sub_t cannot have the
        // same type and be non-null.
        // Still, if the non-static method of an abstract class without subclasses is
        // force-compiled, the Param0 has the self/this pointer with NotNull. This
        // method would now never be called, because of the leaf-type dependency. Hence,
        // just for consistency with verification, we return EQ.
        return TypeInt::CC_EQ;
      }
      // subk is either a supertype of superk, or null. In either case, superk is a subtype.
      return TypeInt::CC_GT;
    }
  }

@@ -4664,7 +4664,7 @@ template <class T1, class T2> bool TypePtr::is_meet_subtype_of_helper_for_array
  }

  if (other_elem == nullptr && this_elem == nullptr) {
    return this_one->_klass->is_subtype_of(other->_klass);
    return this_one->klass()->is_subtype_of(other->klass());
  }

  return false;
@@ -5993,7 +5993,7 @@ template <class T1, class T2> bool TypePtr::is_java_subtype_of_helper_for_instan
    return true;
  }

  return this_one->_klass->is_subtype_of(other->_klass) && this_one->_interfaces->contains(other->_interfaces);
  return this_one->klass()->is_subtype_of(other->klass()) && this_one->_interfaces->contains(other->_interfaces);
}

bool TypeInstKlassPtr::is_java_subtype_of_helper(const TypeKlassPtr* other, bool this_exact, bool other_exact) const {
@@ -6008,7 +6008,7 @@ template <class T1, class T2> bool TypePtr::is_same_java_type_as_helper_for_inst
  if (!this_one->is_instance_type(other)) {
    return false;
  }
  return this_one->_klass->equals(other->_klass) && this_one->_interfaces->eq(other->_interfaces);
  return this_one->klass()->equals(other->klass()) && this_one->_interfaces->eq(other->_interfaces);
}

bool TypeInstKlassPtr::is_same_java_type_as_helper(const TypeKlassPtr* other) const {
@@ -6022,7 +6022,7 @@ template <class T1, class T2> bool TypePtr::maybe_java_subtype_of_helper_for_ins
  }

  if (this_one->is_array_type(other)) {
    return !this_exact && this_one->_klass->equals(ciEnv::current()->Object_klass()) && other->_interfaces->contains(this_one->_interfaces);
    return !this_exact && this_one->klass()->equals(ciEnv::current()->Object_klass()) && other->_interfaces->contains(this_one->_interfaces);
  }

  assert(this_one->is_instance_type(other), "unsupported");
@@ -6031,12 +6031,12 @@ template <class T1, class T2> bool TypePtr::maybe_java_subtype_of_helper_for_ins
    return this_one->is_java_subtype_of(other);
  }

  if (!this_one->_klass->is_subtype_of(other->_klass) && !other->_klass->is_subtype_of(this_one->_klass)) {
  if (!this_one->klass()->is_subtype_of(other->klass()) && !other->klass()->is_subtype_of(this_one->klass())) {
    return false;
  }

  if (this_exact) {
    return this_one->_klass->is_subtype_of(other->_klass) && this_one->_interfaces->contains(other->_interfaces);
    return this_one->klass()->is_subtype_of(other->klass()) && this_one->_interfaces->contains(other->_interfaces);
  }

  return true;
@@ -6116,7 +6116,7 @@ uint TypeAryKlassPtr::hash(void) const {

//----------------------compute_klass------------------------------------------
// Compute the defining klass for this class
ciKlass* TypeAryPtr::compute_klass(DEBUG_ONLY(bool verify)) const {
ciKlass* TypeAryPtr::compute_klass() const {
  // Compute _klass based on element type.
  ciKlass* k_ary = nullptr;
  const TypeInstPtr *tinst;
@@ -6137,28 +6137,7 @@ ciKlass* TypeAryPtr::compute_klass(DEBUG_ONLY(bool verify)) const {
    // and object; Top occurs when doing join on Bottom.
    // Leave k_ary at null.
  } else {
    // Cannot compute array klass directly from basic type,
    // since subtypes of TypeInt all have basic type T_INT.
#ifdef ASSERT
    if (verify && el->isa_int()) {
      // Check simple cases when verifying klass.
      BasicType bt = T_ILLEGAL;
      if (el == TypeInt::BYTE) {
        bt = T_BYTE;
      } else if (el == TypeInt::SHORT) {
        bt = T_SHORT;
      } else if (el == TypeInt::CHAR) {
        bt = T_CHAR;
      } else if (el == TypeInt::INT) {
        bt = T_INT;
      } else {
        return _klass; // just return specified klass
      }
      return ciTypeArrayKlass::make(bt);
    }
#endif
    assert(!el->isa_int(),
           "integral arrays must be pre-equipped with a class");
    assert(!el->isa_int(), "integral arrays must be pre-equipped with a class");
    // Compute array klass directly from basic type
    k_ary = ciTypeArrayKlass::make(el->basic_type());
  }
@@ -6434,7 +6413,7 @@ template <class T1, class T2> bool TypePtr::is_java_subtype_of_helper_for_array(
    return this_one->is_reference_type(this_elem)->is_java_subtype_of_helper(this_one->is_reference_type(other_elem), this_exact, other_exact);
  }
  if (this_elem == nullptr && other_elem == nullptr) {
    return this_one->_klass->is_subtype_of(other->_klass);
    return this_one->klass()->is_subtype_of(other->klass());
  }
  return false;
}
@@ -6466,8 +6445,7 @@ template <class T1, class T2> bool TypePtr::is_same_java_type_as_helper_for_arra
    return this_one->is_reference_type(this_elem)->is_same_java_type_as(this_one->is_reference_type(other_elem));
  }
  if (other_elem == nullptr && this_elem == nullptr) {
    assert(this_one->_klass != nullptr && other->_klass != nullptr, "");
    return this_one->_klass->equals(other->_klass);
    return this_one->klass()->equals(other->klass());
  }
  return false;
}
@@ -6487,7 +6465,7 @@ template <class T1, class T2> bool TypePtr::maybe_java_subtype_of_helper_for_arr
    return true;
  }
  if (this_one->is_instance_type(other)) {
    return other->_klass->equals(ciEnv::current()->Object_klass()) && other->_interfaces->intersection_with(this_one->_interfaces)->eq(other->_interfaces);
    return other->klass()->equals(ciEnv::current()->Object_klass()) && other->_interfaces->intersection_with(this_one->_interfaces)->eq(other->_interfaces);
  }
  assert(this_one->is_array_type(other), "");

@@ -6506,7 +6484,7 @@ template <class T1, class T2> bool TypePtr::maybe_java_subtype_of_helper_for_arr
    return this_one->is_reference_type(this_elem)->maybe_java_subtype_of_helper(this_one->is_reference_type(other_elem), this_exact, other_exact);
  }
  if (other_elem == nullptr && this_elem == nullptr) {
    return this_one->_klass->is_subtype_of(other->_klass);
    return this_one->klass()->is_subtype_of(other->klass());
  }
  return false;
}

@@ -1413,7 +1413,7 @@ class TypeAryPtr : public TypeOopPtr {
  const TypeAry *_ary;          // Array we point into
  const bool     _is_autobox_cache;

  ciKlass* compute_klass(DEBUG_ONLY(bool verify = false)) const;
  ciKlass* compute_klass() const;

  // A pointer to delay allocation to Type::Initialize_shared()

@@ -140,7 +140,7 @@ int ForeignGlobals::compute_out_arg_bytes(const GrowableArray<VMStorage>& out_re

int ForeignGlobals::java_calling_convention(const BasicType* signature, int num_args, GrowableArray<VMStorage>& out_regs) {
  VMRegPair* vm_regs = NEW_RESOURCE_ARRAY(VMRegPair, num_args);
  int slots = SharedRuntime::java_calling_convention(signature, vm_regs, num_args);
  int slots = align_up(SharedRuntime::java_calling_convention(signature, vm_regs, num_args), 2);
  for (int i = 0; i < num_args; i++) {
    VMRegPair pair = vm_regs[i];
    // note, we ignore second here. Signature should consist of register-size values. So there should be
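The rounding added above is plain power-of-two align-up on the slot count, keeping the out-arg area 64-bit aligned when slots are 32-bit. A tiny standalone illustration (the helper is a made-up stand-in for HotSpot's align_up):

#include <cassert>

static int align_up_int(int n, int alignment) {  // alignment must be a power of two
  return (n + alignment - 1) & ~(alignment - 1);
}

int main() {
  assert(align_up_int(5, 2) == 6);  // odd slot count rounds up
  assert(align_up_int(6, 2) == 6);  // even counts are unchanged
}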
@@ -930,6 +930,11 @@ static void jni_invoke_nonstatic(JNIEnv *env, JavaValue* result, jobject receive
    }
  }

  if (selected_method->is_abstract()) {
    ResourceMark rm(THREAD);
    THROW_MSG(vmSymbols::java_lang_AbstractMethodError(), selected_method->name()->as_C_string());
  }

  methodHandle method(THREAD, selected_method);

  // Create object to hold arguments for the JavaCall, and associate it with

@@ -3933,8 +3933,6 @@ JVM_ENTRY(void, JVM_VirtualThreadStart(JNIEnv* env, jobject vthread))
    // set VTMS transition bit value in JavaThread and java.lang.VirtualThread object
    JvmtiVTMSTransitionDisabler::set_is_in_VTMS_transition(thread, vthread, false);
  }
#else
  fatal("Should only be called with JVMTI enabled");
#endif
JVM_END

@@ -3950,8 +3948,6 @@ JVM_ENTRY(void, JVM_VirtualThreadEnd(JNIEnv* env, jobject vthread))
    // set VTMS transition bit value in JavaThread and java.lang.VirtualThread object
    JvmtiVTMSTransitionDisabler::set_is_in_VTMS_transition(thread, vthread, true);
  }
#else
  fatal("Should only be called with JVMTI enabled");
#endif
JVM_END

@@ -3969,8 +3965,6 @@ JVM_ENTRY(void, JVM_VirtualThreadMount(JNIEnv* env, jobject vthread, jboolean hi
    // set VTMS transition bit value in JavaThread and java.lang.VirtualThread object
    JvmtiVTMSTransitionDisabler::set_is_in_VTMS_transition(thread, vthread, hide);
  }
#else
  fatal("Should only be called with JVMTI enabled");
#endif
JVM_END

@@ -3988,8 +3982,6 @@ JVM_ENTRY(void, JVM_VirtualThreadUnmount(JNIEnv* env, jobject vthread, jboolean
    // set VTMS transition bit value in JavaThread and java.lang.VirtualThread object
    JvmtiVTMSTransitionDisabler::set_is_in_VTMS_transition(thread, vthread, hide);
  }
#else
  fatal("Should only be called with JVMTI enabled");
#endif
JVM_END

@@ -4003,8 +3995,20 @@ JVM_ENTRY(void, JVM_VirtualThreadHideFrames(JNIEnv* env, jobject vthread, jboole
  assert(!thread->is_in_VTMS_transition(), "sanity check");
  assert(thread->is_in_tmp_VTMS_transition() != (bool)hide, "sanity check");
  thread->toggle_is_in_tmp_VTMS_transition();
#else
  fatal("Should only be called with JVMTI enabled");
#endif
JVM_END

// Notification from VirtualThread about disabling JVMTI Suspend in a sync critical section.
// Needed to avoid deadlocks with JVMTI suspend mechanism.
JVM_ENTRY(void, JVM_VirtualThreadDisableSuspend(JNIEnv* env, jobject vthread, jboolean enter))
#if INCLUDE_JVMTI
  if (!DoJVMTIVirtualThreadTransitions) {
    assert(!JvmtiExport::can_support_virtual_threads(), "sanity check");
    return;
  }
  assert(thread->is_disable_suspend() != (bool)enter,
         "nested or unbalanced monitor enter/exit is not allowed");
  thread->toggle_is_disable_suspend();
#endif
JVM_END

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -2407,6 +2407,7 @@ UpdateForPopTopFrameClosure::doit(Thread *target, bool self) {
void
SetFramePopClosure::do_thread(Thread *target) {
  Thread* current = Thread::current();
  ResourceMark rm(current); // vframes are resource allocated
  JavaThread* java_thread = JavaThread::cast(target);

  if (java_thread->is_exiting()) {
@@ -2433,6 +2434,9 @@ SetFramePopClosure::do_thread(Thread *target) {

void
SetFramePopClosure::do_vthread(Handle target_h) {
  Thread* current = Thread::current();
  ResourceMark rm(current); // vframes are resource allocated

  if (!_self && !JvmtiVTSuspender::is_vthread_suspended(target_h())) {
    _result = JVMTI_ERROR_THREAD_NOT_SUSPENDED;
    return;

@@ -390,7 +390,10 @@ UNSAFE_ENTRY_SCOPED(void, Unsafe_SetMemory0(JNIEnv *env, jobject unsafe, jobject
  oop base = JNIHandles::resolve(obj);
  void* p = index_oop_from_field_offset_long(base, offset);

  Copy::fill_to_memory_atomic(p, sz, value);
  {
    GuardUnsafeAccess guard(thread);
    Copy::fill_to_memory_atomic(p, sz, value);
  }
} UNSAFE_END

UNSAFE_ENTRY_SCOPED(void, Unsafe_CopyMemory0(JNIEnv *env, jobject unsafe, jobject srcObj, jlong srcOffset, jobject dstObj, jlong dstOffset, jlong size)) {

@@ -315,7 +315,6 @@ static bool matches_property_suffix(const char* option, const char* property, si
// any of the reserved module properties.
// property should be passed without the leading "-D".
bool Arguments::is_internal_module_property(const char* property) {
  assert((strncmp(property, "-D", 2) != 0), "Unexpected leading -D");
  if (strncmp(property, MODULE_PROPERTY_PREFIX, MODULE_PROPERTY_PREFIX_LEN) == 0) {
    const char* property_suffix = property + MODULE_PROPERTY_PREFIX_LEN;
    if (matches_property_suffix(property_suffix, ADDEXPORTS, ADDEXPORTS_LEN) ||

@@ -1775,7 +1775,7 @@ private:
  inline void before_thaw_java_frame(const frame& hf, const frame& caller, bool bottom, int num_frame);
  inline void after_thaw_java_frame(const frame& f, bool bottom);
  inline void patch(frame& f, const frame& caller, bool bottom);
  void clear_bitmap_bits(intptr_t* start, int range);
  void clear_bitmap_bits(address start, address end);

  NOINLINE void recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames);
  void recurse_thaw_compiled_frame(const frame& hf, frame& caller, int num_frames, bool stub_caller);
@@ -2166,13 +2166,22 @@ inline void ThawBase::patch(frame& f, const frame& caller, bool bottom) {
  assert(!bottom || (_cont.is_empty() != Continuation::is_cont_barrier_frame(f)), "");
}

void ThawBase::clear_bitmap_bits(intptr_t* start, int range) {
void ThawBase::clear_bitmap_bits(address start, address end) {
  assert(is_aligned(start, wordSize), "should be aligned: " PTR_FORMAT, p2i(start));
  assert(is_aligned(end, VMRegImpl::stack_slot_size), "should be aligned: " PTR_FORMAT, p2i(end));

  // we need to clear the bits that correspond to arguments as they reside in the caller frame
  // or they will keep objects that are otherwise unreachable alive
  log_develop_trace(continuations)("clearing bitmap for " INTPTR_FORMAT " - " INTPTR_FORMAT, p2i(start), p2i(start+range));
  // or they will keep objects that are otherwise unreachable alive.

  // Align `end` if UseCompressedOops is not set to avoid UB when calculating the bit index, since
  // `end` could be at an odd number of stack slots from `start`, i.e. might not be oop aligned.
  // If that's the case the bit range corresponding to the last stack slot should not have bits set
  // anyways and we assert that before returning.
  address effective_end = UseCompressedOops ? end : align_down(end, wordSize);
  log_develop_trace(continuations)("clearing bitmap for " INTPTR_FORMAT " - " INTPTR_FORMAT, p2i(start), p2i(effective_end));
  stackChunkOop chunk = _cont.tail();
  chunk->bitmap().clear_range(chunk->bit_index_for(start),
                              chunk->bit_index_for(start+range));
  chunk->bitmap().clear_range(chunk->bit_index_for(start), chunk->bit_index_for(effective_end));
  assert(effective_end == end || !chunk->bitmap().at(chunk->bit_index_for(effective_end)), "bit should not be set");
}
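The alignment concern handled in clear_bitmap_bits() can be made concrete with a small standalone sketch (plain C++; the constants mirror a 64-bit VM's 8-byte words and 4-byte stack slots, and align_down is re-implemented here):

#include <cassert>
#include <cstdint>

static uintptr_t align_down(uintptr_t p, uintptr_t alignment) {
  return p & ~(alignment - 1);  // alignment must be a power of two
}

int main() {
  const uintptr_t word = 8;                 // wordSize on a 64-bit VM
  const uintptr_t slot = 4;                 // VMRegImpl::stack_slot_size
  const uintptr_t start = 0x1000;
  const uintptr_t end = start + 3 * slot;   // an odd number of slots: 0x100c
  // Without UseCompressedOops the bitmap has one bit per word, so an end that
  // is not word aligned must be rounded down before computing a bit index.
  assert(align_down(end, word) == 0x1008);
}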
NOINLINE void ThawBase::recurse_thaw_interpreted_frame(const frame& hf, frame& caller, int num_frames) {
@@ -2225,7 +2234,9 @@ NOINLINE void ThawBase::recurse_thaw_interpreted_frame(const frame& hf, frame& c
    _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance);
  } else if (_cont.tail()->has_bitmap() && locals > 0) {
    assert(hf.is_heap_frame(), "should be");
    clear_bitmap_bits(heap_frame_bottom - locals, locals);
    address start = (address)(heap_frame_bottom - locals);
    address end = (address)heap_frame_bottom;
    clear_bitmap_bits(start, end);
  }

  DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)
@@ -2298,7 +2309,10 @@ void ThawBase::recurse_thaw_compiled_frame(const frame& hf, frame& caller, int n
    // can only fix caller once this frame is thawed (due to callee saved regs); this happens on the stack
    _cont.tail()->fix_thawed_frame(caller, SmallRegisterMap::instance);
  } else if (_cont.tail()->has_bitmap() && added_argsize > 0) {
    clear_bitmap_bits(heap_frame_top + ContinuationHelper::CompiledFrame::size(hf) + frame::metadata_words_at_top, added_argsize);
    address start = (address)(heap_frame_top + ContinuationHelper::CompiledFrame::size(hf) + frame::metadata_words_at_top);
    int stack_args_slots = f.cb()->as_compiled_method()->method()->num_stack_arg_slots(false /* rounded */);
    int argsize_in_bytes = stack_args_slots * VMRegImpl::stack_slot_size;
    clear_bitmap_bits(start, start + argsize_in_bytes);
  }

  DEBUG_ONLY(after_thaw_java_frame(f, is_bottom_frame);)

@@ -1439,7 +1439,7 @@ void frame::describe(FrameValues& values, int frame_no, const RegisterMap* reg_m
    assert(sig_index == sizeargs, "");
  }
  int stack_arg_slots = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs);
  assert(stack_arg_slots == m->num_stack_arg_slots(), "");
  assert(stack_arg_slots == m->num_stack_arg_slots(false /* rounded */), "");
  int out_preserve = SharedRuntime::out_preserve_stack_slots();
  int sig_index = 0;
  int arg_index = (m->is_static() ? 0 : -1);

Some files were not shown because too many files have changed in this diff.